/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>

#include "CUnit/Basic.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
/*
 * This defines the delay in ms after which the memory location designated for
 * comparison against the reference value is written to, unblocking the command
 * processor.
 */
#define WRITE_MEM_ADDRESS_DELAY_MS 100
#define PACKET_TYPE3	3

#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |		\
			 (((op) & 0xFF) << 8) |		\
			 (((n) & 0x3FFF) << 16))
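/*
 * Example: PACKET3(PACKET3_WAIT_REG_MEM, 5) encodes the header 0xC0053C00 --
 * type 3 in bits 31:30, the dword count (5) in bits 29:16 and the opcode
 * (0x3C) in bits 15:8.
 */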
#define	PACKET3_WAIT_REG_MEM			0x3C
#define		WAIT_REG_MEM_FUNCTION(x)	((x) << 0)
		/* 0 - always, 1 - <, 2 - <=, 3 - ==, 4 - !=, 5 - >=, 6 - > */
#define		WAIT_REG_MEM_MEM_SPACE(x)	((x) << 4)
		/* 0 - reg, 1 - mem */
#define		WAIT_REG_MEM_OPERATION(x)	((x) << 6)
		/* 0 - wait_reg_mem, 1 - wr_wait_wr_reg */
#define		WAIT_REG_MEM_ENGINE(x)		((x) << 8)
		/* 0 - me, 1 - pfp */
#define	PACKET3_WRITE_DATA			0x37
#define		WRITE_DATA_DST_SEL(x)		((x) << 8)
		/* 0 - register
		 * 1 - memory (sync - via GRBM)
		 * 2 - gl2
		 * 3 - gds
		 * 4 - reserved
		 * 5 - memory (async - direct)
		 */
#define		WR_ONE_ADDR			(1 << 16)
#define		WR_CONFIRM			(1 << 20)
#define		WRITE_DATA_CACHE_POLICY(x)	((x) << 25)
		/* 0 - LRU, 1 - Stream */
#define		WRITE_DATA_ENGINE_SEL(x)	((x) << 30)
		/* 0 - me, 1 - pfp, 2 - ce */
#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR	0x54f

#define SDMA_PKT_HEADER_OP(x)	((x) & 0xff)
#define SDMA_OP_POLL_REGMEM	8
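/*
 * SDMA POLL_REGMEM header layout as used below (see the packet built in
 * amdgpu_deadlock_sdma()): opcode in bits 7:0, compare function in bits
 * 30:28, and bit 31 selecting memory (1) vs. register (0) polling.
 */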
static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;

static pthread_t stress_thread;
static uint32_t *ptr;

static uint32_t family_id;
static uint32_t chip_rev;
static uint32_t chip_id;

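/*
 * Set in suite_deadlock_tests_enable() for GFX9+ ASICs: the IB/poll buffer is
 * then mapped with AMDGPU_VM_MTYPE_UC (uncached), presumably so the CPU write
 * that unblocks the engine is reliably observed.
 */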
int use_uc_mtype = 0;
static void amdgpu_deadlock_helper(unsigned ip_type);
static void amdgpu_deadlock_gfx(void);
static void amdgpu_deadlock_compute(void);
static void amdgpu_illegal_reg_access(void);
static void amdgpu_illegal_mem_access(void);
static void amdgpu_deadlock_sdma(void);
static void amdgpu_dispatch_hang_gfx(void);
static void amdgpu_dispatch_hang_compute(void);
static void amdgpu_dispatch_hang_slow_gfx(void);
static void amdgpu_dispatch_hang_slow_compute(void);
static void amdgpu_draw_hang_gfx(void);
static void amdgpu_draw_hang_slow_gfx(void);
static void amdgpu_hang_sdma(void);
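/*
 * Overall approach: every test below wedges one ring -- either by polling
 * memory that is only written after a delay, or by submitting a malformed
 * packet -- for longer than the amdgpu.lockup_timeout the suite is meant to
 * run with, so the kernel's GPU reset kicks in.  Submissions and fence waits
 * on a reset context may fail with -ECANCELED, hence the
 * (r == 0 || r == -ECANCELED) assertions throughout.
 */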
CU_BOOL suite_deadlock_tests_enable(void)
{
	CU_BOOL enable = CU_TRUE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle))
		return CU_FALSE;

	family_id = device_handle->info.family_id;
	chip_id = device_handle->info.chip_external_rev;
	chip_rev = device_handle->info.chip_rev;

	/*
	 * Only enable for ASICs supporting GPU reset and for which it's
	 * enabled by default (currently GFX8+ dGPUs and GFX9+ APUs).  Note
	 * that Raven1 did not support GPU reset, but newer variants do.
	 */
	if (family_id == AMDGPU_FAMILY_SI ||
	    family_id == AMDGPU_FAMILY_KV ||
	    family_id == AMDGPU_FAMILY_CZ ||
	    family_id == AMDGPU_FAMILY_RV) {
		printf("\n\nGPU reset is not enabled for the ASIC, deadlock suite disabled\n");
		enable = CU_FALSE;
	}

	if (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
		if (amdgpu_set_test_active("Deadlock Tests",
				"gfx ring block test (set amdgpu.lockup_timeout=50)",
				CU_FALSE))
			fprintf(stderr, "test deactivation failed - %s\n",
				CU_get_error_msg());
	}

	if (device_handle->info.family_id >= AMDGPU_FAMILY_AI)
		use_uc_mtype = 1;

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	return enable;
}
int suite_deadlock_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);
	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError:%s. "
				"Hint:Try to run this test program as root.",
				strerror(errno));
		return CUE_SINIT_FAILED;
	}

	return CUE_SUCCESS;
}
int suite_deadlock_tests_clean(void)
{
	int r = amdgpu_device_deinitialize(device_handle);

	if (r)
		return CUE_SCLEAN_FAILED;

	return CUE_SUCCESS;
}
CU_TestInfo deadlock_tests[] = {
	{ "gfx ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_gfx },
	{ "compute ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_compute },
	{ "sdma ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_sdma },
	{ "illegal reg access test", amdgpu_illegal_reg_access },
	{ "illegal mem access test (set amdgpu.vm_fault_stop=2)", amdgpu_illegal_mem_access },
	{ "gfx ring bad dispatch test (set amdgpu.lockup_timeout=50)", amdgpu_dispatch_hang_gfx },
	{ "compute ring bad dispatch test (set amdgpu.lockup_timeout=50,50)", amdgpu_dispatch_hang_compute },
	{ "gfx ring bad slow dispatch test (set amdgpu.lockup_timeout=50)", amdgpu_dispatch_hang_slow_gfx },
	{ "compute ring bad slow dispatch test (set amdgpu.lockup_timeout=50,50)", amdgpu_dispatch_hang_slow_compute },
	{ "gfx ring bad draw test (set amdgpu.lockup_timeout=50)", amdgpu_draw_hang_gfx },
	{ "gfx ring slow bad draw test (set amdgpu.lockup_timeout=50)", amdgpu_draw_hang_slow_gfx },
	{ "sdma ring corrupted header test (set amdgpu.lockup_timeout=50)", amdgpu_hang_sdma },
	CU_TEST_INFO_NULL,
};
static void *write_mem_address(void *data)
{
	int i;

	/* useconds_t range is [0, 1,000,000] so use a loop for waits > 1s */
	for (i = 0; i < WRITE_MEM_ADDRESS_DELAY_MS; i++)
		usleep(1000);

	ptr[256] = 0x1;

	return NULL;
}
static void amdgpu_deadlock_gfx(void)
{
	amdgpu_deadlock_helper(AMDGPU_HW_IP_GFX);
}

static void amdgpu_deadlock_compute(void)
{
	amdgpu_deadlock_helper(AMDGPU_HW_IP_COMPUTE);
}
static void amdgpu_deadlock_helper(unsigned ip_type)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;

	r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT, 0,
			use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
			&ib_result_handle, &ib_result_cpu,
			&ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;
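	/*
	 * Build a WAIT_REG_MEM packet that polls ptr[256] (offset 256*4 in
	 * this same buffer) until it becomes non-zero.  The stress thread
	 * writes it only after WRITE_MEM_ADDRESS_DELAY_MS, so with
	 * amdgpu.lockup_timeout=50 the ring stalls long enough to trigger a
	 * GPU reset first.
	 */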
	ptr[0] = PACKET3(PACKET3_WAIT_REG_MEM, 5);
	ptr[1] = (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
		  WAIT_REG_MEM_FUNCTION(4) | /* != */
		  WAIT_REG_MEM_ENGINE(0)); /* me */
	ptr[2] = (ib_result_mc_address + 256*4) & 0xfffffffc;
	ptr[3] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
	ptr[4] = 0x00000000; /* reference value */
	ptr[5] = 0xffffffff; /* and mask */
	ptr[6] = 0x00000004; /* poll interval */

	/* pad the rest of the IB with NOP packets */
	for (i = 7; i < 16; ++i)
		ptr[i] = 0xffff1000;

	ptr[256] = 0x0; /* the memory we wait on to change */
	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = ip_type;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	for (i = 0; i < 200; i++) {
		r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
	}
	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = ip_type;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
	pthread_join(stress_thread, NULL);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_deadlock_sdma(void)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;
	struct drm_amdgpu_info_hw_ip info;
	uint32_t ring_id;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);
	/* run the test on every available SDMA ring */
	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
		r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
				AMDGPU_GEM_DOMAIN_GTT, 0,
				use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
				&ib_result_handle, &ib_result_cpu,
				&ib_result_mc_address, &va_handle);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
				       &bo_list);
		CU_ASSERT_EQUAL(r, 0);

		ptr = ib_result_cpu;
		i = 0;
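		/*
		 * SDMA equivalent of the GFX WAIT_REG_MEM above: a
		 * POLL_REGMEM packet that keeps polling ptr[256] until it is
		 * no longer equal to the reference value.
		 */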
		ptr[i++] = SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
				(0 << 26) | /* WAIT_REG_MEM */
				(4 << 28) | /* != */
				(1 << 31); /* memory */
		ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;
		ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
		ptr[i++] = 0x00000000; /* reference value */
		ptr[i++] = 0xffffffff; /* and mask */
		ptr[i++] = 4 | /* poll interval */
				(0xfff << 16); /* retry count */

		/* zero-pad the rest of the IB */
		for (; i < 16; i++)
			ptr[i] = 0;

		ptr[256] = 0x0; /* the memory we wait on to change */
		memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
		ib_info.ib_mc_address = ib_result_mc_address;
		ib_info.size = 16;

		memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
		ibs_request.ip_type = AMDGPU_HW_IP_DMA;
		ibs_request.ring = ring_id;
		ibs_request.number_of_ibs = 1;
		ibs_request.ibs = &ib_info;
		ibs_request.resources = bo_list;
		ibs_request.fence_info.handle = NULL;

		for (i = 0; i < 200; i++) {
			r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
			CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
		}
		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
		fence_status.context = context_handle;
		fence_status.ip_type = AMDGPU_HW_IP_DMA;
		fence_status.ip_instance = 0;
		fence_status.ring = ring_id;
		fence_status.fence = ibs_request.seq_no;

		r = amdgpu_cs_query_fence_status(&fence_status,
						 AMDGPU_TIMEOUT_INFINITE, 0,
						 &expired);
		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
		pthread_join(stress_thread, NULL);

		r = amdgpu_bo_list_destroy(bo_list);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
					     ib_result_mc_address, 4096);
		CU_ASSERT_EQUAL(r, 0);
	}

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
static void bad_access_helper(int reg_access)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT, 0, 0,
			&ib_result_handle, &ib_result_cpu,
			&ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;
	i = 0;
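	/*
	 * WRITE_DATA either to a privileged register (reg_access != 0,
	 * mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR) or to an unmapped GPU address
	 * (0xdeadbee0); both are illegal from a user-mode IB and should be
	 * rejected or faulted by the hardware/kernel.
	 */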
	ptr[i++] = PACKET3(PACKET3_WRITE_DATA, 3);
	ptr[i++] = (reg_access ? WRITE_DATA_DST_SEL(0) : WRITE_DATA_DST_SEL(5)) | WR_CONFIRM;
	ptr[i++] = reg_access ? mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR : 0xdeadbee0;
	ptr[i++] = 0;
	ptr[i++] = 0xdeadbeef;

	/* pad the rest of the IB with NOP packets */
	for (; i < 16; ++i)
		ptr[i] = 0xffff1000;
	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_illegal_reg_access(void)
{
	bad_access_helper(1);
}

static void amdgpu_illegal_mem_access(void)
{
	bad_access_helper(0);
}
static void amdgpu_dispatch_hang_gfx(void)
{
	amdgpu_test_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_GFX);
}

static void amdgpu_dispatch_hang_compute(void)
{
	amdgpu_test_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}

static void amdgpu_dispatch_hang_slow_gfx(void)
{
	amdgpu_test_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_GFX);
}

static void amdgpu_dispatch_hang_slow_compute(void)
{
	amdgpu_test_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}

static void amdgpu_draw_hang_gfx(void)
{
	amdgpu_test_draw_hang_helper(device_handle);
}

static void amdgpu_draw_hang_slow_gfx(void)
{
	amdgpu_test_draw_hang_slow_helper(device_handle);
}
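/*
 * Submit an SDMA IB whose first dword is a deliberately corrupted copy-linear
 * header (0x23decd3d) rather than a valid COPY packet, so the SDMA engine
 * cannot execute it and the ring has to be recovered.
 */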
static void amdgpu_hang_sdma(void)
{
	const int sdma_write_length = 1024;
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	amdgpu_bo_handle bo1, bo2;
	amdgpu_bo_handle resources[3];
	amdgpu_bo_list_handle bo_list;
	void *ib_result_cpu;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_fence fence_status;
	uint64_t bo1_mc, bo2_mc;
	uint64_t ib_result_mc_address;
	volatile unsigned char *bo1_cpu, *bo2_cpu;
	amdgpu_va_handle bo1_va_handle, bo2_va_handle;
	amdgpu_va_handle va_handle;
	struct drm_amdgpu_info_hw_ip hw_ip_info;
	uint32_t expired;
	int i, r;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &hw_ip_info);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* allocate the copy source bo1 */
	r = amdgpu_bo_alloc_and_map(device_handle,
				    sdma_write_length, 4096,
				    AMDGPU_GEM_DOMAIN_GTT,
				    0, &bo1,
				    (void**)&bo1_cpu, &bo1_mc,
				    &bo1_va_handle);
	CU_ASSERT_EQUAL(r, 0);
	/* fill bo1 with a known pattern */
	memset((void*)bo1_cpu, 0xaa, sdma_write_length);

	/* allocate bo2 as the copy destination for SDMA use */
	r = amdgpu_bo_alloc_and_map(device_handle,
				    sdma_write_length, 4096,
				    AMDGPU_GEM_DOMAIN_GTT,
				    0, &bo2,
				    (void**)&bo2_cpu, &bo2_mc,
				    &bo2_va_handle);
	CU_ASSERT_EQUAL(r, 0);
	/* clear bo2 */
	memset((void*)bo2_cpu, 0, sdma_write_length);

	resources[0] = bo1;
	resources[1] = bo2;
	resources[2] = ib_result_handle;
	r = amdgpu_bo_list_create(device_handle, 3,
				  resources, NULL, &bo_list);
	CU_ASSERT_EQUAL(r, 0);
	/* fill the PM4 stream, starting with a bad COPY_LINEAR header */
	ptr = ib_result_cpu;
	i = 0;
	ptr[i++] = 0x23decd3d;
	ptr[i++] = sdma_write_length - 1;
	ptr[i++] = 0;
	ptr[i++] = 0xffffffff & bo1_mc;
	ptr[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
	ptr[i++] = 0xffffffff & bo2_mc;
	ptr[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
	/* execute the command */
	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = i;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = AMDGPU_HW_IP_DMA;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);
	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_DMA;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
				     sdma_write_length);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
				     sdma_write_length);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}