/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#if HAVE_ALLOCA_H
# include <alloca.h>
#endif

#include "CUnit/Basic.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

#include <pthread.h>


/*
 * This defines the delay in ms after which the memory location designated
 * for comparison against the reference value is written to, unblocking the
 * command processor.
 */
#define WRITE_MEM_ADDRESS_DELAY_MS 100

#define	PACKET_TYPE3	3

#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
			 (((op) & 0xFF) << 8) |				\
			 ((n) & 0x3FFF) << 16)
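
/*
 * Example (illustrative): PACKET3(PACKET3_WAIT_REG_MEM, 5) evaluates to
 * (3 << 30) | (0x3C << 8) | (5 << 16) = 0xc0053c00, a type-3 header with
 * opcode 0x3C and a count field of 5 (one less than the six payload dwords
 * that follow it below).
 */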

#define	PACKET3_WAIT_REG_MEM				0x3C
#define		WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
		/* 0 - always
		 * 1 - <
		 * 2 - <=
		 * 3 - ==
		 * 4 - !=
		 * 5 - >=
		 * 6 - >
		 */
#define		WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
		/* 0 - reg
		 * 1 - mem
		 */
#define		WAIT_REG_MEM_OPERATION(x)               ((x) << 6)
		/* 0 - wait_reg_mem
		 * 1 - wr_wait_wr_reg
		 */
#define		WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
		/* 0 - me
		 * 1 - pfp
		 */
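
/*
 * Example (illustrative): the control dword built in amdgpu_deadlock_helper,
 *   WAIT_REG_MEM_MEM_SPACE(1) | WAIT_REG_MEM_FUNCTION(4) | WAIT_REG_MEM_ENGINE(0),
 * evaluates to 0x14: the ME polls a memory location until it no longer
 * equals the reference value.
 */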

#define	PACKET3_WRITE_DATA				0x37
#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
		/* 0 - register
		 * 1 - memory (sync - via GRBM)
		 * 2 - gl2
		 * 3 - gds
		 * 4 - reserved
		 * 5 - memory (async - direct)
		 */
#define		WR_ONE_ADDR                             (1 << 16)
#define		WR_CONFIRM                              (1 << 20)
#define		WRITE_DATA_CACHE_POLICY(x)              ((x) << 25)
		/* 0 - LRU
		 * 1 - Stream
		 */
#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
		/* 0 - me
		 * 1 - pfp
		 * 2 - ce
		 */
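
/*
 * Example (illustrative): WRITE_DATA_DST_SEL(5) | WR_CONFIRM evaluates to
 * 0x100500 (confirmed async write to memory), whereas
 * WRITE_DATA_DST_SEL(0) | WR_CONFIRM (0x100000) targets a register instead;
 * bad_access_helper below selects between the two.
 */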

#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR                                      0x54f

#define SDMA_PKT_HEADER_OP(x)	((x) & 0xff)
#define SDMA_OP_POLL_REGMEM  8
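
/*
 * Example (illustrative): the SDMA poll header assembled in
 * amdgpu_deadlock_sdma,
 *   SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | (0 << 26) | (4 << 28) | (1u << 31),
 * evaluates to 0xc0000008: opcode 8 in bits 7:0, compare function 4 (!=) in
 * bits 30:28, and bit 31 set to poll memory rather than a register.
 */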

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;

static pthread_t stress_thread;
static uint32_t *ptr;

static uint32_t family_id;
static uint32_t chip_rev;
static uint32_t chip_id;

int use_uc_mtype = 0;

static void amdgpu_deadlock_helper(unsigned ip_type);
static void amdgpu_deadlock_gfx(void);
static void amdgpu_deadlock_compute(void);
static void amdgpu_illegal_reg_access(void);
static void amdgpu_illegal_mem_access(void);
static void amdgpu_deadlock_sdma(void);
static void amdgpu_dispatch_hang_gfx(void);
static void amdgpu_dispatch_hang_compute(void);
static void amdgpu_dispatch_hang_slow_gfx(void);
static void amdgpu_dispatch_hang_slow_compute(void);
static void amdgpu_draw_hang_gfx(void);
static void amdgpu_draw_hang_slow_gfx(void);
static void amdgpu_hang_sdma(void);
static void amdgpu_hang_slow_sdma(void);

CU_BOOL suite_deadlock_tests_enable(void)
{
	CU_BOOL enable = CU_TRUE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle))
		return CU_FALSE;

	family_id = device_handle->info.family_id;
	chip_id = device_handle->info.chip_external_rev;
	chip_rev = device_handle->info.chip_rev;

	/*
	 * Only enable for ASICs supporting GPU reset and for which it's
	 * enabled by default (currently GFX8+ dGPUs and GFX9+ APUs).  Note
	 * that Raven1 did not support GPU reset, but newer variants do.
	 */
	if (family_id == AMDGPU_FAMILY_SI ||
	    family_id == AMDGPU_FAMILY_KV ||
	    family_id == AMDGPU_FAMILY_CZ ||
	    family_id == AMDGPU_FAMILY_RV) {
		printf("\n\nGPU reset is not enabled for the ASIC, deadlock suite disabled\n");
		enable = CU_FALSE;
	}

	if (asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
		if (amdgpu_set_test_active("Deadlock Tests",
					"gfx ring block test (set amdgpu.lockup_timeout=50)",
					CU_FALSE))
			fprintf(stderr, "test deactivation failed - %s\n",
				CU_get_error_msg());
	}

	if (device_handle->info.family_id >= AMDGPU_FAMILY_AI)
		use_uc_mtype = 1;

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	return enable;
}

int suite_deadlock_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);

	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
				"Hint: try running this test program as root.\n",
				strerror(errno));
		return CUE_SINIT_FAILED;
	}

	return CUE_SUCCESS;
}

int suite_deadlock_tests_clean(void)
{
	int r = amdgpu_device_deinitialize(device_handle);

	if (r == 0)
		return CUE_SUCCESS;
	else
		return CUE_SCLEAN_FAILED;
}


CU_TestInfo deadlock_tests[] = {
	{ "gfx ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_gfx },
	{ "compute ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_compute },
	{ "sdma ring block test (set amdgpu.lockup_timeout=50)", amdgpu_deadlock_sdma },
	{ "illegal reg access test", amdgpu_illegal_reg_access },
	{ "illegal mem access test (set amdgpu.vm_fault_stop=2)", amdgpu_illegal_mem_access },
	{ "gfx ring bad dispatch test (set amdgpu.lockup_timeout=50)", amdgpu_dispatch_hang_gfx },
	{ "compute ring bad dispatch test (set amdgpu.lockup_timeout=50,50)", amdgpu_dispatch_hang_compute },
	{ "gfx ring bad slow dispatch test (set amdgpu.lockup_timeout=50)", amdgpu_dispatch_hang_slow_gfx },
	{ "compute ring bad slow dispatch test (set amdgpu.lockup_timeout=50,50)", amdgpu_dispatch_hang_slow_compute },
	{ "gfx ring bad draw test (set amdgpu.lockup_timeout=50)", amdgpu_draw_hang_gfx },
	{ "gfx ring slow bad draw test (set amdgpu.lockup_timeout=50)", amdgpu_draw_hang_slow_gfx },
	{ "sdma ring corrupted header test (set amdgpu.lockup_timeout=50)", amdgpu_hang_sdma },
	{ "sdma ring slow linear copy test (set amdgpu.lockup_timeout=50)", amdgpu_hang_slow_sdma },
	CU_TEST_INFO_NULL,
};

static void *write_mem_address(void *data)
{
	int i;

	/* useconds_t range is [0, 1,000,000] so use a loop for waits > 1 s */
	for (i = 0; i < WRITE_MEM_ADDRESS_DELAY_MS; i++)
		usleep(1000);

	ptr[256] = 0x1;

	return NULL;
}

static void amdgpu_deadlock_gfx(void)
{
	amdgpu_deadlock_helper(AMDGPU_HW_IP_GFX);
}

static void amdgpu_deadlock_compute(void)
{
	amdgpu_deadlock_helper(AMDGPU_HW_IP_COMPUTE);
}

static void amdgpu_deadlock_helper(unsigned ip_type)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;

	r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
			&ib_result_handle, &ib_result_cpu,
			&ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;

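	/*
	 * WAIT_REG_MEM: stall the engine until the dword at
	 * ib_result_mc_address + 256*4 (ptr[256] below) stops being equal to
	 * the reference value 0; the stress thread writes it after
	 * WRITE_MEM_ADDRESS_DELAY_MS.
	 */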
	ptr[0] = PACKET3(PACKET3_WAIT_REG_MEM, 5);
	ptr[1] = (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
			 WAIT_REG_MEM_FUNCTION(4) | /* != */
			 WAIT_REG_MEM_ENGINE(0));  /* me */
	ptr[2] = (ib_result_mc_address + 256*4) & 0xfffffffc;
	ptr[3] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
	ptr[4] = 0x00000000; /* reference value */
	ptr[5] = 0xffffffff; /* and mask */
	ptr[6] = 0x00000004; /* poll interval */

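	/* pad the rest of the IB with type-3 NOPs (0xffff1000) */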
	for (i = 7; i < 16; ++i)
		ptr[i] = 0xffff1000;

	ptr[256] = 0x0; /* the memory we wait on to change */

	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = ip_type;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;
	for (i = 0; i < 200; i++) {
		r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
	}

	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = ip_type;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
			AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

	pthread_join(stress_thread, NULL);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

static void amdgpu_deadlock_sdma(void)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;
	struct drm_amdgpu_info_hw_ip info;
	uint32_t ring_id;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &info);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
		r = pthread_create(&stress_thread, NULL, write_mem_address, NULL);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
				AMDGPU_GEM_DOMAIN_GTT, 0, use_uc_mtype ? AMDGPU_VM_MTYPE_UC : 0,
				&ib_result_handle, &ib_result_cpu,
				&ib_result_mc_address, &va_handle);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
				       &bo_list);
		CU_ASSERT_EQUAL(r, 0);

		ptr = ib_result_cpu;
		i = 0;

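		/*
		 * SDMA POLL_REGMEM: poll the dword at
		 * ib_result_mc_address + 256*4 (ptr[256] below) every 4
		 * cycles until it no longer equals 0, stalling the ring
		 * until the stress thread writes it.
		 */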
		ptr[i++] = SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
				(0 << 26) | /* WAIT_REG_MEM */
				(4 << 28) | /* != */
				(1u << 31); /* memory */
		ptr[i++] = (ib_result_mc_address + 256*4) & 0xfffffffc;
		ptr[i++] = ((ib_result_mc_address + 256*4) >> 32) & 0xffffffff;
		ptr[i++] = 0x00000000; /* reference value */
		ptr[i++] = 0xffffffff; /* and mask */
		ptr[i++] = 4 | /* poll interval */
				(0xfff << 16); /* retry count */

		for (; i < 16; i++)
			ptr[i] = 0;

		ptr[256] = 0x0; /* the memory we wait on to change */

		memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
		ib_info.ib_mc_address = ib_result_mc_address;
		ib_info.size = 16;

		memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
		ibs_request.ip_type = AMDGPU_HW_IP_DMA;
		ibs_request.ring = ring_id;
		ibs_request.number_of_ibs = 1;
		ibs_request.ibs = &ib_info;
		ibs_request.resources = bo_list;
		ibs_request.fence_info.handle = NULL;

		for (i = 0; i < 200; i++) {
			r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
			CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
		}

		memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
		fence_status.context = context_handle;
		fence_status.ip_type = AMDGPU_HW_IP_DMA;
		fence_status.ip_instance = 0;
		fence_status.ring = ring_id;
		fence_status.fence = ibs_request.seq_no;

		r = amdgpu_cs_query_fence_status(&fence_status,
				AMDGPU_TIMEOUT_INFINITE, 0, &expired);
		CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

		pthread_join(stress_thread, NULL);

		r = amdgpu_bo_list_destroy(bo_list);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
					     ib_result_mc_address, 4096);
		CU_ASSERT_EQUAL(r, 0);
	}

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

static void bad_access_helper(int reg_access)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_fence fence_status;
	uint32_t expired;
	int i, r;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map_raw(device_handle, 4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT, 0, 0,
			&ib_result_handle, &ib_result_cpu,
			&ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ptr = ib_result_cpu;
	i = 0;

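	/*
	 * WRITE_DATA of 0xdeadbeef to either a protected register
	 * (reg_access) or an unmapped GPU virtual address (0xdeadbee0),
	 * provoking an illegal register access or a VM fault.
	 */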
	ptr[i++] = PACKET3(PACKET3_WRITE_DATA, 3);
	ptr[i++] = (reg_access ? WRITE_DATA_DST_SEL(0) : WRITE_DATA_DST_SEL(5)) | WR_CONFIRM;
	ptr[i++] = reg_access ? mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR : 0xdeadbee0;
	ptr[i++] = 0;
	ptr[i++] = 0xdeadbeef;

	for (; i < 16; ++i)
		ptr[i] = 0xffff1000;

	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = 16;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
			AMDGPU_TIMEOUT_INFINITE, 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

static void amdgpu_illegal_reg_access(void)
{
	bad_access_helper(1);
}

static void amdgpu_illegal_mem_access(void)
{
	bad_access_helper(0);
}

static void amdgpu_dispatch_hang_gfx(void)
{
	amdgpu_test_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_GFX);
}

static void amdgpu_dispatch_hang_compute(void)
{
	amdgpu_test_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}

static void amdgpu_dispatch_hang_slow_gfx(void)
{
	amdgpu_test_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_GFX);
}

static void amdgpu_dispatch_hang_slow_compute(void)
{
	amdgpu_test_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}

static void amdgpu_draw_hang_gfx(void)
{
	amdgpu_test_draw_hang_helper(device_handle);
}

static void amdgpu_draw_hang_slow_gfx(void)
{
	amdgpu_test_draw_hang_slow_helper(device_handle);
}

#define DMA_CORRUPTED_HEADER_HANG	1
#define DMA_SLOW_LINEARCOPY_HANG	2

static void amdgpu_hang_sdma_helper(unsigned hang_type)
{
	const int sdma_write_length = 1024;
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	amdgpu_bo_handle bo1, bo2;
	amdgpu_bo_handle resources[3];
	amdgpu_bo_list_handle bo_list;
	void *ib_result_cpu;
	struct amdgpu_cs_ib_info ib_info;
	struct amdgpu_cs_request ibs_request;
	struct amdgpu_cs_fence fence_status;
	uint64_t bo1_mc, bo2_mc;
	uint64_t ib_result_mc_address;
	volatile unsigned char *bo1_cpu, *bo2_cpu;
	amdgpu_va_handle bo1_va_handle, bo2_va_handle;
	amdgpu_va_handle va_handle;
	struct drm_amdgpu_info_hw_ip hw_ip_info;
	int i, j, r;
	uint32_t expired, ib_size;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_DMA, 0, &hw_ip_info);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	if (hang_type == DMA_CORRUPTED_HEADER_HANG)
		ib_size = 4096;
	else
		ib_size = 4096 * 0x20000;

	r = amdgpu_bo_alloc_and_map(device_handle, ib_size, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_alloc_and_map(device_handle,
				    sdma_write_length, 4096,
				    AMDGPU_GEM_DOMAIN_GTT,
				    0, &bo1,
				    (void**)&bo1_cpu, &bo1_mc,
				    &bo1_va_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* set bo1 */
	memset((void*)bo1_cpu, 0xaa, sdma_write_length);

	/* allocate bo2 as the SDMA copy destination */
	r = amdgpu_bo_alloc_and_map(device_handle,
				    sdma_write_length, 4096,
				    AMDGPU_GEM_DOMAIN_GTT,
				    0, &bo2,
				    (void**)&bo2_cpu, &bo2_mc,
				    &bo2_va_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* clear bo2 */
	memset((void*)bo2_cpu, 0, sdma_write_length);

	resources[0] = bo1;
	resources[1] = bo2;
	resources[2] = ib_result_handle;
	r = amdgpu_bo_list_create(device_handle, 3,
				  resources, NULL, &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	/* fill the IB: either a corrupted copy-linear header or a long chain of copies */
	ptr = ib_result_cpu;
	i = 0;
	if (hang_type == DMA_CORRUPTED_HEADER_HANG) {
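		/*
		 * 0x23decd3d carries 0x3d in the opcode field (bits 7:0)
		 * instead of a valid SDMA opcode such as COPY (1), so the
		 * ring stalls on the corrupted header.
		 */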
		ptr[i++] = 0x23decd3d;
		ptr[i++] = sdma_write_length - 1;
		ptr[i++] = 0;
		ptr[i++] = 0xffffffff & bo1_mc;
		ptr[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
		ptr[i++] = 0xffffffff & bo2_mc;
		ptr[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
	} else {
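		/*
		 * Valid copy-linear packets (opcode 1), but roughly 256K of
		 * them bouncing sdma_write_length bytes between bo1 and bo2,
		 * so the submission far exceeds a 50 ms lockup timeout.
		 */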
		for (j = 1; j < 0x20000; j++) {
			ptr[i++] = 0x1;
			ptr[i++] = sdma_write_length - 1;
			ptr[i++] = 0;
			ptr[i++] = 0xffffffff & bo1_mc;
			ptr[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
			ptr[i++] = 0xffffffff & bo2_mc;
			ptr[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
			ptr[i++] = 0x1;
			ptr[i++] = sdma_write_length - 1;
			ptr[i++] = 0;
			ptr[i++] = 0xffffffff & bo2_mc;
			ptr[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
			ptr[i++] = 0xffffffff & bo1_mc;
			ptr[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
		}
	}

	/* submit the IB */
	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
	ib_info.ib_mc_address = ib_result_mc_address;
	ib_info.size = i;

	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
	ibs_request.ip_type = AMDGPU_HW_IP_DMA;
	ibs_request.ring = 0;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);

	memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_DMA;
	fence_status.ip_instance = 0;
	fence_status.ring = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, ib_size);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
				     sdma_write_length);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
				     sdma_write_length);
	CU_ASSERT_EQUAL(r, 0);

	/* end of test */
	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}

static void amdgpu_hang_sdma(void)
{
	amdgpu_hang_sdma_helper(DMA_CORRUPTED_HEADER_HANG);
}

static void amdgpu_hang_slow_sdma(void)
{
	amdgpu_hang_sdma_helper(DMA_SLOW_LINEARCOPY_HANG);
}