2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
36 #include "CUnit/Basic.h"
38 #include "amdgpu_test.h"
39 #include "amdgpu_drm.h"
41 static amdgpu_device_handle device_handle;
42 static uint32_t major_version;
43 static uint32_t minor_version;
44 static uint32_t family_id;
46 static void amdgpu_query_info_test(void);
47 static void amdgpu_command_submission_gfx(void);
48 static void amdgpu_command_submission_compute(void);
49 static void amdgpu_command_submission_multi_fence(void);
50 static void amdgpu_command_submission_sdma(void);
51 static void amdgpu_userptr_test(void);
52 static void amdgpu_semaphore_test(void);
53 static void amdgpu_sync_dependency_test(void);
55 static void amdgpu_command_submission_write_linear_helper(unsigned ip_type);
56 static void amdgpu_command_submission_const_fill_helper(unsigned ip_type);
57 static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type);
59 CU_TestInfo basic_tests[] = {
60 { "Query Info Test", amdgpu_query_info_test },
61 { "Userptr Test", amdgpu_userptr_test },
62 { "Command submission Test (GFX)", amdgpu_command_submission_gfx },
63 { "Command submission Test (Compute)", amdgpu_command_submission_compute },
64 { "Command submission Test (Multi-Fence)", amdgpu_command_submission_multi_fence },
65 { "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
66 { "SW semaphore Test", amdgpu_semaphore_test },
67 { "Sync dependency Test", amdgpu_sync_dependency_test },
70 #define BUFFER_SIZE (8 * 1024)
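/* SDMA packet header fields and opcodes used to hand-build SDMA command buffers below. */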
71 #define SDMA_PKT_HEADER_op_offset 0
72 #define SDMA_PKT_HEADER_op_mask 0x000000FF
73 #define SDMA_PKT_HEADER_op_shift 0
74 #define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift)
75 #define SDMA_OPCODE_CONSTANT_FILL 11
76 # define SDMA_CONSTANT_FILL_EXTRA_SIZE(x) ((x) << 14)
80 #define SDMA_PACKET(op, sub_op, e) ((((e) & 0xFFFF) << 16) | \
81 (((sub_op) & 0xFF) << 8) | \
83 #define SDMA_OPCODE_WRITE 2
84 # define SDMA_WRITE_SUB_OPCODE_LINEAR 0
85 # define SDMA_WRITE_SUB_OPCODE_TILED 1
87 #define SDMA_OPCODE_COPY 1
88 # define SDMA_COPY_SUB_OPCODE_LINEAR 0
90 #define GFX_COMPUTE_NOP 0xffff1000
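/* PM4 packet types and header helpers for the CP (GFX/compute) rings. */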
94 #define PACKET_TYPE0 0
95 #define PACKET_TYPE1 1
96 #define PACKET_TYPE2 2
97 #define PACKET_TYPE3 3
99 #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
100 #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
101 #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF)
102 #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
103 #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
105 ((n) & 0x3FFF) << 16)
106 #define CP_PACKET2 0x80000000
107 #define PACKET2_PAD_SHIFT 0
108 #define PACKET2_PAD_MASK (0x3fffffff << 0)
110 #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
112 #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
113 (((op) & 0xFF) << 8) | \
114 ((n) & 0x3FFF) << 16)
117 #define PACKET3_NOP 0x10
119 #define PACKET3_WRITE_DATA 0x37
120 #define WRITE_DATA_DST_SEL(x) ((x) << 8)
122 * 1 - memory (sync - via GRBM)
126 * 5 - memory (async - direct)
128 #define WR_ONE_ADDR (1 << 16)
129 #define WR_CONFIRM (1 << 20)
130 #define WRITE_DATA_CACHE_POLICY(x) ((x) << 25)
134 #define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
140 #define PACKET3_DMA_DATA 0x50
143 * 3. SRC_ADDR_LO or DATA [31:0]
144 * 4. SRC_ADDR_HI [31:0]
145 * 5. DST_ADDR_LO [31:0]
146 * 6. DST_ADDR_HI [7:0]
147 * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
150 # define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0)
154 # define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
159 # define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
160 # define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20)
161 /* 0 - DST_ADDR using DAS
163 * 3 - DST_ADDR using L2
165 # define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
170 # define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
171 # define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29)
172 /* 0 - SRC_ADDR using SAS
175 * 3 - SRC_ADDR using L2
177 # define PACKET3_DMA_DATA_CP_SYNC (1 << 31)
179 # define PACKET3_DMA_DATA_DIS_WC (1 << 21)
180 # define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
186 # define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
192 # define PACKET3_DMA_DATA_CMD_SAS (1 << 26)
196 # define PACKET3_DMA_DATA_CMD_DAS (1 << 27)
200 # define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
201 # define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
202 # define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
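/*
 * SI (Southern Islands) parts use a different packet encoding; the *_SI
 * variants below are selected at runtime when family_id == AMDGPU_FAMILY_SI.
 */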
204 #define SDMA_PACKET_SI(op, b, t, s, cnt) ((((op) & 0xF) << 28) | \
205 (((b) & 0x1) << 26) | \
206 (((t) & 0x1) << 23) | \
207 (((s) & 0x1) << 22) | \
208 (((cnt) & 0xFFFFF) << 0))
209 #define SDMA_OPCODE_COPY_SI 3
210 #define SDMA_OPCODE_CONSTANT_FILL_SI 13
211 #define SDMA_NOP_SI 0xf
212 #define GFX_COMPUTE_NOP_SI 0x80000000
213 #define PACKET3_DMA_DATA_SI 0x41
214 # define PACKET3_DMA_DATA_SI_ENGINE(x) ((x) << 27)
218 # define PACKET3_DMA_DATA_SI_DST_SEL(x) ((x) << 20)
219 /* 0 - DST_ADDR using DAS
221 * 3 - DST_ADDR using L2
223 # define PACKET3_DMA_DATA_SI_SRC_SEL(x) ((x) << 29)
224 /* 0 - SRC_ADDR using SAS
227 * 3 - SRC_ADDR using L2
229 # define PACKET3_DMA_DATA_SI_CP_SYNC (1 << 31)
232 #define PKT3_CONTEXT_CONTROL 0x28
233 #define CONTEXT_CONTROL_LOAD_ENABLE(x) (((unsigned)(x) & 0x1) << 31)
234 #define CONTEXT_CONTROL_LOAD_CE_RAM(x) (((unsigned)(x) & 0x1) << 28)
235 #define CONTEXT_CONTROL_SHADOW_ENABLE(x) (((unsigned)(x) & 0x1) << 31)
237 #define PKT3_CLEAR_STATE 0x12
239 #define PKT3_SET_SH_REG 0x76
240 #define PACKET3_SET_SH_REG_START 0x00002c00
242 #define PACKET3_DISPATCH_DIRECT 0x15
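/* Compute shader register addresses programmed via PKT3_SET_SH_REG in the sync dependency test. */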
246 #define mmCOMPUTE_PGM_LO 0x2e0c
247 #define mmCOMPUTE_PGM_RSRC1 0x2e12
248 #define mmCOMPUTE_TMPRING_SIZE 0x2e18
249 #define mmCOMPUTE_USER_DATA_0 0x2e40
250 #define mmCOMPUTE_USER_DATA_1 0x2e41
251 #define mmCOMPUTE_RESOURCE_LIMITS 0x2e15
252 #define mmCOMPUTE_NUM_THREAD_X 0x2e07
256 #define SWAP_32(num) (((num & 0xff000000) >> 24) | \
257 ((num & 0x0000ff00) << 8) | \
258 ((num & 0x00ff0000) >> 8) | \
259 ((num & 0x000000ff) << 24))
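/*
 * SWAP_32() reverses the byte order of a 32-bit word, e.g.
 * SWAP_32(0x11223344) == 0x44332211; it is applied to each dword of the
 * shader binary below, presumably to keep the image in the byte order the
 * GPU expects regardless of host endianness.
 */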
266 float x = some_input;
267 for (unsigned i = 0; i < 1000000; i++)
274 static uint32_t shader_bin[] = {
275 SWAP_32(0x800082be), SWAP_32(0x02ff08bf), SWAP_32(0x7f969800), SWAP_32(0x040085bf),
276 SWAP_32(0x02810281), SWAP_32(0x02ff08bf), SWAP_32(0x7f969800), SWAP_32(0xfcff84bf),
277 SWAP_32(0xff0083be), SWAP_32(0x00f00000), SWAP_32(0xc10082be), SWAP_32(0xaa02007e),
278 SWAP_32(0x000070e0), SWAP_32(0x00000080), SWAP_32(0x000081bf)
281 #define CODE_OFFSET 512
282 #define DATA_OFFSET 1024
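/*
 * Dword offsets into the 8 KiB IB buffer used by the sync dependency test:
 * the shader binary is copied to CODE_OFFSET (byte offset 2048) and the
 * shader's output / the dependent WRITE_DATA target lives at DATA_OFFSET
 * (byte offset 4096).
 */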
285 int suite_basic_tests_init(void)
287 struct amdgpu_gpu_info gpu_info = {0};
290 r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
291 &minor_version, &device_handle);
294 if ((r == -EACCES) && (errno == EACCES))
295 printf("\n\nError: %s. "
296        "Hint: Try to run this test program as root.",
298 return CUE_SINIT_FAILED;
301 r = amdgpu_query_gpu_info(device_handle, &gpu_info);
303 return CUE_SINIT_FAILED;
305 family_id = gpu_info.family_id;
310 int suite_basic_tests_clean(void)
312 int r = amdgpu_device_deinitialize(device_handle);
317 return CUE_SCLEAN_FAILED;
320 static void amdgpu_query_info_test(void)
322 struct amdgpu_gpu_info gpu_info = {0};
323 uint32_t version, feature;
326 r = amdgpu_query_gpu_info(device_handle, &gpu_info);
327 CU_ASSERT_EQUAL(r, 0);
329 r = amdgpu_query_firmware_version(device_handle, AMDGPU_INFO_FW_VCE, 0,
330 0, &version, &feature);
331 CU_ASSERT_EQUAL(r, 0);
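/*
 * Submit a CE IB and a DE IB allocated in two separate GTT buffers as one
 * GFX submission, then wait for the resulting fence.
 */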
334 static void amdgpu_command_submission_gfx_separate_ibs(void)
336 amdgpu_context_handle context_handle;
337 amdgpu_bo_handle ib_result_handle, ib_result_ce_handle;
338 void *ib_result_cpu, *ib_result_ce_cpu;
339 uint64_t ib_result_mc_address, ib_result_ce_mc_address;
340 struct amdgpu_cs_request ibs_request = {0};
341 struct amdgpu_cs_ib_info ib_info[2];
342 struct amdgpu_cs_fence fence_status = {0};
345 amdgpu_bo_list_handle bo_list;
346 amdgpu_va_handle va_handle, va_handle_ce;
349 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
350 CU_ASSERT_EQUAL(r, 0);
352 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
353 AMDGPU_GEM_DOMAIN_GTT, 0,
354 &ib_result_handle, &ib_result_cpu,
355 &ib_result_mc_address, &va_handle);
356 CU_ASSERT_EQUAL(r, 0);
358 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
359 AMDGPU_GEM_DOMAIN_GTT, 0,
360 &ib_result_ce_handle, &ib_result_ce_cpu,
361 &ib_result_ce_mc_address, &va_handle_ce);
362 CU_ASSERT_EQUAL(r, 0);
364 r = amdgpu_get_bo_list(device_handle, ib_result_handle,
365 ib_result_ce_handle, &bo_list);
366 CU_ASSERT_EQUAL(r, 0);
368 memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));
370 /* IT_SET_CE_DE_COUNTERS */
371 ptr = ib_result_ce_cpu;
372 if (family_id != AMDGPU_FAMILY_SI) {
373 ptr[i++] = 0xc0008900;
376 ptr[i++] = 0xc0008400;
378 ib_info[0].ib_mc_address = ib_result_ce_mc_address;
380 ib_info[0].flags = AMDGPU_IB_FLAG_CE;
382 /* IT_WAIT_ON_CE_COUNTER */
386 ib_info[1].ib_mc_address = ib_result_mc_address;
389 ibs_request.ip_type = AMDGPU_HW_IP_GFX;
390 ibs_request.number_of_ibs = 2;
391 ibs_request.ibs = ib_info;
392 ibs_request.resources = bo_list;
393 ibs_request.fence_info.handle = NULL;
395 r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
397 CU_ASSERT_EQUAL(r, 0);
399 fence_status.context = context_handle;
400 fence_status.ip_type = AMDGPU_HW_IP_GFX;
401 fence_status.ip_instance = 0;
402 fence_status.fence = ibs_request.seq_no;
404 r = amdgpu_cs_query_fence_status(&fence_status,
405 AMDGPU_TIMEOUT_INFINITE,
407 CU_ASSERT_EQUAL(r, 0);
409 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
410 ib_result_mc_address, 4096);
411 CU_ASSERT_EQUAL(r, 0);
413 r = amdgpu_bo_unmap_and_free(ib_result_ce_handle, va_handle_ce,
414 ib_result_ce_mc_address, 4096);
415 CU_ASSERT_EQUAL(r, 0);
417 r = amdgpu_bo_list_destroy(bo_list);
418 CU_ASSERT_EQUAL(r, 0);
420 r = amdgpu_cs_ctx_free(context_handle);
421 CU_ASSERT_EQUAL(r, 0);
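/*
 * Same CE + DE pair as above, but both IBs share a single buffer: the DE IB
 * starts 16 bytes (4 dwords) into the same allocation.
 */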
425 static void amdgpu_command_submission_gfx_shared_ib(void)
427 amdgpu_context_handle context_handle;
428 amdgpu_bo_handle ib_result_handle;
430 uint64_t ib_result_mc_address;
431 struct amdgpu_cs_request ibs_request = {0};
432 struct amdgpu_cs_ib_info ib_info[2];
433 struct amdgpu_cs_fence fence_status = {0};
436 amdgpu_bo_list_handle bo_list;
437 amdgpu_va_handle va_handle;
440 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
441 CU_ASSERT_EQUAL(r, 0);
443 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
444 AMDGPU_GEM_DOMAIN_GTT, 0,
445 &ib_result_handle, &ib_result_cpu,
446 &ib_result_mc_address, &va_handle);
447 CU_ASSERT_EQUAL(r, 0);
449 r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
451 CU_ASSERT_EQUAL(r, 0);
453 memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));
455 /* IT_SET_CE_DE_COUNTERS */
457 if (family_id != AMDGPU_FAMILY_SI) {
458 ptr[i++] = 0xc0008900;
461 ptr[i++] = 0xc0008400;
463 ib_info[0].ib_mc_address = ib_result_mc_address;
465 ib_info[0].flags = AMDGPU_IB_FLAG_CE;
467 ptr = (uint32_t *)ib_result_cpu + 4;
470 ib_info[1].ib_mc_address = ib_result_mc_address + 16;
473 ibs_request.ip_type = AMDGPU_HW_IP_GFX;
474 ibs_request.number_of_ibs = 2;
475 ibs_request.ibs = ib_info;
476 ibs_request.resources = bo_list;
477 ibs_request.fence_info.handle = NULL;
479 r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
481 CU_ASSERT_EQUAL(r, 0);
483 fence_status.context = context_handle;
484 fence_status.ip_type = AMDGPU_HW_IP_GFX;
485 fence_status.ip_instance = 0;
486 fence_status.fence = ibs_request.seq_no;
488 r = amdgpu_cs_query_fence_status(&fence_status,
489 AMDGPU_TIMEOUT_INFINITE,
491 CU_ASSERT_EQUAL(r, 0);
493 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
494 ib_result_mc_address, 4096);
495 CU_ASSERT_EQUAL(r, 0);
497 r = amdgpu_bo_list_destroy(bo_list);
498 CU_ASSERT_EQUAL(r, 0);
500 r = amdgpu_cs_ctx_free(context_handle);
501 CU_ASSERT_EQUAL(r, 0);
504 static void amdgpu_command_submission_gfx_cp_write_data(void)
506 amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_GFX);
509 static void amdgpu_command_submission_gfx_cp_const_fill(void)
511 amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_GFX);
514 static void amdgpu_command_submission_gfx_cp_copy_data(void)
516 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_GFX);
519 static void amdgpu_command_submission_gfx(void)
521 /* write data using the CP */
522 amdgpu_command_submission_gfx_cp_write_data();
523 /* const fill using the CP */
524 amdgpu_command_submission_gfx_cp_const_fill();
525 /* copy data using the CP */
526 amdgpu_command_submission_gfx_cp_copy_data();
527 /* separate IB buffers for multi-IB submission */
528 amdgpu_command_submission_gfx_separate_ibs();
529 /* shared IB buffer for multi-IB submission */
530 amdgpu_command_submission_gfx_shared_ib();
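/*
 * Signal a semaphore behind one NOP submission and make a second submission
 * wait on it: first across engines (SDMA -> GFX) within one context, then
 * across two contexts on the GFX engine.
 */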
533 static void amdgpu_semaphore_test(void)
535 amdgpu_context_handle context_handle[2];
536 amdgpu_semaphore_handle sem;
537 amdgpu_bo_handle ib_result_handle[2];
538 void *ib_result_cpu[2];
539 uint64_t ib_result_mc_address[2];
540 struct amdgpu_cs_request ibs_request[2] = {0};
541 struct amdgpu_cs_ib_info ib_info[2] = {0};
542 struct amdgpu_cs_fence fence_status = {0};
545 uint32_t sdma_nop, gfx_nop;
546 amdgpu_bo_list_handle bo_list[2];
547 amdgpu_va_handle va_handle[2];
550 if (family_id == AMDGPU_FAMILY_SI) {
551 sdma_nop = SDMA_PACKET_SI(SDMA_NOP_SI, 0, 0, 0, 0);
552 gfx_nop = GFX_COMPUTE_NOP_SI;
554 sdma_nop = SDMA_PKT_HEADER_OP(SDMA_NOP);
555 gfx_nop = GFX_COMPUTE_NOP;
558 r = amdgpu_cs_create_semaphore(&sem);
559 CU_ASSERT_EQUAL(r, 0);
560 for (i = 0; i < 2; i++) {
561 r = amdgpu_cs_ctx_create(device_handle, &context_handle[i]);
562 CU_ASSERT_EQUAL(r, 0);
564 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
565 AMDGPU_GEM_DOMAIN_GTT, 0,
566 &ib_result_handle[i], &ib_result_cpu[i],
567 &ib_result_mc_address[i], &va_handle[i]);
568 CU_ASSERT_EQUAL(r, 0);
570 r = amdgpu_get_bo_list(device_handle, ib_result_handle[i],
572 CU_ASSERT_EQUAL(r, 0);
575 /* 1. same context different engine */
576 ptr = ib_result_cpu[0];
578 ib_info[0].ib_mc_address = ib_result_mc_address[0];
581 ibs_request[0].ip_type = AMDGPU_HW_IP_DMA;
582 ibs_request[0].number_of_ibs = 1;
583 ibs_request[0].ibs = &ib_info[0];
584 ibs_request[0].resources = bo_list[0];
585 ibs_request[0].fence_info.handle = NULL;
586 r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[0], 1);
587 CU_ASSERT_EQUAL(r, 0);
588 r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_DMA, 0, 0, sem);
589 CU_ASSERT_EQUAL(r, 0);
591 r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
592 CU_ASSERT_EQUAL(r, 0);
593 ptr = ib_result_cpu[1];
595 ib_info[1].ib_mc_address = ib_result_mc_address[1];
598 ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
599 ibs_request[1].number_of_ibs = 1;
600 ibs_request[1].ibs = &ib_info[1];
601 ibs_request[1].resources = bo_list[1];
602 ibs_request[1].fence_info.handle = NULL;
604 r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[1], 1);
605 CU_ASSERT_EQUAL(r, 0);
607 fence_status.context = context_handle[0];
608 fence_status.ip_type = AMDGPU_HW_IP_GFX;
609 fence_status.ip_instance = 0;
610 fence_status.fence = ibs_request[1].seq_no;
611 r = amdgpu_cs_query_fence_status(&fence_status,
612 500000000, 0, &expired);
613 CU_ASSERT_EQUAL(r, 0);
614 CU_ASSERT_EQUAL(expired, true);
616 /* 2. same engine different context */
617 ptr = ib_result_cpu[0];
619 ib_info[0].ib_mc_address = ib_result_mc_address[0];
622 ibs_request[0].ip_type = AMDGPU_HW_IP_GFX;
623 ibs_request[0].number_of_ibs = 1;
624 ibs_request[0].ibs = &ib_info[0];
625 ibs_request[0].resources = bo_list[0];
626 ibs_request[0].fence_info.handle = NULL;
627 r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request[0], 1);
628 CU_ASSERT_EQUAL(r, 0);
629 r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
630 CU_ASSERT_EQUAL(r, 0);
632 r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
633 CU_ASSERT_EQUAL(r, 0);
634 ptr = ib_result_cpu[1];
636 ib_info[1].ib_mc_address = ib_result_mc_address[1];
639 ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
640 ibs_request[1].number_of_ibs = 1;
641 ibs_request[1].ibs = &ib_info[1];
642 ibs_request[1].resources = bo_list[1];
643 ibs_request[1].fence_info.handle = NULL;
644 r = amdgpu_cs_submit(context_handle[1], 0, &ibs_request[1], 1);
646 CU_ASSERT_EQUAL(r, 0);
648 fence_status.context = context_handle[1];
649 fence_status.ip_type = AMDGPU_HW_IP_GFX;
650 fence_status.ip_instance = 0;
651 fence_status.fence = ibs_request[1].seq_no;
652 r = amdgpu_cs_query_fence_status(&fence_status,
653 500000000, 0, &expired);
654 CU_ASSERT_EQUAL(r, 0);
655 CU_ASSERT_EQUAL(expired, true);
657 for (i = 0; i < 2; i++) {
658 r = amdgpu_bo_unmap_and_free(ib_result_handle[i], va_handle[i],
659 ib_result_mc_address[i], 4096);
660 CU_ASSERT_EQUAL(r, 0);
662 r = amdgpu_bo_list_destroy(bo_list[i]);
663 CU_ASSERT_EQUAL(r, 0);
665 r = amdgpu_cs_ctx_free(context_handle[i]);
666 CU_ASSERT_EQUAL(r, 0);
669 r = amdgpu_cs_destroy_semaphore(sem);
670 CU_ASSERT_EQUAL(r, 0);
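/* Submit a small NOP IB on every available compute ring and wait for each fence. */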
673 static void amdgpu_command_submission_compute_nop(void)
675 amdgpu_context_handle context_handle;
676 amdgpu_bo_handle ib_result_handle;
678 uint64_t ib_result_mc_address;
679 struct amdgpu_cs_request ibs_request;
680 struct amdgpu_cs_ib_info ib_info;
681 struct amdgpu_cs_fence fence_status;
685 amdgpu_bo_list_handle bo_list;
686 amdgpu_va_handle va_handle;
687 struct drm_amdgpu_info_hw_ip info;
689 r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_COMPUTE, 0, &info);
690 CU_ASSERT_EQUAL(r, 0);
692 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
693 CU_ASSERT_EQUAL(r, 0);
695 for (instance = 0; (1 << instance) & info.available_rings; instance++) {
696 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
697 AMDGPU_GEM_DOMAIN_GTT, 0,
698 &ib_result_handle, &ib_result_cpu,
699 &ib_result_mc_address, &va_handle);
700 CU_ASSERT_EQUAL(r, 0);
702 r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
704 CU_ASSERT_EQUAL(r, 0);
708 ptr[0] = PACKET3(PACKET3_NOP, 14);
710 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
711 ib_info.ib_mc_address = ib_result_mc_address;
714 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
715 ibs_request.ip_type = AMDGPU_HW_IP_COMPUTE;
716 ibs_request.ring = instance;
717 ibs_request.number_of_ibs = 1;
718 ibs_request.ibs = &ib_info;
719 ibs_request.resources = bo_list;
720 ibs_request.fence_info.handle = NULL;
722 memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
723 r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
724 CU_ASSERT_EQUAL(r, 0);
726 fence_status.context = context_handle;
727 fence_status.ip_type = AMDGPU_HW_IP_COMPUTE;
728 fence_status.ip_instance = 0;
729 fence_status.ring = instance;
730 fence_status.fence = ibs_request.seq_no;
732 r = amdgpu_cs_query_fence_status(&fence_status,
733 AMDGPU_TIMEOUT_INFINITE,
735 CU_ASSERT_EQUAL(r, 0);
737 r = amdgpu_bo_list_destroy(bo_list);
738 CU_ASSERT_EQUAL(r, 0);
740 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
741 ib_result_mc_address, 4096);
742 CU_ASSERT_EQUAL(r, 0);
745 r = amdgpu_cs_ctx_free(context_handle);
746 CU_ASSERT_EQUAL(r, 0);
749 static void amdgpu_command_submission_compute_cp_write_data(void)
751 amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_COMPUTE);
754 static void amdgpu_command_submission_compute_cp_const_fill(void)
756 amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_COMPUTE);
759 static void amdgpu_command_submission_compute_cp_copy_data(void)
761 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_COMPUTE);
764 static void amdgpu_command_submission_compute(void)
766 /* write data using the CP */
767 amdgpu_command_submission_compute_cp_write_data();
768 /* const fill using the CP */
769 amdgpu_command_submission_compute_cp_const_fill();
770 /* copy data using the CP */
771 amdgpu_command_submission_compute_cp_copy_data();
773 amdgpu_command_submission_compute_nop();
777 * The caller must create and release:
778 * pm4_src, resources, ib_info, and ibs_request.
779 * Submit the command stream described in ibs_request and wait for the IB to complete.
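 * The helper allocates a 4 KiB GTT IB, copies the caller's PM4 stream into it,
 * appends that IB buffer to the caller's resource list, submits, and blocks
 * until the resulting fence expires.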
781 static void amdgpu_test_exec_cs_helper(amdgpu_context_handle context_handle,
783 int instance, int pm4_dw, uint32_t *pm4_src,
784 int res_cnt, amdgpu_bo_handle *resources,
785 struct amdgpu_cs_ib_info *ib_info,
786 struct amdgpu_cs_request *ibs_request)
791 amdgpu_bo_handle ib_result_handle;
793 uint64_t ib_result_mc_address;
794 struct amdgpu_cs_fence fence_status = {0};
795 amdgpu_bo_handle *all_res = alloca(sizeof(resources[0]) * (res_cnt + 1));
796 amdgpu_va_handle va_handle;
799 CU_ASSERT_NOT_EQUAL(pm4_src, NULL);
800 CU_ASSERT_NOT_EQUAL(resources, NULL);
801 CU_ASSERT_NOT_EQUAL(ib_info, NULL);
802 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
803 CU_ASSERT_TRUE(pm4_dw <= 1024);
806 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
807 AMDGPU_GEM_DOMAIN_GTT, 0,
808 &ib_result_handle, &ib_result_cpu,
809 &ib_result_mc_address, &va_handle);
810 CU_ASSERT_EQUAL(r, 0);
812 /* copy the caller's PM4 packets into the IB */
813 ring_ptr = ib_result_cpu;
814 memcpy(ring_ptr, pm4_src, pm4_dw * sizeof(*pm4_src));
816 ib_info->ib_mc_address = ib_result_mc_address;
817 ib_info->size = pm4_dw;
819 ibs_request->ip_type = ip_type;
820 ibs_request->ring = instance;
821 ibs_request->number_of_ibs = 1;
822 ibs_request->ibs = ib_info;
823 ibs_request->fence_info.handle = NULL;
825 memcpy(all_res, resources, sizeof(resources[0]) * res_cnt);
826 all_res[res_cnt] = ib_result_handle;
828 r = amdgpu_bo_list_create(device_handle, res_cnt+1, all_res,
829 NULL, &ibs_request->resources);
830 CU_ASSERT_EQUAL(r, 0);
832 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
835 r = amdgpu_cs_submit(context_handle, 0, ibs_request, 1);
836 CU_ASSERT_EQUAL(r, 0);
838 r = amdgpu_bo_list_destroy(ibs_request->resources);
839 CU_ASSERT_EQUAL(r, 0);
841 fence_status.ip_type = ip_type;
842 fence_status.ip_instance = 0;
843 fence_status.ring = ibs_request->ring;
844 fence_status.context = context_handle;
845 fence_status.fence = ibs_request->seq_no;
847 /* wait for the IB to complete */
848 r = amdgpu_cs_query_fence_status(&fence_status,
849 AMDGPU_TIMEOUT_INFINITE,
851 CU_ASSERT_EQUAL(r, 0);
852 CU_ASSERT_EQUAL(expired, true);
854 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
855 ib_result_mc_address, 4096);
856 CU_ASSERT_EQUAL(r, 0);
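/*
 * Write-linear test: for each available ring, build either an SDMA
 * WRITE_LINEAR packet or a CP WRITE_DATA packet that fills a GTT buffer with
 * 0xdeadbeaf, submit it through amdgpu_test_exec_cs_helper(), then verify the
 * pattern through the CPU mapping.
 */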
859 static void amdgpu_command_submission_write_linear_helper(unsigned ip_type)
861 const int sdma_write_length = 128;
862 const int pm4_dw = 256;
863 amdgpu_context_handle context_handle;
865 amdgpu_bo_handle *resources;
867 struct amdgpu_cs_ib_info *ib_info;
868 struct amdgpu_cs_request *ibs_request;
870 volatile uint32_t *bo_cpu;
871 int i, j, r, loop, ring_id;
872 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
873 amdgpu_va_handle va_handle;
874 struct drm_amdgpu_info_hw_ip hw_ip_info;
876 pm4 = calloc(pm4_dw, sizeof(*pm4));
877 CU_ASSERT_NOT_EQUAL(pm4, NULL);
879 ib_info = calloc(1, sizeof(*ib_info));
880 CU_ASSERT_NOT_EQUAL(ib_info, NULL);
882 ibs_request = calloc(1, sizeof(*ibs_request));
883 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
885 r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
886 CU_ASSERT_EQUAL(r, 0);
888 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
889 CU_ASSERT_EQUAL(r, 0);
891 /* prepare resource */
892 resources = calloc(1, sizeof(amdgpu_bo_handle));
893 CU_ASSERT_NOT_EQUAL(resources, NULL);
895 for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
898 /* allocate UC bo for sDMA use */
899 r = amdgpu_bo_alloc_and_map(device_handle,
900 sdma_write_length * sizeof(uint32_t),
901 4096, AMDGPU_GEM_DOMAIN_GTT,
902 gtt_flags[loop], &bo, (void**)&bo_cpu,
904 CU_ASSERT_EQUAL(r, 0);
907 memset((void*)bo_cpu, 0, sdma_write_length * sizeof(uint32_t));
911 /* fill in PM4: test DMA write-linear */
913 if (ip_type == AMDGPU_HW_IP_DMA) {
914 if (family_id == AMDGPU_FAMILY_SI)
915 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_WRITE, 0, 0, 0,
918 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
919 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
920 pm4[i++] = 0xffffffff & bo_mc;
921 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
922 if (family_id >= AMDGPU_FAMILY_AI)
923 pm4[i++] = sdma_write_length - 1;
924 else if (family_id != AMDGPU_FAMILY_SI)
925 pm4[i++] = sdma_write_length;
926 while (j++ < sdma_write_length)
927 pm4[i++] = 0xdeadbeaf;
928 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
929 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
930 pm4[i++] = PACKET3(PACKET3_WRITE_DATA, 2 + sdma_write_length);
931 pm4[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
932 pm4[i++] = 0xfffffffc & bo_mc;
933 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
934 while (j++ < sdma_write_length)
935 pm4[i++] = 0xdeadbeaf;
938 amdgpu_test_exec_cs_helper(context_handle,
942 ib_info, ibs_request);
944 /* verify the SDMA test result matches the expected pattern */
946 while (i < sdma_write_length) {
947 CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
950 r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
951 sdma_write_length * sizeof(uint32_t));
952 CU_ASSERT_EQUAL(r, 0);
956 /* clean resources */
963 r = amdgpu_cs_ctx_free(context_handle);
964 CU_ASSERT_EQUAL(r, 0);
967 static void amdgpu_command_submission_sdma_write_linear(void)
969 amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_DMA);
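/*
 * Const-fill test: fill a 1 MiB GTT buffer with 0xdeadbeaf using either the
 * SDMA CONSTANT_FILL packet or a CP DMA_DATA packet that sources the fill
 * value from the packet itself, then verify the contents.
 */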
972 static void amdgpu_command_submission_const_fill_helper(unsigned ip_type)
974 const int sdma_write_length = 1024 * 1024;
975 const int pm4_dw = 256;
976 amdgpu_context_handle context_handle;
978 amdgpu_bo_handle *resources;
980 struct amdgpu_cs_ib_info *ib_info;
981 struct amdgpu_cs_request *ibs_request;
983 volatile uint32_t *bo_cpu;
984 int i, j, r, loop, ring_id;
985 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
986 amdgpu_va_handle va_handle;
987 struct drm_amdgpu_info_hw_ip hw_ip_info;
989 pm4 = calloc(pm4_dw, sizeof(*pm4));
990 CU_ASSERT_NOT_EQUAL(pm4, NULL);
992 ib_info = calloc(1, sizeof(*ib_info));
993 CU_ASSERT_NOT_EQUAL(ib_info, NULL);
995 ibs_request = calloc(1, sizeof(*ibs_request));
996 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
998 r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
999 CU_ASSERT_EQUAL(r, 0);
1001 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1002 CU_ASSERT_EQUAL(r, 0);
1004 /* prepare resource */
1005 resources = calloc(1, sizeof(amdgpu_bo_handle));
1006 CU_ASSERT_NOT_EQUAL(resources, NULL);
1008 for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
1011 /* allocate UC bo for sDMA use */
1012 r = amdgpu_bo_alloc_and_map(device_handle,
1013 sdma_write_length, 4096,
1014 AMDGPU_GEM_DOMAIN_GTT,
1015 gtt_flags[loop], &bo, (void**)&bo_cpu,
1016 &bo_mc, &va_handle);
1017 CU_ASSERT_EQUAL(r, 0);
1020 memset((void*)bo_cpu, 0, sdma_write_length);
1024 /* fill in PM4: test DMA const fill */
1026 if (ip_type == AMDGPU_HW_IP_DMA) {
1027 if (family_id == AMDGPU_FAMILY_SI) {
1028 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_CONSTANT_FILL_SI,
1030 sdma_write_length / 4);
1031 pm4[i++] = 0xfffffffc & bo_mc;
1032 pm4[i++] = 0xdeadbeaf;
1033 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 16;
1035 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0,
1036 SDMA_CONSTANT_FILL_EXTRA_SIZE(2));
1037 pm4[i++] = 0xffffffff & bo_mc;
1038 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1039 pm4[i++] = 0xdeadbeaf;
1040 if (family_id >= AMDGPU_FAMILY_AI)
1041 pm4[i++] = sdma_write_length - 1;
1043 pm4[i++] = sdma_write_length;
1045 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1046 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1047 if (family_id == AMDGPU_FAMILY_SI) {
1048 pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4);
1049 pm4[i++] = 0xdeadbeaf;
1050 pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1051 PACKET3_DMA_DATA_SI_DST_SEL(0) |
1052 PACKET3_DMA_DATA_SI_SRC_SEL(2) |
1053 PACKET3_DMA_DATA_SI_CP_SYNC;
1054 pm4[i++] = 0xffffffff & bo_mc;
1055 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1056 pm4[i++] = sdma_write_length;
1058 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1059 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1060 PACKET3_DMA_DATA_DST_SEL(0) |
1061 PACKET3_DMA_DATA_SRC_SEL(2) |
1062 PACKET3_DMA_DATA_CP_SYNC;
1063 pm4[i++] = 0xdeadbeaf;
1065 pm4[i++] = 0xfffffffc & bo_mc;
1066 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1067 pm4[i++] = sdma_write_length;
1071 amdgpu_test_exec_cs_helper(context_handle,
1075 ib_info, ibs_request);
1077 /* verify the SDMA test result matches the expected pattern */
1079 while (i < (sdma_write_length / 4)) {
1080 CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
1083 r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
1085 CU_ASSERT_EQUAL(r, 0);
1089 /* clean resources */
1096 r = amdgpu_cs_ctx_free(context_handle);
1097 CU_ASSERT_EQUAL(r, 0);
1100 static void amdgpu_command_submission_sdma_const_fill(void)
1102 amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_DMA);
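/*
 * Copy-linear test: fill one GTT buffer with 0xaa, copy it to a second buffer
 * with an SDMA COPY_LINEAR or CP DMA_DATA packet, and verify the destination,
 * looping over the mapping-flag combinations of both buffers.
 */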
1105 static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1107 const int sdma_write_length = 1024;
1108 const int pm4_dw = 256;
1109 amdgpu_context_handle context_handle;
1110 amdgpu_bo_handle bo1, bo2;
1111 amdgpu_bo_handle *resources;
1113 struct amdgpu_cs_ib_info *ib_info;
1114 struct amdgpu_cs_request *ibs_request;
1115 uint64_t bo1_mc, bo2_mc;
1116 volatile unsigned char *bo1_cpu, *bo2_cpu;
1117 int i, j, r, loop1, loop2;
1118 uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
1119 amdgpu_va_handle bo1_va_handle, bo2_va_handle;
1121 pm4 = calloc(pm4_dw, sizeof(*pm4));
1122 CU_ASSERT_NOT_EQUAL(pm4, NULL);
1124 ib_info = calloc(1, sizeof(*ib_info));
1125 CU_ASSERT_NOT_EQUAL(ib_info, NULL);
1127 ibs_request = calloc(1, sizeof(*ibs_request));
1128 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
1130 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1131 CU_ASSERT_EQUAL(r, 0);
1133 /* prepare resource */
1134 resources = calloc(2, sizeof(amdgpu_bo_handle));
1135 CU_ASSERT_NOT_EQUAL(resources, NULL);
1138 /* loop over all GTT mapping-flag combinations for the source and destination buffers */
1141 /* allocate UC bo1 for sDMA use */
1142 r = amdgpu_bo_alloc_and_map(device_handle,
1143 sdma_write_length, 4096,
1144 AMDGPU_GEM_DOMAIN_GTT,
1145 gtt_flags[loop1], &bo1,
1146 (void**)&bo1_cpu, &bo1_mc,
1148 CU_ASSERT_EQUAL(r, 0);
1151 memset((void*)bo1_cpu, 0xaa, sdma_write_length);
1153 /* allocate UC bo2 for sDMA use */
1154 r = amdgpu_bo_alloc_and_map(device_handle,
1155 sdma_write_length, 4096,
1156 AMDGPU_GEM_DOMAIN_GTT,
1157 gtt_flags[loop2], &bo2,
1158 (void**)&bo2_cpu, &bo2_mc,
1160 CU_ASSERT_EQUAL(r, 0);
1163 memset((void*)bo2_cpu, 0, sdma_write_length);
1168 /* fill in PM4: test DMA copy linear */
1170 if (ip_type == AMDGPU_HW_IP_DMA) {
1171 if (family_id == AMDGPU_FAMILY_SI) {
1172 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0,
1174 pm4[i++] = 0xffffffff & bo2_mc;
1175 pm4[i++] = 0xffffffff & bo1_mc;
1176 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1177 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1179 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
1180 if (family_id >= AMDGPU_FAMILY_AI)
1181 pm4[i++] = sdma_write_length - 1;
1183 pm4[i++] = sdma_write_length;
1185 pm4[i++] = 0xffffffff & bo1_mc;
1186 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1187 pm4[i++] = 0xffffffff & bo2_mc;
1188 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1191 } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1192 (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1193 if (family_id == AMDGPU_FAMILY_SI) {
1194 pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4);
1195 pm4[i++] = 0xfffffffc & bo1_mc;
1196 pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1197 PACKET3_DMA_DATA_SI_DST_SEL(0) |
1198 PACKET3_DMA_DATA_SI_SRC_SEL(0) |
1199 PACKET3_DMA_DATA_SI_CP_SYNC |
1200 (0xffff00000000 & bo1_mc) >> 32;
1201 pm4[i++] = 0xfffffffc & bo2_mc;
1202 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1203 pm4[i++] = sdma_write_length;
1205 pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1206 pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1207 PACKET3_DMA_DATA_DST_SEL(0) |
1208 PACKET3_DMA_DATA_SRC_SEL(0) |
1209 PACKET3_DMA_DATA_CP_SYNC;
1210 pm4[i++] = 0xfffffffc & bo1_mc;
1211 pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1212 pm4[i++] = 0xfffffffc & bo2_mc;
1213 pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1214 pm4[i++] = sdma_write_length;
1218 amdgpu_test_exec_cs_helper(context_handle,
1222 ib_info, ibs_request);
1224 /* verify the SDMA test result matches the expected pattern */
1226 while (i < sdma_write_length) {
1227 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
1229 r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
1231 CU_ASSERT_EQUAL(r, 0);
1232 r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
1234 CU_ASSERT_EQUAL(r, 0);
1239 /* clean resources */
1246 r = amdgpu_cs_ctx_free(context_handle);
1247 CU_ASSERT_EQUAL(r, 0);
1250 static void amdgpu_command_submission_sdma_copy_linear(void)
1252 amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_DMA);
1255 static void amdgpu_command_submission_sdma(void)
1257 amdgpu_command_submission_sdma_write_linear();
1258 amdgpu_command_submission_sdma_const_fill();
1259 amdgpu_command_submission_sdma_copy_linear();
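/*
 * Submit the same CE + DE IB pair twice in one call and wait on both fences
 * with amdgpu_cs_wait_fences(), exercising both the wait_all = true and
 * wait_all = false paths.
 */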
1262 static void amdgpu_command_submission_multi_fence_wait_all(bool wait_all)
1264 amdgpu_context_handle context_handle;
1265 amdgpu_bo_handle ib_result_handle, ib_result_ce_handle;
1266 void *ib_result_cpu, *ib_result_ce_cpu;
1267 uint64_t ib_result_mc_address, ib_result_ce_mc_address;
1268 struct amdgpu_cs_request ibs_request[2] = {0};
1269 struct amdgpu_cs_ib_info ib_info[2];
1270 struct amdgpu_cs_fence fence_status[2] = {0};
1273 amdgpu_bo_list_handle bo_list;
1274 amdgpu_va_handle va_handle, va_handle_ce;
1276 int i = 0, ib_cs_num = 2;
1278 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1279 CU_ASSERT_EQUAL(r, 0);
1281 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
1282 AMDGPU_GEM_DOMAIN_GTT, 0,
1283 &ib_result_handle, &ib_result_cpu,
1284 &ib_result_mc_address, &va_handle);
1285 CU_ASSERT_EQUAL(r, 0);
1287 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
1288 AMDGPU_GEM_DOMAIN_GTT, 0,
1289 &ib_result_ce_handle, &ib_result_ce_cpu,
1290 &ib_result_ce_mc_address, &va_handle_ce);
1291 CU_ASSERT_EQUAL(r, 0);
1293 r = amdgpu_get_bo_list(device_handle, ib_result_handle,
1294 ib_result_ce_handle, &bo_list);
1295 CU_ASSERT_EQUAL(r, 0);
1297 memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));
1299 /* IT_SET_CE_DE_COUNTERS */
1300 ptr = ib_result_ce_cpu;
1301 if (family_id != AMDGPU_FAMILY_SI) {
1302 ptr[i++] = 0xc0008900;
1305 ptr[i++] = 0xc0008400;
1307 ib_info[0].ib_mc_address = ib_result_ce_mc_address;
1308 ib_info[0].size = i;
1309 ib_info[0].flags = AMDGPU_IB_FLAG_CE;
1311 /* IT_WAIT_ON_CE_COUNTER */
1312 ptr = ib_result_cpu;
1313 ptr[0] = 0xc0008600;
1314 ptr[1] = 0x00000001;
1315 ib_info[1].ib_mc_address = ib_result_mc_address;
1316 ib_info[1].size = 2;
1318 for (i = 0; i < ib_cs_num; i++) {
1319 ibs_request[i].ip_type = AMDGPU_HW_IP_GFX;
1320 ibs_request[i].number_of_ibs = 2;
1321 ibs_request[i].ibs = ib_info;
1322 ibs_request[i].resources = bo_list;
1323 ibs_request[i].fence_info.handle = NULL;
1326 r = amdgpu_cs_submit(context_handle, 0, ibs_request, ib_cs_num);
1328 CU_ASSERT_EQUAL(r, 0);
1330 for (i = 0; i < ib_cs_num; i++) {
1331 fence_status[i].context = context_handle;
1332 fence_status[i].ip_type = AMDGPU_HW_IP_GFX;
1333 fence_status[i].fence = ibs_request[i].seq_no;
1336 r = amdgpu_cs_wait_fences(fence_status, ib_cs_num, wait_all,
1337 AMDGPU_TIMEOUT_INFINITE,
1339 CU_ASSERT_EQUAL(r, 0);
1341 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
1342 ib_result_mc_address, 4096);
1343 CU_ASSERT_EQUAL(r, 0);
1345 r = amdgpu_bo_unmap_and_free(ib_result_ce_handle, va_handle_ce,
1346 ib_result_ce_mc_address, 4096);
1347 CU_ASSERT_EQUAL(r, 0);
1349 r = amdgpu_bo_list_destroy(bo_list);
1350 CU_ASSERT_EQUAL(r, 0);
1352 r = amdgpu_cs_ctx_free(context_handle);
1353 CU_ASSERT_EQUAL(r, 0);
1356 static void amdgpu_command_submission_multi_fence(void)
1358 amdgpu_command_submission_multi_fence_wait_all(true);
1359 amdgpu_command_submission_multi_fence_wait_all(false);
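/*
 * Userptr test: wrap a page-aligned host allocation in a BO with
 * amdgpu_create_bo_from_user_mem(), map it into the GPU VA space, SDMA-write
 * 0xdeadbeaf into it, and verify the result through the original CPU pointer.
 */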
1362 static void amdgpu_userptr_test(void)
1365 uint32_t *pm4 = NULL;
1369 int sdma_write_length = 4;
1370 amdgpu_bo_handle handle;
1371 amdgpu_context_handle context_handle;
1372 struct amdgpu_cs_ib_info *ib_info;
1373 struct amdgpu_cs_request *ibs_request;
1374 amdgpu_bo_handle buf_handle;
1375 amdgpu_va_handle va_handle;
1377 pm4 = calloc(pm4_dw, sizeof(*pm4));
1378 CU_ASSERT_NOT_EQUAL(pm4, NULL);
1380 ib_info = calloc(1, sizeof(*ib_info));
1381 CU_ASSERT_NOT_EQUAL(ib_info, NULL);
1383 ibs_request = calloc(1, sizeof(*ibs_request));
1384 CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
1386 r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1387 CU_ASSERT_EQUAL(r, 0);
1389 posix_memalign(&ptr, sysconf(_SC_PAGE_SIZE), BUFFER_SIZE);
1390 CU_ASSERT_NOT_EQUAL(ptr, NULL);
1391 memset(ptr, 0, BUFFER_SIZE);
1393 r = amdgpu_create_bo_from_user_mem(device_handle,
1394 ptr, BUFFER_SIZE, &buf_handle);
1395 CU_ASSERT_EQUAL(r, 0);
1397 r = amdgpu_va_range_alloc(device_handle,
1398 amdgpu_gpu_va_range_general,
1399 BUFFER_SIZE, 1, 0, &bo_mc,
1401 CU_ASSERT_EQUAL(r, 0);
1403 r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_MAP);
1404 CU_ASSERT_EQUAL(r, 0);
1406 handle = buf_handle;
1410 if (family_id == AMDGPU_FAMILY_SI)
1411 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_WRITE, 0, 0, 0,
1414 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
1415 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
1416 pm4[i++] = 0xffffffff & bo_mc;
1417 pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1418 if (family_id >= AMDGPU_FAMILY_AI)
1419 pm4[i++] = sdma_write_length - 1;
1420 else if (family_id != AMDGPU_FAMILY_SI)
1421 pm4[i++] = sdma_write_length;
1423 while (j++ < sdma_write_length)
1424 pm4[i++] = 0xdeadbeaf;
1431 amdgpu_test_exec_cs_helper(context_handle,
1432 AMDGPU_HW_IP_DMA, 0,
1435 ib_info, ibs_request);
1437 while (i < sdma_write_length) {
1438 CU_ASSERT_EQUAL(((int*)ptr)[i++], 0xdeadbeaf);
1444 r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_UNMAP);
1445 CU_ASSERT_EQUAL(r, 0);
1446 r = amdgpu_va_range_free(va_handle);
1447 CU_ASSERT_EQUAL(r, 0);
1448 r = amdgpu_bo_free(buf_handle);
1449 CU_ASSERT_EQUAL(r, 0);
1452 r = amdgpu_cs_ctx_free(context_handle);
1453 CU_ASSERT_EQUAL(r, 0);
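/*
 * Sync dependency test: context[1] runs a long-running compute shader that
 * writes its result to DATA_OFFSET; context[0] then submits a WRITE_DATA of
 * 99 to the same location with an explicit dependency on the first fence.
 * If the dependency is honored, the write executes after the shader finishes,
 * so the final value must be 99.
 */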
1458 static void amdgpu_sync_dependency_test(void)
1460 amdgpu_context_handle context_handle[2];
1461 amdgpu_bo_handle ib_result_handle;
1462 void *ib_result_cpu;
1463 uint64_t ib_result_mc_address;
1464 struct amdgpu_cs_request ibs_request;
1465 struct amdgpu_cs_ib_info ib_info;
1466 struct amdgpu_cs_fence fence_status;
1468 int i, j, r, instance;
1469 amdgpu_bo_list_handle bo_list;
1470 amdgpu_va_handle va_handle;
1471 static uint32_t *ptr;
1474 r = amdgpu_cs_ctx_create(device_handle, &context_handle[0]);
1475 CU_ASSERT_EQUAL(r, 0);
1476 r = amdgpu_cs_ctx_create(device_handle, &context_handle[1]);
1477 CU_ASSERT_EQUAL(r, 0);
1479 r = amdgpu_bo_alloc_and_map(device_handle, 8192, 4096,
1480 AMDGPU_GEM_DOMAIN_GTT, 0,
1481 &ib_result_handle, &ib_result_cpu,
1482 &ib_result_mc_address, &va_handle);
1483 CU_ASSERT_EQUAL(r, 0);
1485 r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
1487 CU_ASSERT_EQUAL(r, 0);
1489 ptr = ib_result_cpu;
1492 memcpy(ptr + CODE_OFFSET, shader_bin, sizeof(shader_bin));
1494 /* Dispatch minimal init config and verify it's executed */
1495 ptr[i++] = PACKET3(PKT3_CONTEXT_CONTROL, 1);
1496 ptr[i++] = 0x80000000;
1497 ptr[i++] = 0x80000000;
1499 ptr[i++] = PACKET3(PKT3_CLEAR_STATE, 0);
1500 ptr[i++] = 0x80000000;
1503 /* Program compute regs */
1504 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1505 ptr[i++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1506 ptr[i++] = (ib_result_mc_address + CODE_OFFSET * 4) >> 8;
1507 ptr[i++] = (ib_result_mc_address + CODE_OFFSET * 4) >> 40;
1510 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1511 ptr[i++] = mmCOMPUTE_PGM_RSRC1 - PACKET3_SET_SH_REG_START;
1513 * 002c0040 COMPUTE_PGM_RSRC1 <- VGPRS = 0
1516 FLOAT_MODE = 192 (0xc0)
1525 ptr[i++] = 0x002c0040;
1529 * 00000010 COMPUTE_PGM_RSRC2 <- SCRATCH_EN = 0
1542 ptr[i++] = 0x00000010;
1546 * 00000100 COMPUTE_TMPRING_SIZE <- WAVES = 256 (0x100)
1550 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
1551 ptr[i++] = mmCOMPUTE_TMPRING_SIZE - PACKET3_SET_SH_REG_START;
1552 ptr[i++] = 0x00000100;
1554 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1555 ptr[i++] = mmCOMPUTE_USER_DATA_0 - PACKET3_SET_SH_REG_START;
1556 ptr[i++] = 0xffffffff & (ib_result_mc_address + DATA_OFFSET * 4);
1557 ptr[i++] = (0xffffffff00000000 & (ib_result_mc_address + DATA_OFFSET * 4)) >> 32;
1559 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
1560 ptr[i++] = mmCOMPUTE_RESOURCE_LIMITS - PACKET3_SET_SH_REG_START;
1563 ptr[i++] = PACKET3(PKT3_SET_SH_REG, 3);
1564 ptr[i++] = mmCOMPUTE_NUM_THREAD_X - PACKET3_SET_SH_REG_START;
1571 ptr[i++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1575 ptr[i++] = 0x00000045; /* DISPATCH DIRECT field */
1579 ptr[i++] = 0xffff1000; /* type3 nop packet */
1581 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
1582 ib_info.ib_mc_address = ib_result_mc_address;
1585 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
1586 ibs_request.ip_type = AMDGPU_HW_IP_GFX;
1587 ibs_request.ring = 0;
1588 ibs_request.number_of_ibs = 1;
1589 ibs_request.ibs = &ib_info;
1590 ibs_request.resources = bo_list;
1591 ibs_request.fence_info.handle = NULL;
1593 r = amdgpu_cs_submit(context_handle[1], 0, &ibs_request, 1);
1594 CU_ASSERT_EQUAL(r, 0);
1595 seq_no = ibs_request.seq_no;
1599 /* Prepare second command with dependency on the first */
1601 ptr[i++] = PACKET3(PACKET3_WRITE_DATA, 3);
1602 ptr[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1603 ptr[i++] = 0xfffffffc & (ib_result_mc_address + DATA_OFFSET * 4);
1604 ptr[i++] = (0xffffffff00000000 & (ib_result_mc_address + DATA_OFFSET * 4)) >> 32;
1608 ptr[i++] = 0xffff1000; /* type3 nop packet */
1610 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
1611 ib_info.ib_mc_address = ib_result_mc_address + j * 4;
1612 ib_info.size = i - j;
1614 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
1615 ibs_request.ip_type = AMDGPU_HW_IP_GFX;
1616 ibs_request.ring = 0;
1617 ibs_request.number_of_ibs = 1;
1618 ibs_request.ibs = &ib_info;
1619 ibs_request.resources = bo_list;
1620 ibs_request.fence_info.handle = NULL;
1622 ibs_request.number_of_dependencies = 1;
1624 ibs_request.dependencies = calloc(1, sizeof(*ibs_request.dependencies));
1625 ibs_request.dependencies[0].context = context_handle[1];
1626 ibs_request.dependencies[0].ip_instance = 0;
1627 ibs_request.dependencies[0].ring = 0;
1628 ibs_request.dependencies[0].fence = seq_no;
1631 r = amdgpu_cs_submit(context_handle[0], 0, &ibs_request, 1);
1632 CU_ASSERT_EQUAL(r, 0);
1635 memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
1636 fence_status.context = context_handle[0];
1637 fence_status.ip_type = AMDGPU_HW_IP_GFX;
1638 fence_status.ip_instance = 0;
1639 fence_status.ring = 0;
1640 fence_status.fence = ibs_request.seq_no;
1642 r = amdgpu_cs_query_fence_status(&fence_status,
1643 AMDGPU_TIMEOUT_INFINITE,0, &expired);
1644 CU_ASSERT_EQUAL(r, 0);
1646 /* Expect the second submission to have waited for the shader to complete, so its write of 99 is the final value */
1647 CU_ASSERT_EQUAL(ptr[DATA_OFFSET], 99);
1649 r = amdgpu_bo_list_destroy(bo_list);
1650 CU_ASSERT_EQUAL(r, 0);
1652 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
1653 ib_result_mc_address, 4096);
1654 CU_ASSERT_EQUAL(r, 0);
1656 r = amdgpu_cs_ctx_free(context_handle[0]);
1657 CU_ASSERT_EQUAL(r, 0);
1658 r = amdgpu_cs_ctx_free(context_handle[1]);
1659 CU_ASSERT_EQUAL(r, 0);
1661 free(ibs_request.dependencies);