OSDN Git Service

tests/amdgpu: execute const fill on all the available rings
[android-x86/external-libdrm.git] / tests / amdgpu / basic_tests.c
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22 */
23
24 #ifdef HAVE_CONFIG_H
25 #include "config.h"
26 #endif
27
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <unistd.h>
31 #ifdef HAVE_ALLOCA_H
32 # include <alloca.h>
33 #endif
34 #include <sys/wait.h>
35
36 #include "CUnit/Basic.h"
37
38 #include "amdgpu_test.h"
39 #include "amdgpu_drm.h"
40
41 static  amdgpu_device_handle device_handle;
42 static  uint32_t  major_version;
43 static  uint32_t  minor_version;
44 static  uint32_t  family_id;
45
46 static void amdgpu_query_info_test(void);
47 static void amdgpu_command_submission_gfx(void);
48 static void amdgpu_command_submission_compute(void);
49 static void amdgpu_command_submission_multi_fence(void);
50 static void amdgpu_command_submission_sdma(void);
51 static void amdgpu_userptr_test(void);
52 static void amdgpu_semaphore_test(void);
53 static void amdgpu_sync_dependency_test(void);
54
55 static void amdgpu_command_submission_write_linear_helper(unsigned ip_type);
56 static void amdgpu_command_submission_const_fill_helper(unsigned ip_type);
57 static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type);
58
/* CUnit registration table for the "basic" suite: one entry per test,
 * terminated by CU_TEST_INFO_NULL as CUnit requires. */
CU_TestInfo basic_tests[] = {
	{ "Query Info Test",  amdgpu_query_info_test },
	{ "Userptr Test",  amdgpu_userptr_test },
	{ "Command submission Test (GFX)",  amdgpu_command_submission_gfx },
	{ "Command submission Test (Compute)", amdgpu_command_submission_compute },
	{ "Command submission Test (Multi-Fence)", amdgpu_command_submission_multi_fence },
	{ "Command submission Test (SDMA)", amdgpu_command_submission_sdma },
	{ "SW semaphore Test",  amdgpu_semaphore_test },
	{ "Sync dependency Test",  amdgpu_sync_dependency_test },
	CU_TEST_INFO_NULL,
};
70 #define BUFFER_SIZE (8 * 1024)
71 #define SDMA_PKT_HEADER_op_offset 0
72 #define SDMA_PKT_HEADER_op_mask   0x000000FF
73 #define SDMA_PKT_HEADER_op_shift  0
74 #define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift)
75 #define SDMA_OPCODE_CONSTANT_FILL  11
76 #       define SDMA_CONSTANT_FILL_EXTRA_SIZE(x)           ((x) << 14)
77         /* 0 = byte fill
78          * 2 = DW fill
79          */
80 #define SDMA_PACKET(op, sub_op, e)      ((((e) & 0xFFFF) << 16) |       \
81                                         (((sub_op) & 0xFF) << 8) |      \
82                                         (((op) & 0xFF) << 0))
#define SDMA_OPCODE_WRITE                                 2
#       define SDMA_WRITE_SUB_OPCODE_LINEAR               0
/* NOTE(review): "WRTIE" is a typo for "WRITE"; kept as-is because code
 * outside this view may reference the misspelled name — confirm before
 * renaming. */
#       define SDMA_WRTIE_SUB_OPCODE_TILED                1
86
87 #define SDMA_OPCODE_COPY                                  1
88 #       define SDMA_COPY_SUB_OPCODE_LINEAR                0
89
90 #define GFX_COMPUTE_NOP  0xffff1000
91 #define SDMA_NOP  0x0
92
93 /* PM4 */
94 #define PACKET_TYPE0    0
95 #define PACKET_TYPE1    1
96 #define PACKET_TYPE2    2
97 #define PACKET_TYPE3    3
98
99 #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
100 #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
101 #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF)
102 #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
103 #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) |                         \
104                          ((reg) & 0xFFFF) |                     \
105                          ((n) & 0x3FFF) << 16)
106 #define CP_PACKET2                      0x80000000
107 #define         PACKET2_PAD_SHIFT               0
108 #define         PACKET2_PAD_MASK                (0x3fffffff << 0)
109
110 #define PACKET2(v)      (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
111
112 #define PACKET3(op, n)  ((PACKET_TYPE3 << 30) |                         \
113                          (((op) & 0xFF) << 8) |                         \
114                          ((n) & 0x3FFF) << 16)
115
116 /* Packet 3 types */
117 #define PACKET3_NOP                                     0x10
118
119 #define PACKET3_WRITE_DATA                              0x37
120 #define         WRITE_DATA_DST_SEL(x)                   ((x) << 8)
121                 /* 0 - register
122                  * 1 - memory (sync - via GRBM)
123                  * 2 - gl2
124                  * 3 - gds
125                  * 4 - reserved
126                  * 5 - memory (async - direct)
127                  */
128 #define         WR_ONE_ADDR                             (1 << 16)
129 #define         WR_CONFIRM                              (1 << 20)
130 #define         WRITE_DATA_CACHE_POLICY(x)              ((x) << 25)
131                 /* 0 - LRU
132                  * 1 - Stream
133                  */
134 #define         WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
135                 /* 0 - me
136                  * 1 - pfp
137                  * 2 - ce
138                  */
139
140 #define PACKET3_DMA_DATA                                0x50
141 /* 1. header
142  * 2. CONTROL
143  * 3. SRC_ADDR_LO or DATA [31:0]
144  * 4. SRC_ADDR_HI [31:0]
145  * 5. DST_ADDR_LO [31:0]
146  * 6. DST_ADDR_HI [7:0]
147  * 7. COMMAND [30:21] | BYTE_COUNT [20:0]
148  */
149 /* CONTROL */
150 #              define PACKET3_DMA_DATA_ENGINE(x)     ((x) << 0)
151                 /* 0 - ME
152                  * 1 - PFP
153                  */
154 #              define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13)
155                 /* 0 - LRU
156                  * 1 - Stream
157                  * 2 - Bypass
158                  */
159 #              define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15)
160 #              define PACKET3_DMA_DATA_DST_SEL(x)  ((x) << 20)
161                 /* 0 - DST_ADDR using DAS
162                  * 1 - GDS
163                  * 3 - DST_ADDR using L2
164                  */
165 #              define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25)
166                 /* 0 - LRU
167                  * 1 - Stream
168                  * 2 - Bypass
169                  */
170 #              define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27)
171 #              define PACKET3_DMA_DATA_SRC_SEL(x)  ((x) << 29)
172                 /* 0 - SRC_ADDR using SAS
173                  * 1 - GDS
174                  * 2 - DATA
175                  * 3 - SRC_ADDR using L2
176                  */
177 #              define PACKET3_DMA_DATA_CP_SYNC     (1 << 31)
178 /* COMMAND */
179 #              define PACKET3_DMA_DATA_DIS_WC      (1 << 21)
180 #              define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22)
181                 /* 0 - none
182                  * 1 - 8 in 16
183                  * 2 - 8 in 32
184                  * 3 - 8 in 64
185                  */
186 #              define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24)
187                 /* 0 - none
188                  * 1 - 8 in 16
189                  * 2 - 8 in 32
190                  * 3 - 8 in 64
191                  */
192 #              define PACKET3_DMA_DATA_CMD_SAS     (1 << 26)
193                 /* 0 - memory
194                  * 1 - register
195                  */
196 #              define PACKET3_DMA_DATA_CMD_DAS     (1 << 27)
197                 /* 0 - memory
198                  * 1 - register
199                  */
200 #              define PACKET3_DMA_DATA_CMD_SAIC    (1 << 28)
201 #              define PACKET3_DMA_DATA_CMD_DAIC    (1 << 29)
202 #              define PACKET3_DMA_DATA_CMD_RAW_WAIT  (1 << 30)
203
204 #define SDMA_PACKET_SI(op, b, t, s, cnt)        ((((op) & 0xF) << 28) | \
205                                                 (((b) & 0x1) << 26) |           \
206                                                 (((t) & 0x1) << 23) |           \
207                                                 (((s) & 0x1) << 22) |           \
208                                                 (((cnt) & 0xFFFFF) << 0))
209 #define SDMA_OPCODE_COPY_SI     3
210 #define SDMA_OPCODE_CONSTANT_FILL_SI    13
211 #define SDMA_NOP_SI  0xf
212 #define GFX_COMPUTE_NOP_SI 0x80000000
213 #define PACKET3_DMA_DATA_SI     0x41
214 #              define PACKET3_DMA_DATA_SI_ENGINE(x)     ((x) << 27)
215                 /* 0 - ME
216                  * 1 - PFP
217                  */
218 #              define PACKET3_DMA_DATA_SI_DST_SEL(x)  ((x) << 20)
219                 /* 0 - DST_ADDR using DAS
220                  * 1 - GDS
221                  * 3 - DST_ADDR using L2
222                  */
223 #              define PACKET3_DMA_DATA_SI_SRC_SEL(x)  ((x) << 29)
224                 /* 0 - SRC_ADDR using SAS
225                  * 1 - GDS
226                  * 2 - DATA
227                  * 3 - SRC_ADDR using L2
228                  */
229 #              define PACKET3_DMA_DATA_SI_CP_SYNC     (1 << 31)
230
231
232 #define PKT3_CONTEXT_CONTROL                   0x28
233 #define     CONTEXT_CONTROL_LOAD_ENABLE(x)     (((unsigned)(x) & 0x1) << 31)
234 #define     CONTEXT_CONTROL_LOAD_CE_RAM(x)     (((unsigned)(x) & 0x1) << 28)
235 #define     CONTEXT_CONTROL_SHADOW_ENABLE(x)   (((unsigned)(x) & 0x1) << 31)
236
237 #define PKT3_CLEAR_STATE                       0x12
238
239 #define PKT3_SET_SH_REG                        0x76
240 #define         PACKET3_SET_SH_REG_START                        0x00002c00
241
242 #define PACKET3_DISPATCH_DIRECT                         0x15
243
244
245 /* gfx 8 */
246 #define mmCOMPUTE_PGM_LO                                                        0x2e0c
247 #define mmCOMPUTE_PGM_RSRC1                                                     0x2e12
248 #define mmCOMPUTE_TMPRING_SIZE                                                  0x2e18
249 #define mmCOMPUTE_USER_DATA_0                                                   0x2e40
250 #define mmCOMPUTE_USER_DATA_1                                                   0x2e41
251 #define mmCOMPUTE_RESOURCE_LIMITS                                               0x2e15
252 #define mmCOMPUTE_NUM_THREAD_X                                                  0x2e07
253
254
255
256 #define SWAP_32(num) (((num & 0xff000000) >> 24) | \
257                       ((num & 0x0000ff00) << 8) | \
258                       ((num & 0x00ff0000) >> 8) | \
259                       ((num & 0x000000ff) << 24))
260
261
262 /* Shader code
263  * void main()
264 {
265
266         float x = some_input;
267                 for (unsigned i = 0; i < 1000000; i++)
268         x = sin(x);
269
270         u[0] = 42u;
271 }
272 */
273
/* Precompiled shader binary for the dispatch test (HLL source shown in
 * the comment above).  The literals are written byte-reversed; SWAP_32
 * (defined above) reverses the byte order of each 32-bit word to
 * recover the intended instruction encoding. */
static  uint32_t shader_bin[] = {
	SWAP_32(0x800082be), SWAP_32(0x02ff08bf), SWAP_32(0x7f969800), SWAP_32(0x040085bf),
	SWAP_32(0x02810281), SWAP_32(0x02ff08bf), SWAP_32(0x7f969800), SWAP_32(0xfcff84bf),
	SWAP_32(0xff0083be), SWAP_32(0x00f00000), SWAP_32(0xc10082be), SWAP_32(0xaa02007e),
	SWAP_32(0x000070e0), SWAP_32(0x00000080), SWAP_32(0x000081bf)
};
280
281 #define CODE_OFFSET 512
282 #define DATA_OFFSET 1024
283
284
285 int suite_basic_tests_init(void)
286 {
287         struct amdgpu_gpu_info gpu_info = {0};
288         int r;
289
290         r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
291                                    &minor_version, &device_handle);
292
293         if (r) {
294                 if ((r == -EACCES) && (errno == EACCES))
295                         printf("\n\nError:%s. "
296                                 "Hint:Try to run this test program as root.",
297                                 strerror(errno));
298                 return CUE_SINIT_FAILED;
299         }
300
301         r = amdgpu_query_gpu_info(device_handle, &gpu_info);
302         if (r)
303                 return CUE_SINIT_FAILED;
304
305         family_id = gpu_info.family_id;
306
307         return CUE_SUCCESS;
308 }
309
310 int suite_basic_tests_clean(void)
311 {
312         int r = amdgpu_device_deinitialize(device_handle);
313
314         if (r == 0)
315                 return CUE_SUCCESS;
316         else
317                 return CUE_SCLEAN_FAILED;
318 }
319
320 static void amdgpu_query_info_test(void)
321 {
322         struct amdgpu_gpu_info gpu_info = {0};
323         uint32_t version, feature;
324         int r;
325
326         r = amdgpu_query_gpu_info(device_handle, &gpu_info);
327         CU_ASSERT_EQUAL(r, 0);
328
329         r = amdgpu_query_firmware_version(device_handle, AMDGPU_INFO_FW_VCE, 0,
330                                           0, &version, &feature);
331         CU_ASSERT_EQUAL(r, 0);
332 }
333
/*
 * Submit one GFX command stream carrying two IBs that live in separate
 * BOs: a constant-engine (CE) IB and a DE IB that waits on the CE
 * counter.  Then wait for the submission's fence and clean up.
 */
static void amdgpu_command_submission_gfx_separate_ibs(void)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle, ib_result_ce_handle;
	void *ib_result_cpu, *ib_result_ce_cpu;
	uint64_t ib_result_mc_address, ib_result_ce_mc_address;
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info[2];
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t *ptr;
	uint32_t expired;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle, va_handle_ce;
	int r, i = 0;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* One 4 KiB GTT BO for the DE IB... */
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* ...and a separate one for the CE IB. */
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_ce_handle, &ib_result_ce_cpu,
				    &ib_result_ce_mc_address, &va_handle_ce);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle,
			       ib_result_ce_handle, &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));

	/* IT_SET_CE_DE_COUNTERS */
	ptr = ib_result_ce_cpu;
	if (family_id != AMDGPU_FAMILY_SI) {
		/* SI parts do not take this packet; skip it there. */
		ptr[i++] = 0xc0008900;
		ptr[i++] = 0;
	}
	ptr[i++] = 0xc0008400;
	ptr[i++] = 1;
	ib_info[0].ib_mc_address = ib_result_ce_mc_address;
	ib_info[0].size = i;
	/* Mark the first IB as a constant-engine IB. */
	ib_info[0].flags = AMDGPU_IB_FLAG_CE;

	/* IT_WAIT_ON_CE_COUNTER */
	ptr = ib_result_cpu;
	ptr[0] = 0xc0008600;
	ptr[1] = 0x00000001;
	ib_info[1].ib_mc_address = ib_result_mc_address;
	ib_info[1].size = 2;

	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
	ibs_request.number_of_ibs = 2;
	ibs_request.ibs = ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);

	CU_ASSERT_EQUAL(r, 0);

	/* Block until the submission retires. */
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_ce_handle, va_handle_ce,
				     ib_result_ce_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);

}
424
/*
 * Submit one GFX command stream with two IBs (CE + DE) packed into a
 * single shared BO at different offsets, then wait for the fence and
 * clean up.
 */
static void amdgpu_command_submission_gfx_shared_ib(void)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info[2];
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t *ptr;
	uint32_t expired;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle;
	int r, i = 0;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* Single 4 KiB GTT BO shared by both IBs. */
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
			       &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));

	/* IT_SET_CE_DE_COUNTERS */
	ptr = ib_result_cpu;
	if (family_id != AMDGPU_FAMILY_SI) {
		/* SI parts do not take this packet; skip it there. */
		ptr[i++] = 0xc0008900;
		ptr[i++] = 0;
	}
	ptr[i++] = 0xc0008400;
	ptr[i++] = 1;
	ib_info[0].ib_mc_address = ib_result_mc_address;
	ib_info[0].size = i;
	ib_info[0].flags = AMDGPU_IB_FLAG_CE;

	/* Second (DE) IB: IT_WAIT_ON_CE_COUNTER written at dword offset 4
	 * of the same BO, i.e. byte offset 16 from the IB base. */
	ptr = (uint32_t *)ib_result_cpu + 4;
	ptr[0] = 0xc0008600;
	ptr[1] = 0x00000001;
	ib_info[1].ib_mc_address = ib_result_mc_address + 16;
	ib_info[1].size = 2;

	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
	ibs_request.number_of_ibs = 2;
	ibs_request.ibs = ib_info;
	ibs_request.resources = bo_list;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);

	CU_ASSERT_EQUAL(r, 0);

	/* Block until the submission retires. */
	fence_status.context = context_handle;
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
503
504 static void amdgpu_command_submission_gfx_cp_write_data(void)
505 {
506         amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_GFX);
507 }
508
509 static void amdgpu_command_submission_gfx_cp_const_fill(void)
510 {
511         amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_GFX);
512 }
513
514 static void amdgpu_command_submission_gfx_cp_copy_data(void)
515 {
516         amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_GFX);
517 }
518
/* Run the full set of GFX-ring command-submission sub-tests. */
static void amdgpu_command_submission_gfx(void)
{
	amdgpu_command_submission_gfx_cp_write_data();  /* CP WRITE_DATA */
	amdgpu_command_submission_gfx_cp_const_fill();  /* CP const fill */
	amdgpu_command_submission_gfx_cp_copy_data();   /* CP copy */
	amdgpu_command_submission_gfx_separate_ibs();   /* multi-IB, separate BOs */
	amdgpu_command_submission_gfx_shared_ib();      /* multi-IB, one shared BO */
}
532
/*
 * Exercise userspace semaphores in two configurations:
 *   1. signal on one engine (SDMA) and wait on another (GFX) within the
 *      same context;
 *   2. signal and wait on the same engine (GFX) across two contexts.
 * Each half submits a single-NOP IB on the signalling side, then a
 * single-NOP IB on the waiting side, and checks the waiter's fence
 * expires within 500 ms.
 */
static void amdgpu_semaphore_test(void)
{
	amdgpu_context_handle context_handle[2];
	amdgpu_semaphore_handle sem;
	amdgpu_bo_handle ib_result_handle[2];
	void *ib_result_cpu[2];
	uint64_t ib_result_mc_address[2];
	struct amdgpu_cs_request ibs_request[2] = {0};
	struct amdgpu_cs_ib_info ib_info[2] = {0};
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t *ptr;
	uint32_t expired;
	uint32_t sdma_nop, gfx_nop;
	amdgpu_bo_list_handle bo_list[2];
	amdgpu_va_handle va_handle[2];
	int r, i;

	/* NOP packet encodings differ between SI and later families. */
	if (family_id == AMDGPU_FAMILY_SI) {
		sdma_nop = SDMA_PACKET_SI(SDMA_NOP_SI, 0, 0, 0, 0);
		gfx_nop = GFX_COMPUTE_NOP_SI;
	} else {
		sdma_nop = SDMA_PKT_HEADER_OP(SDMA_NOP);
		gfx_nop = GFX_COMPUTE_NOP;
	}

	r = amdgpu_cs_create_semaphore(&sem);
	CU_ASSERT_EQUAL(r, 0);
	/* Two contexts, each with its own 4 KiB IB BO and BO list. */
	for (i = 0; i < 2; i++) {
		r = amdgpu_cs_ctx_create(device_handle, &context_handle[i]);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
					    AMDGPU_GEM_DOMAIN_GTT, 0,
					    &ib_result_handle[i], &ib_result_cpu[i],
					    &ib_result_mc_address[i], &va_handle[i]);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_get_bo_list(device_handle, ib_result_handle[i],
				       NULL, &bo_list[i]);
		CU_ASSERT_EQUAL(r, 0);
	}

	/* 1. same context different engine */
	/* Signal side: single SDMA NOP submitted on context 0. */
	ptr = ib_result_cpu[0];
	ptr[0] = sdma_nop;
	ib_info[0].ib_mc_address = ib_result_mc_address[0];
	ib_info[0].size = 1;

	ibs_request[0].ip_type = AMDGPU_HW_IP_DMA;
	ibs_request[0].number_of_ibs = 1;
	ibs_request[0].ibs = &ib_info[0];
	ibs_request[0].resources = bo_list[0];
	ibs_request[0].fence_info.handle = NULL;
	r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[0], 1);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_DMA, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);

	/* Wait side: GFX on the same context waits on the semaphore,
	 * then submits a single GFX NOP. */
	r = amdgpu_cs_wait_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);
	ptr = ib_result_cpu[1];
	ptr[0] = gfx_nop;
	ib_info[1].ib_mc_address = ib_result_mc_address[1];
	ib_info[1].size = 1;

	ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
	ibs_request[1].number_of_ibs = 1;
	ibs_request[1].ibs = &ib_info[1];
	ibs_request[1].resources = bo_list[1];
	ibs_request[1].fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[1], 1);
	CU_ASSERT_EQUAL(r, 0);

	/* The waiter's fence must expire within 500 ms (ns timeout). */
	fence_status.context = context_handle[0];
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.fence = ibs_request[1].seq_no;
	r = amdgpu_cs_query_fence_status(&fence_status,
					 500000000, 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	/* 2. same engine different context */
	/* Signal side: GFX NOP on context 0, then signal. */
	ptr = ib_result_cpu[0];
	ptr[0] = gfx_nop;
	ib_info[0].ib_mc_address = ib_result_mc_address[0];
	ib_info[0].size = 1;

	ibs_request[0].ip_type = AMDGPU_HW_IP_GFX;
	ibs_request[0].number_of_ibs = 1;
	ibs_request[0].ibs = &ib_info[0];
	ibs_request[0].resources = bo_list[0];
	ibs_request[0].fence_info.handle = NULL;
	r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request[0], 1);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_cs_signal_semaphore(context_handle[0], AMDGPU_HW_IP_GFX, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);

	/* Wait side: context 1 waits on the same GFX engine, then
	 * submits its own GFX NOP. */
	r = amdgpu_cs_wait_semaphore(context_handle[1], AMDGPU_HW_IP_GFX, 0, 0, sem);
	CU_ASSERT_EQUAL(r, 0);
	ptr = ib_result_cpu[1];
	ptr[0] = gfx_nop;
	ib_info[1].ib_mc_address = ib_result_mc_address[1];
	ib_info[1].size = 1;

	ibs_request[1].ip_type = AMDGPU_HW_IP_GFX;
	ibs_request[1].number_of_ibs = 1;
	ibs_request[1].ibs = &ib_info[1];
	ibs_request[1].resources = bo_list[1];
	ibs_request[1].fence_info.handle = NULL;
	r = amdgpu_cs_submit(context_handle[1], 0,&ibs_request[1], 1);

	CU_ASSERT_EQUAL(r, 0);

	/* Again the waiter's fence must expire within 500 ms. */
	fence_status.context = context_handle[1];
	fence_status.ip_type = AMDGPU_HW_IP_GFX;
	fence_status.ip_instance = 0;
	fence_status.fence = ibs_request[1].seq_no;
	r = amdgpu_cs_query_fence_status(&fence_status,
					 500000000, 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	/* Tear down both contexts' resources, then the semaphore. */
	for (i = 0; i < 2; i++) {
		r = amdgpu_bo_unmap_and_free(ib_result_handle[i], va_handle[i],
					     ib_result_mc_address[i], 4096);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_bo_list_destroy(bo_list[i]);
		CU_ASSERT_EQUAL(r, 0);

		r = amdgpu_cs_ctx_free(context_handle[i]);
		CU_ASSERT_EQUAL(r, 0);
	}

	r = amdgpu_cs_destroy_semaphore(sem);
	CU_ASSERT_EQUAL(r, 0);
}
672
673 static void amdgpu_command_submission_compute_nop(void)
674 {
675         amdgpu_context_handle context_handle;
676         amdgpu_bo_handle ib_result_handle;
677         void *ib_result_cpu;
678         uint64_t ib_result_mc_address;
679         struct amdgpu_cs_request ibs_request;
680         struct amdgpu_cs_ib_info ib_info;
681         struct amdgpu_cs_fence fence_status;
682         uint32_t *ptr;
683         uint32_t expired;
684         int i, r, instance;
685         amdgpu_bo_list_handle bo_list;
686         amdgpu_va_handle va_handle;
687         struct drm_amdgpu_info_hw_ip info;
688
689         r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_COMPUTE, 0, &info);
690         CU_ASSERT_EQUAL(r, 0);
691
692         r = amdgpu_cs_ctx_create(device_handle, &context_handle);
693         CU_ASSERT_EQUAL(r, 0);
694
695         for (instance = 0; (1 << instance) & info.available_rings; instance++) {
696                 r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
697                                             AMDGPU_GEM_DOMAIN_GTT, 0,
698                                             &ib_result_handle, &ib_result_cpu,
699                                             &ib_result_mc_address, &va_handle);
700                 CU_ASSERT_EQUAL(r, 0);
701
702                 r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
703                                        &bo_list);
704                 CU_ASSERT_EQUAL(r, 0);
705
706                 ptr = ib_result_cpu;
707                 memset(ptr, 0, 16);
708                 ptr[0]=PACKET3(PACKET3_NOP, 14);
709
710                 memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
711                 ib_info.ib_mc_address = ib_result_mc_address;
712                 ib_info.size = 16;
713
714                 memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
715                 ibs_request.ip_type = AMDGPU_HW_IP_COMPUTE;
716                 ibs_request.ring = instance;
717                 ibs_request.number_of_ibs = 1;
718                 ibs_request.ibs = &ib_info;
719                 ibs_request.resources = bo_list;
720                 ibs_request.fence_info.handle = NULL;
721
722                 memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
723                 r = amdgpu_cs_submit(context_handle, 0,&ibs_request, 1);
724                 CU_ASSERT_EQUAL(r, 0);
725
726                 fence_status.context = context_handle;
727                 fence_status.ip_type = AMDGPU_HW_IP_COMPUTE;
728                 fence_status.ip_instance = 0;
729                 fence_status.ring = instance;
730                 fence_status.fence = ibs_request.seq_no;
731
732                 r = amdgpu_cs_query_fence_status(&fence_status,
733                                                  AMDGPU_TIMEOUT_INFINITE,
734                                                  0, &expired);
735                 CU_ASSERT_EQUAL(r, 0);
736
737                 r = amdgpu_bo_list_destroy(bo_list);
738                 CU_ASSERT_EQUAL(r, 0);
739
740                 r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
741                                              ib_result_mc_address, 4096);
742                 CU_ASSERT_EQUAL(r, 0);
743         }
744
745         r = amdgpu_cs_ctx_free(context_handle);
746         CU_ASSERT_EQUAL(r, 0);
747 }
748
749 static void amdgpu_command_submission_compute_cp_write_data(void)
750 {
751         amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_COMPUTE);
752 }
753
754 static void amdgpu_command_submission_compute_cp_const_fill(void)
755 {
756         amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_COMPUTE);
757 }
758
759 static void amdgpu_command_submission_compute_cp_copy_data(void)
760 {
761         amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_COMPUTE);
762 }
763
/* Run the full set of compute-ring command-submission sub-tests. */
static void amdgpu_command_submission_compute(void)
{
	amdgpu_command_submission_compute_cp_write_data();  /* CP WRITE_DATA */
	amdgpu_command_submission_compute_cp_const_fill();  /* CP const fill */
	amdgpu_command_submission_compute_cp_copy_data();   /* CP copy */
	amdgpu_command_submission_compute_nop();            /* NOP on every ring */
}
775
776 /*
777  * caller need create/release:
778  * pm4_src, resources, ib_info, and ibs_request
779  * submit command stream described in ibs_request and wait for this IB accomplished
780  */
static void amdgpu_test_exec_cs_helper(amdgpu_context_handle context_handle,
				       unsigned ip_type,
				       int instance, int pm4_dw, uint32_t *pm4_src,
				       int res_cnt, amdgpu_bo_handle *resources,
				       struct amdgpu_cs_ib_info *ib_info,
				       struct amdgpu_cs_request *ibs_request)
{
	int r;
	uint32_t expired;
	uint32_t *ring_ptr;
	amdgpu_bo_handle ib_result_handle;
	void *ib_result_cpu;
	uint64_t ib_result_mc_address;
	struct amdgpu_cs_fence fence_status = {0};
	/* one extra slot so the IB buffer itself can be added to the BO list */
	amdgpu_bo_handle *all_res = alloca(sizeof(resources[0]) * (res_cnt + 1));
	amdgpu_va_handle va_handle;

	/* prepare CS */
	CU_ASSERT_NOT_EQUAL(pm4_src, NULL);
	CU_ASSERT_NOT_EQUAL(resources, NULL);
	CU_ASSERT_NOT_EQUAL(ib_info, NULL);
	CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
	CU_ASSERT_TRUE(pm4_dw <= 1024);

	/* allocate IB */
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* copy PM4 packet to ring from caller */
	ring_ptr = ib_result_cpu;
	memcpy(ring_ptr, pm4_src, pm4_dw * sizeof(*pm4_src));

	ib_info->ib_mc_address = ib_result_mc_address;
	ib_info->size = pm4_dw;

	ibs_request->ip_type = ip_type;
	ibs_request->ring = instance;
	ibs_request->number_of_ibs = 1;
	ibs_request->ibs = ib_info;
	ibs_request->fence_info.handle = NULL;

	/* the IB buffer must be resident too, append it to the caller's list */
	memcpy(all_res, resources, sizeof(resources[0]) * res_cnt);
	all_res[res_cnt] = ib_result_handle;

	r = amdgpu_bo_list_create(device_handle, res_cnt+1, all_res,
				  NULL, &ibs_request->resources);
	CU_ASSERT_EQUAL(r, 0);

	CU_ASSERT_NOT_EQUAL(ibs_request, NULL);

	/* submit CS */
	r = amdgpu_cs_submit(context_handle, 0, ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);

	/* the BO list is only needed for the submission itself */
	r = amdgpu_bo_list_destroy(ibs_request->resources);
	CU_ASSERT_EQUAL(r, 0);

	fence_status.ip_type = ip_type;
	fence_status.ip_instance = 0;
	fence_status.ring = ibs_request->ring;
	fence_status.context = context_handle;
	fence_status.fence = ibs_request->seq_no;

	/* wait for IB accomplished */
	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);
}
858
/*
 * Submit a write-linear command on every available ring of the given IP
 * (SDMA, GFX or compute) and verify the whole destination buffer contains
 * the expected pattern.  Each ring is tested twice: once with cached GTT
 * and once with write-combined (USWC) GTT.
 */
static void amdgpu_command_submission_write_linear_helper(unsigned ip_type)
{
	const int sdma_write_length = 128;
	const int pm4_dw = 256;
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle bo;
	amdgpu_bo_handle *resources;
	uint32_t *pm4;
	struct amdgpu_cs_ib_info *ib_info;
	struct amdgpu_cs_request *ibs_request;
	uint64_t bo_mc;
	volatile uint32_t *bo_cpu;
	int i, j, r, loop, ring_id;
	/* two mapping flavours to exercise: cached GTT and USWC GTT */
	uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
	amdgpu_va_handle va_handle;
	struct drm_amdgpu_info_hw_ip hw_ip_info;

	pm4 = calloc(pm4_dw, sizeof(*pm4));
	CU_ASSERT_NOT_EQUAL(pm4, NULL);

	ib_info = calloc(1, sizeof(*ib_info));
	CU_ASSERT_NOT_EQUAL(ib_info, NULL);

	ibs_request = calloc(1, sizeof(*ibs_request));
	CU_ASSERT_NOT_EQUAL(ibs_request, NULL);

	/* needed for the available_rings mask iterated below */
	r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* prepare resource */
	resources = calloc(1, sizeof(amdgpu_bo_handle));
	CU_ASSERT_NOT_EQUAL(resources, NULL);

	/* run the test on every ring the kernel reports as usable */
	for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
		loop = 0;
		while(loop < 2) {
			/* allocate UC bo for sDMA use */
			r = amdgpu_bo_alloc_and_map(device_handle,
						    sdma_write_length * sizeof(uint32_t),
						    4096, AMDGPU_GEM_DOMAIN_GTT,
						    gtt_flags[loop], &bo, (void**)&bo_cpu,
						    &bo_mc, &va_handle);
			CU_ASSERT_EQUAL(r, 0);

			/* clear bo */
			memset((void*)bo_cpu, 0, sdma_write_length * sizeof(uint32_t));

			resources[0] = bo;

			/* fulfill PM4: test DMA write-linear */
			i = j = 0;
			if (ip_type == AMDGPU_HW_IP_DMA) {
				/* SI SDMA encodes the length in the packet header */
				if (family_id == AMDGPU_FAMILY_SI)
					pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_WRITE, 0, 0, 0,
								  sdma_write_length);
				else
					pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
							       SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
				pm4[i++] = 0xffffffff & bo_mc;
				pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
				/* AI and later count dwords minus one */
				if (family_id >= AMDGPU_FAMILY_AI)
					pm4[i++] = sdma_write_length - 1;
				else if (family_id != AMDGPU_FAMILY_SI)
					pm4[i++] = sdma_write_length;
				while(j++ < sdma_write_length)
					pm4[i++] = 0xdeadbeaf;
			} else if ((ip_type == AMDGPU_HW_IP_GFX) ||
				    (ip_type == AMDGPU_HW_IP_COMPUTE)) {
				pm4[i++] = PACKET3(PACKET3_WRITE_DATA, 2 + sdma_write_length);
				pm4[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
				pm4[i++] = 0xfffffffc & bo_mc;
				pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
				while(j++ < sdma_write_length)
					pm4[i++] = 0xdeadbeaf;
			}

			amdgpu_test_exec_cs_helper(context_handle,
						   ip_type, ring_id,
						   i, pm4,
						   1, resources,
						   ib_info, ibs_request);

			/* verify if SDMA test result meets with expected */
			i = 0;
			while(i < sdma_write_length) {
				CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
			}

			r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
						     sdma_write_length * sizeof(uint32_t));
			CU_ASSERT_EQUAL(r, 0);
			loop++;
		}
	}
	/* clean resources */
	free(resources);
	free(ibs_request);
	free(ib_info);
	free(pm4);

	/* end of test */
	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
966
/* Exercise the SDMA write-linear path on all available SDMA rings. */
static void amdgpu_command_submission_sdma_write_linear(void)
{
	amdgpu_command_submission_write_linear_helper(AMDGPU_HW_IP_DMA);
}
971
972 static void amdgpu_command_submission_const_fill_helper(unsigned ip_type)
973 {
974         const int sdma_write_length = 1024 * 1024;
975         const int pm4_dw = 256;
976         amdgpu_context_handle context_handle;
977         amdgpu_bo_handle bo;
978         amdgpu_bo_handle *resources;
979         uint32_t *pm4;
980         struct amdgpu_cs_ib_info *ib_info;
981         struct amdgpu_cs_request *ibs_request;
982         uint64_t bo_mc;
983         volatile uint32_t *bo_cpu;
984         int i, j, r, loop, ring_id;
985         uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
986         amdgpu_va_handle va_handle;
987         struct drm_amdgpu_info_hw_ip hw_ip_info;
988
989         pm4 = calloc(pm4_dw, sizeof(*pm4));
990         CU_ASSERT_NOT_EQUAL(pm4, NULL);
991
992         ib_info = calloc(1, sizeof(*ib_info));
993         CU_ASSERT_NOT_EQUAL(ib_info, NULL);
994
995         ibs_request = calloc(1, sizeof(*ibs_request));
996         CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
997
998         r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &hw_ip_info);
999         CU_ASSERT_EQUAL(r, 0);
1000
1001         r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1002         CU_ASSERT_EQUAL(r, 0);
1003
1004         /* prepare resource */
1005         resources = calloc(1, sizeof(amdgpu_bo_handle));
1006         CU_ASSERT_NOT_EQUAL(resources, NULL);
1007
1008         for (ring_id = 0; (1 << ring_id) & hw_ip_info.available_rings; ring_id++) {
1009                 loop = 0;
1010                 while(loop < 2) {
1011                         /* allocate UC bo for sDMA use */
1012                         r = amdgpu_bo_alloc_and_map(device_handle,
1013                                                     sdma_write_length, 4096,
1014                                                     AMDGPU_GEM_DOMAIN_GTT,
1015                                                     gtt_flags[loop], &bo, (void**)&bo_cpu,
1016                                                     &bo_mc, &va_handle);
1017                         CU_ASSERT_EQUAL(r, 0);
1018
1019                         /* clear bo */
1020                         memset((void*)bo_cpu, 0, sdma_write_length);
1021
1022                         resources[0] = bo;
1023
1024                         /* fulfill PM4: test DMA const fill */
1025                         i = j = 0;
1026                         if (ip_type == AMDGPU_HW_IP_DMA) {
1027                                 if (family_id == AMDGPU_FAMILY_SI) {
1028                                         pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_CONSTANT_FILL_SI,
1029                                                                   0, 0, 0,
1030                                                                   sdma_write_length / 4);
1031                                         pm4[i++] = 0xfffffffc & bo_mc;
1032                                         pm4[i++] = 0xdeadbeaf;
1033                                         pm4[i++] = (0xffffffff00000000 & bo_mc) >> 16;
1034                                 } else {
1035                                         pm4[i++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0,
1036                                                                SDMA_CONSTANT_FILL_EXTRA_SIZE(2));
1037                                         pm4[i++] = 0xffffffff & bo_mc;
1038                                         pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1039                                         pm4[i++] = 0xdeadbeaf;
1040                                         if (family_id >= AMDGPU_FAMILY_AI)
1041                                                 pm4[i++] = sdma_write_length - 1;
1042                                         else
1043                                                 pm4[i++] = sdma_write_length;
1044                                 }
1045                         } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1046                                    (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1047                                 if (family_id == AMDGPU_FAMILY_SI) {
1048                                         pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4);
1049                                         pm4[i++] = 0xdeadbeaf;
1050                                         pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1051                                                    PACKET3_DMA_DATA_SI_DST_SEL(0) |
1052                                                    PACKET3_DMA_DATA_SI_SRC_SEL(2) |
1053                                                    PACKET3_DMA_DATA_SI_CP_SYNC;
1054                                         pm4[i++] = 0xffffffff & bo_mc;
1055                                         pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1056                                         pm4[i++] = sdma_write_length;
1057                                 } else {
1058                                         pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1059                                         pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1060                                                    PACKET3_DMA_DATA_DST_SEL(0) |
1061                                                    PACKET3_DMA_DATA_SRC_SEL(2) |
1062                                                    PACKET3_DMA_DATA_CP_SYNC;
1063                                         pm4[i++] = 0xdeadbeaf;
1064                                         pm4[i++] = 0;
1065                                         pm4[i++] = 0xfffffffc & bo_mc;
1066                                         pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1067                                         pm4[i++] = sdma_write_length;
1068                                 }
1069                         }
1070
1071                         amdgpu_test_exec_cs_helper(context_handle,
1072                                                    ip_type, ring_id,
1073                                                    i, pm4,
1074                                                    1, resources,
1075                                                    ib_info, ibs_request);
1076
1077                         /* verify if SDMA test result meets with expected */
1078                         i = 0;
1079                         while(i < (sdma_write_length / 4)) {
1080                                 CU_ASSERT_EQUAL(bo_cpu[i++], 0xdeadbeaf);
1081                         }
1082
1083                         r = amdgpu_bo_unmap_and_free(bo, va_handle, bo_mc,
1084                                                      sdma_write_length);
1085                         CU_ASSERT_EQUAL(r, 0);
1086                         loop++;
1087                 }
1088         }
1089         /* clean resources */
1090         free(resources);
1091         free(ibs_request);
1092         free(ib_info);
1093         free(pm4);
1094
1095         /* end of test */
1096         r = amdgpu_cs_ctx_free(context_handle);
1097         CU_ASSERT_EQUAL(r, 0);
1098 }
1099
/* Exercise the SDMA const-fill path on all available SDMA rings. */
static void amdgpu_command_submission_sdma_const_fill(void)
{
	amdgpu_command_submission_const_fill_helper(AMDGPU_HW_IP_DMA);
}
1104
1105 static void amdgpu_command_submission_copy_linear_helper(unsigned ip_type)
1106 {
1107         const int sdma_write_length = 1024;
1108         const int pm4_dw = 256;
1109         amdgpu_context_handle context_handle;
1110         amdgpu_bo_handle bo1, bo2;
1111         amdgpu_bo_handle *resources;
1112         uint32_t *pm4;
1113         struct amdgpu_cs_ib_info *ib_info;
1114         struct amdgpu_cs_request *ibs_request;
1115         uint64_t bo1_mc, bo2_mc;
1116         volatile unsigned char *bo1_cpu, *bo2_cpu;
1117         int i, j, r, loop1, loop2;
1118         uint64_t gtt_flags[2] = {0, AMDGPU_GEM_CREATE_CPU_GTT_USWC};
1119         amdgpu_va_handle bo1_va_handle, bo2_va_handle;
1120
1121         pm4 = calloc(pm4_dw, sizeof(*pm4));
1122         CU_ASSERT_NOT_EQUAL(pm4, NULL);
1123
1124         ib_info = calloc(1, sizeof(*ib_info));
1125         CU_ASSERT_NOT_EQUAL(ib_info, NULL);
1126
1127         ibs_request = calloc(1, sizeof(*ibs_request));
1128         CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
1129
1130         r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1131         CU_ASSERT_EQUAL(r, 0);
1132
1133         /* prepare resource */
1134         resources = calloc(2, sizeof(amdgpu_bo_handle));
1135         CU_ASSERT_NOT_EQUAL(resources, NULL);
1136
1137         loop1 = loop2 = 0;
1138         /* run 9 circle to test all mapping combination */
1139         while(loop1 < 2) {
1140                 while(loop2 < 2) {
1141                         /* allocate UC bo1for sDMA use */
1142                         r = amdgpu_bo_alloc_and_map(device_handle,
1143                                                     sdma_write_length, 4096,
1144                                                     AMDGPU_GEM_DOMAIN_GTT,
1145                                                     gtt_flags[loop1], &bo1,
1146                                                     (void**)&bo1_cpu, &bo1_mc,
1147                                                     &bo1_va_handle);
1148                         CU_ASSERT_EQUAL(r, 0);
1149
1150                         /* set bo1 */
1151                         memset((void*)bo1_cpu, 0xaa, sdma_write_length);
1152
1153                         /* allocate UC bo2 for sDMA use */
1154                         r = amdgpu_bo_alloc_and_map(device_handle,
1155                                                     sdma_write_length, 4096,
1156                                                     AMDGPU_GEM_DOMAIN_GTT,
1157                                                     gtt_flags[loop2], &bo2,
1158                                                     (void**)&bo2_cpu, &bo2_mc,
1159                                                     &bo2_va_handle);
1160                         CU_ASSERT_EQUAL(r, 0);
1161
1162                         /* clear bo2 */
1163                         memset((void*)bo2_cpu, 0, sdma_write_length);
1164
1165                         resources[0] = bo1;
1166                         resources[1] = bo2;
1167
1168                         /* fulfill PM4: test DMA copy linear */
1169                         i = j = 0;
1170                         if (ip_type == AMDGPU_HW_IP_DMA) {
1171                                 if (family_id == AMDGPU_FAMILY_SI) {
1172                                         pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0,
1173                                                                   sdma_write_length);
1174                                         pm4[i++] = 0xffffffff & bo2_mc;
1175                                         pm4[i++] = 0xffffffff & bo1_mc;
1176                                         pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1177                                         pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1178                                 } else {
1179                                         pm4[i++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
1180                                         if (family_id >= AMDGPU_FAMILY_AI)
1181                                                 pm4[i++] = sdma_write_length - 1;
1182                                         else
1183                                                 pm4[i++] = sdma_write_length;
1184                                         pm4[i++] = 0;
1185                                         pm4[i++] = 0xffffffff & bo1_mc;
1186                                         pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1187                                         pm4[i++] = 0xffffffff & bo2_mc;
1188                                         pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1189                                 }
1190
1191                         } else if ((ip_type == AMDGPU_HW_IP_GFX) ||
1192                                    (ip_type == AMDGPU_HW_IP_COMPUTE)) {
1193                                 if (family_id == AMDGPU_FAMILY_SI) {
1194                                         pm4[i++] = PACKET3(PACKET3_DMA_DATA_SI, 4);
1195                                         pm4[i++] = 0xfffffffc & bo1_mc;
1196                                         pm4[i++] = PACKET3_DMA_DATA_SI_ENGINE(0) |
1197                                                 PACKET3_DMA_DATA_SI_DST_SEL(0) |
1198                                                 PACKET3_DMA_DATA_SI_SRC_SEL(0) |
1199                                                 PACKET3_DMA_DATA_SI_CP_SYNC |
1200                                                 (0xffff00000000 & bo1_mc) >> 32;
1201                                         pm4[i++] = 0xfffffffc & bo2_mc;
1202                                         pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1203                                         pm4[i++] = sdma_write_length;
1204                                 } else {
1205                                         pm4[i++] = PACKET3(PACKET3_DMA_DATA, 5);
1206                                         pm4[i++] = PACKET3_DMA_DATA_ENGINE(0) |
1207                                                 PACKET3_DMA_DATA_DST_SEL(0) |
1208                                                 PACKET3_DMA_DATA_SRC_SEL(0) |
1209                                                 PACKET3_DMA_DATA_CP_SYNC;
1210                                         pm4[i++] = 0xfffffffc & bo1_mc;
1211                                         pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
1212                                         pm4[i++] = 0xfffffffc & bo2_mc;
1213                                         pm4[i++] = (0xffffffff00000000 & bo2_mc) >> 32;
1214                                         pm4[i++] = sdma_write_length;
1215                                 }
1216                         }
1217
1218                         amdgpu_test_exec_cs_helper(context_handle,
1219                                                    ip_type, 0,
1220                                                    i, pm4,
1221                                                    2, resources,
1222                                                    ib_info, ibs_request);
1223
1224                         /* verify if SDMA test result meets with expected */
1225                         i = 0;
1226                         while(i < sdma_write_length) {
1227                                 CU_ASSERT_EQUAL(bo2_cpu[i++], 0xaa);
1228                         }
1229                         r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
1230                                                      sdma_write_length);
1231                         CU_ASSERT_EQUAL(r, 0);
1232                         r = amdgpu_bo_unmap_and_free(bo2, bo2_va_handle, bo2_mc,
1233                                                      sdma_write_length);
1234                         CU_ASSERT_EQUAL(r, 0);
1235                         loop2++;
1236                 }
1237                 loop1++;
1238         }
1239         /* clean resources */
1240         free(resources);
1241         free(ibs_request);
1242         free(ib_info);
1243         free(pm4);
1244
1245         /* end of test */
1246         r = amdgpu_cs_ctx_free(context_handle);
1247         CU_ASSERT_EQUAL(r, 0);
1248 }
1249
/* Exercise the SDMA linear-copy path. */
static void amdgpu_command_submission_sdma_copy_linear(void)
{
	amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_DMA);
}
1254
/* Top-level SDMA command-submission test: write-linear, const-fill and copy. */
static void amdgpu_command_submission_sdma(void)
{
	amdgpu_command_submission_sdma_write_linear();
	amdgpu_command_submission_sdma_const_fill();
	amdgpu_command_submission_sdma_copy_linear();
}
1261
/*
 * Submit two GFX command streams, each pairing a CE IB with a DE IB
 * (CE bumps the CE counter, DE waits on it), then wait on both fences
 * with amdgpu_cs_wait_fences().
 *
 * wait_all: passed through to amdgpu_cs_wait_fences() — true waits for
 * all fences, false returns when any one fence signals.
 */
static void amdgpu_command_submission_multi_fence_wait_all(bool wait_all)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle ib_result_handle, ib_result_ce_handle;
	void *ib_result_cpu, *ib_result_ce_cpu;
	uint64_t ib_result_mc_address, ib_result_ce_mc_address;
	struct amdgpu_cs_request ibs_request[2] = {0};
	struct amdgpu_cs_ib_info ib_info[2];
	struct amdgpu_cs_fence fence_status[2] = {0};
	uint32_t *ptr;
	uint32_t expired;
	amdgpu_bo_list_handle bo_list;
	amdgpu_va_handle va_handle, va_handle_ce;
	int r;
	int i = 0, ib_cs_num = 2;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* one buffer for the DE IB ... */
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_handle, &ib_result_cpu,
				    &ib_result_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* ... and one for the CE IB */
	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_result_ce_handle, &ib_result_ce_cpu,
				    &ib_result_ce_mc_address, &va_handle_ce);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_get_bo_list(device_handle, ib_result_handle,
			       ib_result_ce_handle, &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	memset(ib_info, 0, 2 * sizeof(struct amdgpu_cs_ib_info));

	/* IT_SET_CE_DE_COUNTERS */
	ptr = ib_result_ce_cpu;
	/* SI has no CE/DE counter reset packet */
	if (family_id != AMDGPU_FAMILY_SI) {
		ptr[i++] = 0xc0008900;
		ptr[i++] = 0;
	}
	ptr[i++] = 0xc0008400;
	ptr[i++] = 1;
	ib_info[0].ib_mc_address = ib_result_ce_mc_address;
	ib_info[0].size = i;
	ib_info[0].flags = AMDGPU_IB_FLAG_CE;

	/* IT_WAIT_ON_CE_COUNTER */
	ptr = ib_result_cpu;
	ptr[0] = 0xc0008600;
	ptr[1] = 0x00000001;
	ib_info[1].ib_mc_address = ib_result_mc_address;
	ib_info[1].size = 2;

	/* both requests reuse the same CE+DE IB pair */
	for (i = 0; i < ib_cs_num; i++) {
		ibs_request[i].ip_type = AMDGPU_HW_IP_GFX;
		ibs_request[i].number_of_ibs = 2;
		ibs_request[i].ibs = ib_info;
		ibs_request[i].resources = bo_list;
		ibs_request[i].fence_info.handle = NULL;
	}

	r = amdgpu_cs_submit(context_handle, 0,ibs_request, ib_cs_num);

	CU_ASSERT_EQUAL(r, 0);

	for (i = 0; i < ib_cs_num; i++) {
		fence_status[i].context = context_handle;
		fence_status[i].ip_type = AMDGPU_HW_IP_GFX;
		fence_status[i].fence = ibs_request[i].seq_no;
	}

	/* wait on both submissions at once */
	r = amdgpu_cs_wait_fences(fence_status, ib_cs_num, wait_all,
				AMDGPU_TIMEOUT_INFINITE,
				&expired, NULL);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
				     ib_result_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_unmap_and_free(ib_result_ce_handle, va_handle_ce,
				     ib_result_ce_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
1355
/* Run the multi-fence test in both wait-all and wait-any modes. */
static void amdgpu_command_submission_multi_fence(void)
{
	amdgpu_command_submission_multi_fence_wait_all(true);
	amdgpu_command_submission_multi_fence_wait_all(false);
}
1361
1362 static void amdgpu_userptr_test(void)
1363 {
1364         int i, r, j;
1365         uint32_t *pm4 = NULL;
1366         uint64_t bo_mc;
1367         void *ptr = NULL;
1368         int pm4_dw = 256;
1369         int sdma_write_length = 4;
1370         amdgpu_bo_handle handle;
1371         amdgpu_context_handle context_handle;
1372         struct amdgpu_cs_ib_info *ib_info;
1373         struct amdgpu_cs_request *ibs_request;
1374         amdgpu_bo_handle buf_handle;
1375         amdgpu_va_handle va_handle;
1376
1377         pm4 = calloc(pm4_dw, sizeof(*pm4));
1378         CU_ASSERT_NOT_EQUAL(pm4, NULL);
1379
1380         ib_info = calloc(1, sizeof(*ib_info));
1381         CU_ASSERT_NOT_EQUAL(ib_info, NULL);
1382
1383         ibs_request = calloc(1, sizeof(*ibs_request));
1384         CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
1385
1386         r = amdgpu_cs_ctx_create(device_handle, &context_handle);
1387         CU_ASSERT_EQUAL(r, 0);
1388
1389         posix_memalign(&ptr, sysconf(_SC_PAGE_SIZE), BUFFER_SIZE);
1390         CU_ASSERT_NOT_EQUAL(ptr, NULL);
1391         memset(ptr, 0, BUFFER_SIZE);
1392
1393         r = amdgpu_create_bo_from_user_mem(device_handle,
1394                                            ptr, BUFFER_SIZE, &buf_handle);
1395         CU_ASSERT_EQUAL(r, 0);
1396
1397         r = amdgpu_va_range_alloc(device_handle,
1398                                   amdgpu_gpu_va_range_general,
1399                                   BUFFER_SIZE, 1, 0, &bo_mc,
1400                                   &va_handle, 0);
1401         CU_ASSERT_EQUAL(r, 0);
1402
1403         r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_MAP);
1404         CU_ASSERT_EQUAL(r, 0);
1405
1406         handle = buf_handle;
1407
1408         j = i = 0;
1409
1410         if (family_id == AMDGPU_FAMILY_SI)
1411                 pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_WRITE, 0, 0, 0,
1412                                 sdma_write_length);
1413         else
1414                 pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
1415                                 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
1416         pm4[i++] = 0xffffffff & bo_mc;
1417         pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
1418         if (family_id >= AMDGPU_FAMILY_AI)
1419                 pm4[i++] = sdma_write_length - 1;
1420         else if (family_id != AMDGPU_FAMILY_SI)
1421                 pm4[i++] = sdma_write_length;
1422
1423         while (j++ < sdma_write_length)
1424                 pm4[i++] = 0xdeadbeaf;
1425
1426         if (!fork()) {
1427                 pm4[0] = 0x0;
1428                 exit(0);
1429         }
1430
1431         amdgpu_test_exec_cs_helper(context_handle,
1432                                    AMDGPU_HW_IP_DMA, 0,
1433                                    i, pm4,
1434                                    1, &handle,
1435                                    ib_info, ibs_request);
1436         i = 0;
1437         while (i < sdma_write_length) {
1438                 CU_ASSERT_EQUAL(((int*)ptr)[i++], 0xdeadbeaf);
1439         }
1440         free(ibs_request);
1441         free(ib_info);
1442         free(pm4);
1443
1444         r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0, AMDGPU_VA_OP_UNMAP);
1445         CU_ASSERT_EQUAL(r, 0);
1446         r = amdgpu_va_range_free(va_handle);
1447         CU_ASSERT_EQUAL(r, 0);
1448         r = amdgpu_bo_free(buf_handle);
1449         CU_ASSERT_EQUAL(r, 0);
1450         free(ptr);
1451
1452         r = amdgpu_cs_ctx_free(context_handle);
1453         CU_ASSERT_EQUAL(r, 0);
1454
1455         wait(NULL);
1456 }
1457
1458 static void amdgpu_sync_dependency_test(void)
1459 {
1460         amdgpu_context_handle context_handle[2];
1461         amdgpu_bo_handle ib_result_handle;
1462         void *ib_result_cpu;
1463         uint64_t ib_result_mc_address;
1464         struct amdgpu_cs_request ibs_request;
1465         struct amdgpu_cs_ib_info ib_info;
1466         struct amdgpu_cs_fence fence_status;
1467         uint32_t expired;
1468         int i, j, r, instance;
1469         amdgpu_bo_list_handle bo_list;
1470         amdgpu_va_handle va_handle;
1471         static uint32_t *ptr;
1472         uint64_t seq_no;
1473
1474         r = amdgpu_cs_ctx_create(device_handle, &context_handle[0]);
1475         CU_ASSERT_EQUAL(r, 0);
1476         r = amdgpu_cs_ctx_create(device_handle, &context_handle[1]);
1477         CU_ASSERT_EQUAL(r, 0);
1478
1479         r = amdgpu_bo_alloc_and_map(device_handle, 8192, 4096,
1480                         AMDGPU_GEM_DOMAIN_GTT, 0,
1481                                                     &ib_result_handle, &ib_result_cpu,
1482                                                     &ib_result_mc_address, &va_handle);
1483         CU_ASSERT_EQUAL(r, 0);
1484
1485         r = amdgpu_get_bo_list(device_handle, ib_result_handle, NULL,
1486                                &bo_list);
1487         CU_ASSERT_EQUAL(r, 0);
1488
1489         ptr = ib_result_cpu;
1490         i = 0;
1491
1492         memcpy(ptr + CODE_OFFSET , shader_bin, sizeof(shader_bin));
1493
1494         /* Dispatch minimal init config and verify it's executed */
1495         ptr[i++] = PACKET3(PKT3_CONTEXT_CONTROL, 1);
1496         ptr[i++] = 0x80000000;
1497         ptr[i++] = 0x80000000;
1498
1499         ptr[i++] = PACKET3(PKT3_CLEAR_STATE, 0);
1500         ptr[i++] = 0x80000000;
1501
1502
1503         /* Program compute regs */
1504         ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1505         ptr[i++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
1506         ptr[i++] = (ib_result_mc_address + CODE_OFFSET * 4) >> 8;
1507         ptr[i++] = (ib_result_mc_address + CODE_OFFSET * 4) >> 40;
1508
1509
1510         ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1511         ptr[i++] = mmCOMPUTE_PGM_RSRC1 - PACKET3_SET_SH_REG_START;
1512         /*
1513          * 002c0040         COMPUTE_PGM_RSRC1 <- VGPRS = 0
1514                                               SGPRS = 1
1515                                               PRIORITY = 0
1516                                               FLOAT_MODE = 192 (0xc0)
1517                                               PRIV = 0
1518                                               DX10_CLAMP = 1
1519                                               DEBUG_MODE = 0
1520                                               IEEE_MODE = 0
1521                                               BULKY = 0
1522                                               CDBG_USER = 0
1523          *
1524          */
1525         ptr[i++] = 0x002c0040;
1526
1527
1528         /*
1529          * 00000010         COMPUTE_PGM_RSRC2 <- SCRATCH_EN = 0
1530                                               USER_SGPR = 8
1531                                               TRAP_PRESENT = 0
1532                                               TGID_X_EN = 0
1533                                               TGID_Y_EN = 0
1534                                               TGID_Z_EN = 0
1535                                               TG_SIZE_EN = 0
1536                                               TIDIG_COMP_CNT = 0
1537                                               EXCP_EN_MSB = 0
1538                                               LDS_SIZE = 0
1539                                               EXCP_EN = 0
1540          *
1541          */
1542         ptr[i++] = 0x00000010;
1543
1544
1545 /*
1546  * 00000100         COMPUTE_TMPRING_SIZE <- WAVES = 256 (0x100)
1547                                          WAVESIZE = 0
1548  *
1549  */
1550         ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
1551         ptr[i++] = mmCOMPUTE_TMPRING_SIZE - PACKET3_SET_SH_REG_START;
1552         ptr[i++] = 0x00000100;
1553
1554         ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
1555         ptr[i++] = mmCOMPUTE_USER_DATA_0 - PACKET3_SET_SH_REG_START;
1556         ptr[i++] = 0xffffffff & (ib_result_mc_address + DATA_OFFSET * 4);
1557         ptr[i++] = (0xffffffff00000000 & (ib_result_mc_address + DATA_OFFSET * 4)) >> 32;
1558
1559         ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
1560         ptr[i++] = mmCOMPUTE_RESOURCE_LIMITS - PACKET3_SET_SH_REG_START;
1561         ptr[i++] = 0;
1562
1563         ptr[i++] = PACKET3(PKT3_SET_SH_REG, 3);
1564         ptr[i++] = mmCOMPUTE_NUM_THREAD_X - PACKET3_SET_SH_REG_START;
1565         ptr[i++] = 1;
1566         ptr[i++] = 1;
1567         ptr[i++] = 1;
1568
1569
1570         /* Dispatch */
1571         ptr[i++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
1572         ptr[i++] = 1;
1573         ptr[i++] = 1;
1574         ptr[i++] = 1;
1575         ptr[i++] = 0x00000045; /* DISPATCH DIRECT field */
1576
1577
1578         while (i & 7)
1579                 ptr[i++] =  0xffff1000; /* type3 nop packet */
1580
1581         memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
1582         ib_info.ib_mc_address = ib_result_mc_address;
1583         ib_info.size = i;
1584
1585         memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
1586         ibs_request.ip_type = AMDGPU_HW_IP_GFX;
1587         ibs_request.ring = 0;
1588         ibs_request.number_of_ibs = 1;
1589         ibs_request.ibs = &ib_info;
1590         ibs_request.resources = bo_list;
1591         ibs_request.fence_info.handle = NULL;
1592
1593         r = amdgpu_cs_submit(context_handle[1], 0,&ibs_request, 1);
1594         CU_ASSERT_EQUAL(r, 0);
1595         seq_no = ibs_request.seq_no;
1596
1597
1598
1599         /* Prepare second command with dependency on the first */
1600         j = i;
1601         ptr[i++] = PACKET3(PACKET3_WRITE_DATA, 3);
1602         ptr[i++] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1603         ptr[i++] = 0xfffffffc & ib_result_mc_address + DATA_OFFSET * 4;
1604         ptr[i++] = (0xffffffff00000000 & (ib_result_mc_address + DATA_OFFSET * 4)) >> 32;
1605         ptr[i++] = 99;
1606
1607         while (i & 7)
1608                 ptr[i++] =  0xffff1000; /* type3 nop packet */
1609
1610         memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
1611         ib_info.ib_mc_address = ib_result_mc_address + j * 4;
1612         ib_info.size = i - j;
1613
1614         memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
1615         ibs_request.ip_type = AMDGPU_HW_IP_GFX;
1616         ibs_request.ring = 0;
1617         ibs_request.number_of_ibs = 1;
1618         ibs_request.ibs = &ib_info;
1619         ibs_request.resources = bo_list;
1620         ibs_request.fence_info.handle = NULL;
1621
1622         ibs_request.number_of_dependencies = 1;
1623
1624         ibs_request.dependencies = calloc(1, sizeof(*ibs_request.dependencies));
1625         ibs_request.dependencies[0].context = context_handle[1];
1626         ibs_request.dependencies[0].ip_instance = 0;
1627         ibs_request.dependencies[0].ring = 0;
1628         ibs_request.dependencies[0].fence = seq_no;
1629
1630
1631         r = amdgpu_cs_submit(context_handle[0], 0,&ibs_request, 1);
1632         CU_ASSERT_EQUAL(r, 0);
1633
1634
1635         memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
1636         fence_status.context = context_handle[0];
1637         fence_status.ip_type = AMDGPU_HW_IP_GFX;
1638         fence_status.ip_instance = 0;
1639         fence_status.ring = 0;
1640         fence_status.fence = ibs_request.seq_no;
1641
1642         r = amdgpu_cs_query_fence_status(&fence_status,
1643                        AMDGPU_TIMEOUT_INFINITE,0, &expired);
1644         CU_ASSERT_EQUAL(r, 0);
1645
1646         /* Expect the second command to wait for shader to complete */
1647         CU_ASSERT_EQUAL(ptr[DATA_OFFSET], 99);
1648
1649         r = amdgpu_bo_list_destroy(bo_list);
1650         CU_ASSERT_EQUAL(r, 0);
1651
1652         r = amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
1653                                      ib_result_mc_address, 4096);
1654         CU_ASSERT_EQUAL(r, 0);
1655
1656         r = amdgpu_cs_ctx_free(context_handle[0]);
1657         CU_ASSERT_EQUAL(r, 0);
1658         r = amdgpu_cs_ctx_free(context_handle[1]);
1659         CU_ASSERT_EQUAL(r, 0);
1660
1661         free(ibs_request.dependencies);
1662 }