/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "CUnit/Basic.h"

#include "util_math.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

#include "frame.h"	/* raw test frame used as encoder input */
#include "uve_ib.h"	/* UVD ENC command/parameter templates (uve_*) */

#define IB_SIZE		4096
#define MAX_RESOURCES	16

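/*
 * Basic UVD ENC encode micro-test: create an encoder session, initialize
 * it, encode one raw test frame, and tear the session down. Each stage
 * builds a small command stream in a shared IB and submits it on the
 * AMDGPU_HW_IP_UVD_ENC ring.
 */
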
struct amdgpu_uvd_enc_bo {
	amdgpu_bo_handle handle;
	amdgpu_va_handle va_handle;
	uint64_t addr;
	uint64_t size;
	uint8_t *ptr;
};

struct amdgpu_uvd_enc {
	unsigned width;
	unsigned height;
	struct amdgpu_uvd_enc_bo session;	/* encoder session context */
	struct amdgpu_uvd_enc_bo vbuf;		/* raw input video buffer */
	struct amdgpu_uvd_enc_bo bs;		/* output bitstream buffer */
	struct amdgpu_uvd_enc_bo fb;		/* feedback buffer */
	struct amdgpu_uvd_enc_bo cpb;		/* coded picture buffer */
};

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
static uint32_t family_id;

static amdgpu_context_handle context_handle;
static amdgpu_bo_handle ib_handle;
static amdgpu_va_handle ib_va_handle;
static uint64_t ib_mc_address;
static uint32_t *ib_cpu;

static struct amdgpu_uvd_enc enc;
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;

static void amdgpu_cs_uvd_enc_create(void);
static void amdgpu_cs_uvd_enc_session_init(void);
static void amdgpu_cs_uvd_enc_encode(void);
static void amdgpu_cs_uvd_enc_destroy(void);

static bool uvd_enc_support(void);

CU_TestInfo uvd_enc_tests[] = {
	{ "UVD ENC create",  amdgpu_cs_uvd_enc_create },
	{ "UVD ENC session init",  amdgpu_cs_uvd_enc_session_init },
	{ "UVD ENC encode",  amdgpu_cs_uvd_enc_encode },
	{ "UVD ENC destroy",  amdgpu_cs_uvd_enc_destroy },
	CU_TEST_INFO_NULL,
};

int suite_uvd_enc_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);
	if (r)
		return CUE_SINIT_FAILED;

	family_id = device_handle->info.family_id;

	if (!uvd_enc_support()) {
		printf("\n\nThe ASIC does not support UVD ENC; all sub-tests will pass.\n");
		return CUE_SUCCESS;
	}

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	if (r)
		return CUE_SINIT_FAILED;

	r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_handle, (void**)&ib_cpu,
				    &ib_mc_address, &ib_va_handle);
	if (r)
		return CUE_SINIT_FAILED;

	return CUE_SUCCESS;
}

int suite_uvd_enc_tests_clean(void)
{
	int r;

	if (!uvd_enc_support()) {
		r = amdgpu_device_deinitialize(device_handle);
		if (r)
			return CUE_SCLEAN_FAILED;
		return CUE_SUCCESS;
	}

	r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
				     ib_mc_address, IB_SIZE);
	if (r)
		return CUE_SCLEAN_FAILED;
	r = amdgpu_cs_ctx_free(context_handle);
	if (r)
		return CUE_SCLEAN_FAILED;
	r = amdgpu_device_deinitialize(device_handle);
	if (r)
		return CUE_SCLEAN_FAILED;
	return CUE_SUCCESS;
}

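/*
 * Submit the first 'ndw' dwords of the shared IB to the given hardware IP
 * and wait for the resulting fence, so each test stage runs synchronously.
 */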
static int submit(unsigned ndw, unsigned ip)
{
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info = {0};
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t expired;
	int r;

	ib_info.ib_mc_address = ib_mc_address;
	ib_info.size = ndw;

	ibs_request.ip_type = ip;
	r = amdgpu_bo_list_create(device_handle, num_resources, resources,
				  NULL, &ibs_request.resources);
	if (r)
		return r;

	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	if (r)
		return r;
	r = amdgpu_bo_list_destroy(ibs_request.resources);
	if (r)
		return r;

	fence_status.context = context_handle;
	fence_status.ip_type = ip;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	if (r)
		return r;

	return 0;
}

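/*
 * Allocate a buffer object in 'domain', map it into the GPU VA space, and
 * zero it through a temporary CPU mapping; the handles, VA and size are
 * returned in 'uvd_enc_bo'.
 */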
static void alloc_resource(struct amdgpu_uvd_enc_bo *uvd_enc_bo,
			   unsigned size, unsigned domain)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	int r;

	req.alloc_size = ALIGN(size, 4096);
	req.preferred_heap = domain;
	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	uvd_enc_bo->addr = va;
	uvd_enc_bo->handle = buf_handle;
	uvd_enc_bo->size = req.alloc_size;
	uvd_enc_bo->va_handle = va_handle;
	r = amdgpu_bo_cpu_map(uvd_enc_bo->handle, (void **)&uvd_enc_bo->ptr);
	CU_ASSERT_EQUAL(r, 0);
	memset(uvd_enc_bo->ptr, 0, size);
	r = amdgpu_bo_cpu_unmap(uvd_enc_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
}

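/* Undo alloc_resource(): unmap the VA, free the VA range and the BO. */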
static void free_resource(struct amdgpu_uvd_enc_bo *uvd_enc_bo)
{
	int r;

	r = amdgpu_bo_va_op(uvd_enc_bo->handle, 0, uvd_enc_bo->size,
			    uvd_enc_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_free(uvd_enc_bo->va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_free(uvd_enc_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
	memset(uvd_enc_bo, 0, sizeof(*uvd_enc_bo));
}

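/* UVD ENC is usable if the kernel reports at least one available ring. */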
static bool uvd_enc_support(void)
{
	int r;
	struct drm_amdgpu_info_hw_ip info;

	r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_UVD_ENC,
				    0, &info);
	if (r)
		return false;

	return info.available_rings ? true : false;
}

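/*
 * Stage 1: allocate the encoder session buffer and register it, together
 * with the shared IB, in the resource list used by submit().
 */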
static void amdgpu_cs_uvd_enc_create(void)
{
	if (!uvd_enc_support())
		return;

	/* dimensions of the raw test frame bundled in frame.h */
	enc.width = 160;
	enc.height = 128;

	num_resources = 0;
	alloc_resource(&enc.session, 128 * 1024, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.session.handle;
	resources[num_resources++] = ib_handle;
}

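/*
 * Read the encoded size back from the feedback buffer, then byte-sum the
 * bitstream and compare against the known-good checksum for the bundled
 * test frame.
 */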
static void check_result(struct amdgpu_uvd_enc *enc)
{
	uint64_t sum;
	uint32_t s = 175602;	/* expected byte-sum of the encoded bitstream */
	uint32_t *ptr, size;
	int j, r;

	r = amdgpu_bo_cpu_map(enc->fb.handle, (void **)&enc->fb.ptr);
	CU_ASSERT_EQUAL(r, 0);
	ptr = (uint32_t *)enc->fb.ptr;
	size = ptr[6];	/* encoded size as reported in the feedback buffer */
	r = amdgpu_bo_cpu_unmap(enc->fb.handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_cpu_map(enc->bs.handle, (void **)&enc->bs.ptr);
	CU_ASSERT_EQUAL(r, 0);
	for (j = 0, sum = 0; j < size; ++j)
		sum += enc->bs.ptr[j];
	CU_ASSERT_EQUAL(sum, s);
	r = amdgpu_bo_cpu_unmap(enc->bs.handle);
	CU_ASSERT_EQUAL(r, 0);
}

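/*
 * Stage 2: build the session-initialization command stream (session info,
 * task info, then the init, rate-control and filter parameter packets from
 * uve_ib.h) and submit it on the UVD ENC ring.
 */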
static void amdgpu_cs_uvd_enc_session_init(void)
{
	int len, r;

	if (!uvd_enc_support())
		return;

	len = 0;
	memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
	len += sizeof(uve_session_info) / 4;
	ib_cpu[len++] = enc.session.addr >> 32;
	ib_cpu[len++] = enc.session.addr;

	memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
	len += sizeof(uve_task_info) / 4;
	ib_cpu[len++] = 0x000000d8;
	ib_cpu[len++] = 0x00000000;
	ib_cpu[len++] = 0x00000000;

	memcpy((ib_cpu + len), uve_op_init, sizeof(uve_op_init));
	len += sizeof(uve_op_init) / 4;

	memcpy((ib_cpu + len), uve_session_init, sizeof(uve_session_init));
	len += sizeof(uve_session_init) / 4;

	memcpy((ib_cpu + len), uve_layer_ctrl, sizeof(uve_layer_ctrl));
	len += sizeof(uve_layer_ctrl) / 4;

	memcpy((ib_cpu + len), uve_slice_ctrl, sizeof(uve_slice_ctrl));
	len += sizeof(uve_slice_ctrl) / 4;

	memcpy((ib_cpu + len), uve_spec_misc, sizeof(uve_spec_misc));
	len += sizeof(uve_spec_misc) / 4;

	memcpy((ib_cpu + len), uve_rc_session_init, sizeof(uve_rc_session_init));
	len += sizeof(uve_rc_session_init) / 4;

	memcpy((ib_cpu + len), uve_deblocking_filter, sizeof(uve_deblocking_filter));
	len += sizeof(uve_deblocking_filter) / 4;

	memcpy((ib_cpu + len), uve_quality_params, sizeof(uve_quality_params));
	len += sizeof(uve_quality_params) / 4;

	memcpy((ib_cpu + len), uve_op_init_rc, sizeof(uve_op_init_rc));
	len += sizeof(uve_op_init_rc) / 4;

	memcpy((ib_cpu + len), uve_op_init_rc_vbv_level, sizeof(uve_op_init_rc_vbv_level));
	len += sizeof(uve_op_init_rc_vbv_level) / 4;

	r = submit(len, AMDGPU_HW_IP_UVD_ENC);
	CU_ASSERT_EQUAL(r, 0);
}

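/*
 * Stage 3: upload one semi-planar (luma plane followed by chroma plane)
 * test frame into the video buffer, build the per-frame encode command
 * stream, submit it, and verify the resulting bitstream.
 */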
static void amdgpu_cs_uvd_enc_encode(void)
{
	int len, r, i;
	uint64_t luma_offset, chroma_offset;
	uint32_t vbuf_size, bs_size = 0x003f4800, cpb_size;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;

	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	cpb_size = vbuf_size * 10;

	if (!uvd_enc_support())
		return;

	num_resources = 0;
	alloc_resource(&enc.fb, 4096, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.fb.handle;
	alloc_resource(&enc.bs, bs_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.bs.handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	/* copy luma rows, padding each row to the aligned pitch */
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	/* copy interleaved chroma rows following the luma plane */
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

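	/* assemble the per-frame encode command stream in the shared IB */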
	len = 0;
	memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
	len += sizeof(uve_session_info) / 4;
	ib_cpu[len++] = enc.session.addr >> 32;
	ib_cpu[len++] = enc.session.addr;

	memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
	len += sizeof(uve_task_info) / 4;
	ib_cpu[len++] = 0x000005e0;
	ib_cpu[len++] = 0x00000001;
	ib_cpu[len++] = 0x00000001;

	memcpy((ib_cpu + len), uve_nalu_buffer_1, sizeof(uve_nalu_buffer_1));
	len += sizeof(uve_nalu_buffer_1) / 4;

	memcpy((ib_cpu + len), uve_nalu_buffer_2, sizeof(uve_nalu_buffer_2));
	len += sizeof(uve_nalu_buffer_2) / 4;

	memcpy((ib_cpu + len), uve_nalu_buffer_3, sizeof(uve_nalu_buffer_3));
	len += sizeof(uve_nalu_buffer_3) / 4;

	memcpy((ib_cpu + len), uve_nalu_buffer_4, sizeof(uve_nalu_buffer_4));
	len += sizeof(uve_nalu_buffer_4) / 4;

	memcpy((ib_cpu + len), uve_slice_header, sizeof(uve_slice_header));
	len += sizeof(uve_slice_header) / 4;

	/* encode context buffer, backed by the CPB allocation */
	ib_cpu[len++] = 0x00000254;
	ib_cpu[len++] = 0x00000010;
	ib_cpu[len++] = enc.cpb.addr >> 32;
	ib_cpu[len++] = enc.cpb.addr;
	memcpy((ib_cpu + len), uve_ctx_buffer, sizeof(uve_ctx_buffer));
	len += sizeof(uve_ctx_buffer) / 4;

	/* output bitstream buffer address and size */
	memcpy((ib_cpu + len), uve_bitstream_buffer, sizeof(uve_bitstream_buffer));
	len += sizeof(uve_bitstream_buffer) / 4;
	ib_cpu[len++] = 0x00000000;
	ib_cpu[len++] = enc.bs.addr >> 32;
	ib_cpu[len++] = enc.bs.addr;
	ib_cpu[len++] = 0x003f4800;
	ib_cpu[len++] = 0x00000000;

	/* feedback buffer the firmware writes encode results into */
	memcpy((ib_cpu + len), uve_feedback_buffer, sizeof(uve_feedback_buffer));
	len += sizeof(uve_feedback_buffer) / 4;
	ib_cpu[len++] = enc.fb.addr >> 32;
	ib_cpu[len++] = enc.fb.addr;
	ib_cpu[len++] = 0x00000010;
	ib_cpu[len++] = 0x00000028;

	memcpy((ib_cpu + len), uve_feedback_buffer_additional, sizeof(uve_feedback_buffer_additional));
	len += sizeof(uve_feedback_buffer_additional) / 4;

	memcpy((ib_cpu + len), uve_intra_refresh, sizeof(uve_intra_refresh));
	len += sizeof(uve_intra_refresh) / 4;

	memcpy((ib_cpu + len), uve_layer_select, sizeof(uve_layer_select));
	len += sizeof(uve_layer_select) / 4;

	memcpy((ib_cpu + len), uve_rc_layer_init, sizeof(uve_rc_layer_init));
	len += sizeof(uve_rc_layer_init) / 4;

	memcpy((ib_cpu + len), uve_layer_select, sizeof(uve_layer_select));
	len += sizeof(uve_layer_select) / 4;

	memcpy((ib_cpu + len), uve_rc_per_pic, sizeof(uve_rc_per_pic));
	len += sizeof(uve_rc_per_pic) / 4;

	/* input picture: luma plane address, then the chroma plane behind it */
	unsigned luma_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16);
	luma_offset = enc.vbuf.addr;
	chroma_offset = luma_offset + luma_size;
	ib_cpu[len++] = 0x00000054;
	ib_cpu[len++] = 0x0000000c;
	ib_cpu[len++] = 0x00000002;
	ib_cpu[len++] = 0x003f4800;
	ib_cpu[len++] = luma_offset >> 32;
	ib_cpu[len++] = luma_offset;
	ib_cpu[len++] = chroma_offset >> 32;
	ib_cpu[len++] = chroma_offset;
	memcpy((ib_cpu + len), uve_encode_param, sizeof(uve_encode_param));
	len += sizeof(uve_encode_param) / 4;

	memcpy((ib_cpu + len), uve_op_speed_enc_mode, sizeof(uve_op_speed_enc_mode));
	len += sizeof(uve_op_speed_enc_mode) / 4;

	memcpy((ib_cpu + len), uve_op_encode, sizeof(uve_op_encode));
	len += sizeof(uve_op_encode) / 4;

	r = submit(len, AMDGPU_HW_IP_UVD_ENC);
	CU_ASSERT_EQUAL(r, 0);

	check_result(&enc);

	free_resource(&enc.fb);
	free_resource(&enc.bs);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
}

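/*
 * Stage 4: send the session-close command stream and release the session
 * buffer.
 */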
static void amdgpu_cs_uvd_enc_destroy(void)
{
	struct amdgpu_uvd_enc_bo sw_ctx;
	int len, r;

	if (!uvd_enc_support())
		return;

	num_resources = 0;
	resources[num_resources++] = ib_handle;

	len = 0;
	memcpy((ib_cpu + len), uve_session_info, sizeof(uve_session_info));
	len += sizeof(uve_session_info) / 4;
	ib_cpu[len++] = enc.session.addr >> 32;
	ib_cpu[len++] = enc.session.addr;

	memcpy((ib_cpu + len), uve_task_info, sizeof(uve_task_info));
	len += sizeof(uve_task_info) / 4;
	ib_cpu[len++] = 0xffffffff;
	ib_cpu[len++] = 0x00000002;
	ib_cpu[len++] = 0x00000000;

	memcpy((ib_cpu + len), uve_op_close, sizeof(uve_op_close));
	len += sizeof(uve_op_close) / 4;

	r = submit(len, AMDGPU_HW_IP_UVD_ENC);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.session);
}