*
*/
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
#include <stdio.h>
#include "CUnit/Basic.h"
#include "amdgpu_test.h"
#include "amdgpu_drm.h"
+#include "amdgpu_internal.h"
#define BUFFER_SIZE (4*1024)
#define BUFFER_ALIGN (4*1024)
static void amdgpu_bo_export_import(void);
static void amdgpu_bo_metadata(void);
static void amdgpu_bo_map_unmap(void);
+static void amdgpu_memory_alloc(void);
+static void amdgpu_mem_fail_alloc(void);
+static void amdgpu_bo_find_by_cpu_mapping(void);
CU_TestInfo bo_tests[] = {
{ "Export/Import", amdgpu_bo_export_import },
-#if 0
{ "Metadata", amdgpu_bo_metadata },
-#endif
{ "CPU map/unmap", amdgpu_bo_map_unmap },
+ { "Memory alloc Test", amdgpu_memory_alloc },
+ { "Memory fail alloc Test", amdgpu_mem_fail_alloc },
+ { "Find bo by CPU mapping", amdgpu_bo_find_by_cpu_mapping },
CU_TEST_INFO_NULL,
};
static void amdgpu_bo_export_import(void)
{
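+ /* GEM flink names can only be shared through a primary (card) node,
+ * so the export/import paths cannot be exercised on a render node. */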
+ if (open_render_node) {
+ printf("(DRM render node is used. Skip export/Import test) ");
+ return;
+ }
+
amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_gem_flink_name);
amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_dma_buf_fd);
}
r = amdgpu_bo_cpu_unmap(buffer_handle);
CU_ASSERT_EQUAL(r, 0);
}
+
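+/*
+ * Try a small allocation in each memory domain, using the test suite's
+ * gpu_mem_alloc()/gpu_mem_free() helpers to create and destroy each
+ * buffer.
+ */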
+static void amdgpu_memory_alloc(void)
+{
+ amdgpu_bo_handle bo;
+ amdgpu_va_handle va_handle;
+ uint64_t bo_mc;
+ int r;
+
+ /* Test visible VRAM */
+ bo = gpu_mem_alloc(device_handle,
+ 4096, 4096,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ &bo_mc, &va_handle);
+
+ r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
+ CU_ASSERT_EQUAL(r, 0);
+
+ /* Test invisible VRAM */
+ bo = gpu_mem_alloc(device_handle,
+ 4096, 4096,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ &bo_mc, &va_handle);
+
+ r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
+ CU_ASSERT_EQUAL(r, 0);
+
+ /* Test GART Cacheable */
+ bo = gpu_mem_alloc(device_handle,
+ 4096, 4096,
+ AMDGPU_GEM_DOMAIN_GTT,
+ 0, &bo_mc, &va_handle);
+
+ r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
+ CU_ASSERT_EQUAL(r, 0);
+
+ /* Test GART USWC */
+ bo = gpu_mem_alloc(device_handle,
+ 4096, 4096,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ &bo_mc, &va_handle);
+
+ r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
+ CU_ASSERT_EQUAL(r, 0);
+
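+ /*
+ * GDS, GWS and OA are small on-chip resources with neither a CPU
+ * mapping nor a GPU VA, so no MC address or VA handle is requested.
+ */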
+ /* Test GDS */
+ bo = gpu_mem_alloc(device_handle, 1024, 0,
+ AMDGPU_GEM_DOMAIN_GDS, 0,
+ NULL, NULL);
+ r = gpu_mem_free(bo, NULL, 0, 1024);
+ CU_ASSERT_EQUAL(r, 0);
+
+ /* Test GWS */
+ bo = gpu_mem_alloc(device_handle, 1, 0,
+ AMDGPU_GEM_DOMAIN_GWS, 0,
+ NULL, NULL);
+ r = gpu_mem_free(bo, NULL, 0, 1);
+ CU_ASSERT_EQUAL(r, 0);
+
+ /* Test OA */
+ bo = gpu_mem_alloc(device_handle, 1, 0,
+ AMDGPU_GEM_DOMAIN_OA, 0,
+ NULL, NULL);
+ r = gpu_mem_free(bo, NULL, 0, 1);
+ CU_ASSERT_EQUAL(r, 0);
+}
+
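+/*
+ * An allocation request far larger than any VRAM pool must fail with
+ * -ENOMEM rather than succeed; if it ever does succeed, free the buffer
+ * so later tests are unaffected.
+ */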
+static void amdgpu_mem_fail_alloc(void)
+{
+ int r;
+ struct amdgpu_bo_alloc_request req = {0};
+ amdgpu_bo_handle buf_handle;
+
+ /* Test impossible mem allocation, 1TB */
+ req.alloc_size = 0xE8D4A51000;
+ req.phys_alignment = 4096;
+ req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
+ req.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+
+ r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
+ CU_ASSERT_EQUAL(r, -ENOMEM);
+
+ if (!r) {
+ r = amdgpu_bo_free(buf_handle);
+ CU_ASSERT_EQUAL(r, 0);
+ }
+}
+
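+/*
+ * Allocate and CPU-map a GTT buffer, then check that
+ * amdgpu_find_bo_by_cpu_mapping() resolves the CPU pointer back to the
+ * same buffer object.
+ */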
+static void amdgpu_bo_find_by_cpu_mapping(void)
+{
+ amdgpu_bo_handle bo_handle, find_bo_handle;
+ amdgpu_va_handle va_handle;
+ void *bo_cpu;
+ uint64_t bo_mc_address;
+ uint64_t offset;
+ int r;
+
+ r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ &bo_handle, &bo_cpu,
+ &bo_mc_address, &va_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
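+ /* Look up the BO backing the pointer; since the pointer is the start
+ * of the mapping, the returned offset should be 0. */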
+ r = amdgpu_find_bo_by_cpu_mapping(device_handle,
+ bo_cpu,
+ 4096,
+ &find_bo_handle,
+ &offset);
+ CU_ASSERT_EQUAL(r, 0);
+ CU_ASSERT_EQUAL(offset, 0);
+ CU_ASSERT_EQUAL(bo_handle->handle, find_bo_handle->handle);
+
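+ /* amdgpu_find_bo_by_cpu_mapping() took a reference on the BO it
+ * returned; drop it directly (hence the amdgpu_internal.h include)
+ * before freeing the buffer. */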
+ atomic_dec(&find_bo_handle->refcount, 1);
+ r = amdgpu_bo_unmap_and_free(bo_handle, va_handle,
+ bo_mc_address, 4096);
+ CU_ASSERT_EQUAL(r, 0);
+}