+static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
+{
+ struct drm_amdgpu_info info_args = { 0 };
+
+ info_args.return_pointer = (uintptr_t)dev_info;
+ info_args.return_size = sizeof(*dev_info);
+ info_args.query = AMDGPU_INFO_DEV_INFO;
+
+ return drmCommandWrite(fd, DRM_AMDGPU_INFO, &info_args, sizeof(info_args));
+}
+
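+/*
+ * Set up the state needed for GPU copies on the SDMA engine: a submission
+ * context, a small GTT buffer for the copy packets, a GPU virtual address for
+ * that buffer and a CPU mapping of it.
+ */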
+static int sdma_init(struct amdgpu_priv *priv, int fd)
+{
+ union drm_amdgpu_ctx ctx_args = { { 0 } };
+ union drm_amdgpu_gem_create gem_create = { { 0 } };
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+ struct drm_gem_close gem_close = { 0 };
+ int ret;
+
+ /*
+  * Submitting the BO list as a CS chunk (i.e. without a separate BO list)
+  * requires DRM version 27; skip SDMA setup on older kernels.
+  */
+ if (priv->drm_version < 27)
+ return 0;
+
+ /* Families outside CI..NV would need adjustments to the SDMA copy commands below. */
+ if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)
+ return 0;
+
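+ /* Allocate the GPU context the SDMA submissions will run in. */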
+ ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+ if (ret < 0)
+ return ret;
+
+ priv->sdma_ctx = ctx_args.out.alloc.ctx_id;
+
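+ /* Create a GTT buffer object to hold the copy packets. */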
+ priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
+ gem_create.in.bo_size = priv->sdma_cmdbuf_size;
+ gem_create.in.alignment = 4096;
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));
+ if (ret < 0)
+ goto fail_ctx;
+
+ priv->sdma_cmdbuf_bo = gem_create.out.handle;
+
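+ /* Place the command buffer at the lowest suitably aligned GPU virtual address. */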
+ priv->sdma_cmdbuf_addr =
+ ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);
+
+ /* Map the buffer into the GPU address space so we can use it from the GPU */
+ va_args.handle = priv->sdma_cmdbuf_bo;
+ va_args.operation = AMDGPU_VA_OP_MAP;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
+ va_args.va_address = priv->sdma_cmdbuf_addr;
+ va_args.offset_in_bo = 0;
+ va_args.map_size = priv->sdma_cmdbuf_size;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ goto fail_bo;
+
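+ /* Get an mmap offset for the buffer and map it so the CPU can write packets. */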
+ gem_map.in.handle = priv->sdma_cmdbuf_bo;
+ ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
+ if (ret)
+ goto fail_va;
+
+ priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, gem_map.out.addr_ptr);
+ if (priv->sdma_cmdbuf_map == MAP_FAILED) {
+ priv->sdma_cmdbuf_map = NULL;
+ ret = -ENOMEM;
+ goto fail_va;
+ }
+
+ return 0;
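+ /* On failure, unwind in reverse order of creation. */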
+fail_va:
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = 0;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+fail_bo:
+ gem_close.handle = priv->sdma_cmdbuf_bo;
+ drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+fail_ctx:
+ memset(&ctx_args, 0, sizeof(ctx_args));
+ ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ ctx_args.in.ctx_id = priv->sdma_ctx;
+ drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+ return ret;
+}
+
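+/* Tear down the GPU mapping, buffer object and context created by sdma_init(). */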
+static void sdma_finish(struct amdgpu_priv *priv, int fd)
+{
+ union drm_amdgpu_ctx ctx_args = { { 0 } };
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ struct drm_gem_close gem_close = { 0 };
+
+ if (!priv->sdma_cmdbuf_map)
+ return;
+
+ va_args.handle = priv->sdma_cmdbuf_bo;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = 0;
+ va_args.va_address = priv->sdma_cmdbuf_addr;
+ va_args.offset_in_bo = 0;
+ va_args.map_size = priv->sdma_cmdbuf_size;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+ gem_close.handle = priv->sdma_cmdbuf_bo;
+ drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+
+ ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ ctx_args.in.ctx_id = priv->sdma_ctx;
+ drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+}
+
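+/*
+ * Copy 'size' bytes from src_handle to dst_handle on the SDMA engine. Both
+ * buffers are temporarily mapped into the GPU address space right behind the
+ * command buffer, and the call blocks until the copy has finished.
+ */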
+static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
+ uint64_t size)
+{
+ const uint64_t max_size_per_cmd = 0x3fff00; /* fits the packet's 22-bit byte-count field */
+ const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
+ const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
+ uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
+ uint64_t dst_addr = src_addr + size;
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ unsigned cmd = 0;
+ uint64_t remaining_size = size;
+ uint64_t cur_src_addr = src_addr;
+ uint64_t cur_dst_addr = dst_addr;
+ struct drm_amdgpu_cs_chunk_ib ib = { 0 };
+ struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
+ uint64_t chunk_ptrs[2];
+ union drm_amdgpu_cs cs = { { 0 } };
+ struct drm_amdgpu_bo_list_in bo_list = { 0 };
+ struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
+ union drm_amdgpu_wait_cs wait_cs = { { 0 } };
+ int ret = 0;
+
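+ /* Refuse copies whose packet count would overflow or exceed the command buffer. */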
+ if (size > UINT64_MAX - max_size_per_cmd ||
+ DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)
+ return -ENOMEM;
+
+ /* Map both buffers into the GPU address space so we can access them from the GPU. */
+ va_args.handle = src_handle;
+ va_args.operation = AMDGPU_VA_OP_MAP;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = src_addr;
+ va_args.map_size = size;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ return ret;
+
+ va_args.handle = dst_handle;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = dst_addr;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ goto unmap_src;
+
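+ /* Emit one 7-dword linear-copy packet per max_size_per_cmd bytes. */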
+ while (remaining_size) {
+ uint64_t cur_size = remaining_size;
+ if (cur_size > max_size_per_cmd)
+ cur_size = max_size_per_cmd;
+
+ priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
+ priv->sdma_cmdbuf_map[cmd++] =
+ priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size; /* byte count; AI+ encodes count - 1 */
+ priv->sdma_cmdbuf_map[cmd++] = 0; /* copy parameters (none needed) */
+ priv->sdma_cmdbuf_map[cmd++] = cur_src_addr; /* src address, low dword */
+ priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32; /* src address, high dword */
+ priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr; /* dst address, low dword */
+ priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32; /* dst address, high dword */
+
+ remaining_size -= cur_size;
+ cur_src_addr += cur_size;
+ cur_dst_addr += cur_size;
+ }
+
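+ /* Describe the command buffer as an indirect buffer for the DMA ring. */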
+ ib.va_start = priv->sdma_cmdbuf_addr;
+ ib.ib_bytes = cmd * 4;
+ ib.ip_type = AMDGPU_HW_IP_DMA;
+
+ chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[1].length_dw = sizeof(ib) / 4;
+ chunks[1].chunk_data = (uintptr_t)&ib;
+
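+ /* List every buffer the submission touches so the kernel can validate residency. */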
+ bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
+ bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
+ bo_list_entries[1].bo_handle = src_handle;
+ bo_list_entries[1].bo_priority = 8;
+ bo_list_entries[2].bo_handle = dst_handle;
+ bo_list_entries[2].bo_priority = 8;
+
+ bo_list.bo_number = 3;
+ bo_list.bo_info_size = sizeof(bo_list_entries[0]);
+ bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;
+
+ chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
+ chunks[0].length_dw = sizeof(bo_list) / 4;
+ chunks[0].chunk_data = (uintptr_t)&bo_list;
+
+ chunk_ptrs[0] = (uintptr_t)&chunks[0];
+ chunk_ptrs[1] = (uintptr_t)&chunks[1];
+
+ cs.in.ctx_id = priv->sdma_ctx;
+ cs.in.num_chunks = 2;
+ cs.in.chunks = (uintptr_t)chunk_ptrs;
+
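+ /* Submit the copy to the DMA ring. */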
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
+ if (ret) {
+ drv_log("SDMA copy command buffer submission failed %d\n", ret);
+ goto unmap_dst;
+ }
+
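+ /* Wait synchronously for the copy to complete. */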
+ wait_cs.in.handle = cs.out.handle;
+ wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
+ wait_cs.in.ctx_id = priv->sdma_ctx;
+ wait_cs.in.timeout = INT64_MAX;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
+ if (ret) {
+ drv_log("Could not wait for CS to finish\n");
+ } else if (wait_cs.out.status) {
+ drv_log("Infinite wait timed out, likely GPU hang.\n");
+ ret = -ENODEV;
+ }
+
+unmap_dst:
+ va_args.handle = dst_handle;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = dst_addr;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+unmap_src:
+ va_args.handle = src_handle;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = src_addr;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+ return ret;
+}
+