#include <sys/mman.h>
#include <xf86drm.h>
-#include "addrinterface.h"
+#include "dri.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
-#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
-#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
-#endif
-
// clang-format off
-#define mmCC_RB_BACKEND_DISABLE 0x263d
-#define mmGB_TILE_MODE0 0x2644
-#define mmGB_MACROTILE_MODE0 0x2664
-#define mmGB_ADDR_CONFIG 0x263e
-#define mmMC_ARB_RAMCFG 0x9d8
-
-enum {
- FAMILY_UNKNOWN,
- FAMILY_SI,
- FAMILY_CI,
- FAMILY_KV,
- FAMILY_VI,
- FAMILY_CZ,
- FAMILY_PI,
- FAMILY_LAST,
-};
+#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
// clang-format on
-const static uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888 };
+#define TILE_TYPE_LINEAR 0
+/* DRI backend decides tiling in this case. */
+#define TILE_TYPE_DRI 1
-const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21,
- DRM_FORMAT_NV12 };
-
-static int amdgpu_set_metadata(int fd, uint32_t handle, struct amdgpu_bo_metadata *info)
-{
- struct drm_amdgpu_gem_metadata args = { 0 };
+/* Height alignment for Encoder/Decoder buffers */
+#define CHROME_HEIGHT_ALIGN 16
- if (!info)
- return -EINVAL;
+struct amdgpu_priv {
+ struct dri_driver dri;
+ int drm_version;
- args.handle = handle;
- args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
- args.data.flags = info->flags;
- args.data.tiling_info = info->tiling_info;
+ /* sdma */
+ struct drm_amdgpu_info_device dev_info;
+ uint32_t sdma_ctx;
+ uint32_t sdma_cmdbuf_bo;
+ uint64_t sdma_cmdbuf_addr;
+ uint64_t sdma_cmdbuf_size;
+ uint32_t *sdma_cmdbuf_map;
+};
- if (info->size_metadata > sizeof(args.data.data))
- return -EINVAL;
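+/* Tracks the GTT staging copy backing a CPU mapping of a VRAM/USWC buffer. */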
+struct amdgpu_linear_vma_priv {
+ uint32_t handle;
+ uint32_t map_flags;
+};
- if (info->size_metadata) {
- args.data.data_size_bytes = info->size_metadata;
- memcpy(args.data.data, info->umd_metadata, info->size_metadata);
- }
+const static uint32_t render_target_formats[] = {
+ DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
+};
- return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
-}
+const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
+ DRM_FORMAT_NV21, DRM_FORMAT_NV12,
+ DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };
-static int amdgpu_read_mm_regs(int fd, unsigned dword_offset, unsigned count, uint32_t instance,
- uint32_t flags, uint32_t *values)
+static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
{
- struct drm_amdgpu_info request;
-
- memset(&request, 0, sizeof(request));
- request.return_pointer = (uintptr_t)values;
- request.return_size = count * sizeof(uint32_t);
- request.query = AMDGPU_INFO_READ_MMR_REG;
- request.read_mmr_reg.dword_offset = dword_offset;
- request.read_mmr_reg.count = count;
- request.read_mmr_reg.instance = instance;
- request.read_mmr_reg.flags = flags;
-
- return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
+ struct drm_amdgpu_info info_args = { 0 };
+
+ info_args.return_pointer = (uintptr_t)dev_info;
+ info_args.return_size = sizeof(*dev_info);
+ info_args.query = AMDGPU_INFO_DEV_INFO;
+
+ return drmCommandWrite(fd, DRM_AMDGPU_INFO, &info_args, sizeof(info_args));
}
-static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info)
+static int sdma_init(struct amdgpu_priv *priv, int fd)
{
+ union drm_amdgpu_ctx ctx_args = { { 0 } };
+ union drm_amdgpu_gem_create gem_create = { { 0 } };
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+ struct drm_gem_close gem_close = { 0 };
int ret;
- uint32_t instance;
- if (!gpu_info)
- return -EINVAL;
+ /* Ensure we can make a submission without BO lists. */
+ if (priv->drm_version < 27)
+ return 0;
- instance = AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT;
+ /* Anything outside this range needs adjustments to the SDMA copy commands */
+ if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)
+ return 0;
- ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0,
- &gpu_info->backend_disable[0]);
- if (ret)
- return ret;
- /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
- gpu_info->backend_disable[0] = (gpu_info->backend_disable[0] >> 16) & 0xff;
+ ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
- ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0, gpu_info->gb_tile_mode);
- if (ret)
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+ if (ret < 0)
return ret;
- ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0,
- gpu_info->gb_macro_tile_mode);
- if (ret)
- return ret;
+ priv->sdma_ctx = ctx_args.out.alloc.ctx_id;
+
+ priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
+ gem_create.in.bo_size = priv->sdma_cmdbuf_size;
+ gem_create.in.alignment = 4096;
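+ /* GTT placement keeps the command buffer CPU-writable through mmap. */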
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));
+ if (ret < 0)
+ goto fail_ctx;
- ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0, &gpu_info->gb_addr_cfg);
+ priv->sdma_cmdbuf_bo = gem_create.out.handle;
+
+ priv->sdma_cmdbuf_addr =
+ ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);
+
+ /* Map the buffer into the GPU address space so we can use it from the GPU */
+ va_args.handle = priv->sdma_cmdbuf_bo;
+ va_args.operation = AMDGPU_VA_OP_MAP;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
+ va_args.va_address = priv->sdma_cmdbuf_addr;
+ va_args.offset_in_bo = 0;
+ va_args.map_size = priv->sdma_cmdbuf_size;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
if (ret)
- return ret;
+ goto fail_bo;
- ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0, &gpu_info->mc_arb_ramcfg);
+ gem_map.in.handle = priv->sdma_cmdbuf_bo;
+ ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
if (ret)
- return ret;
+ goto fail_va;
+
+ priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ fd, gem_map.out.addr_ptr);
+ if (priv->sdma_cmdbuf_map == MAP_FAILED) {
+ priv->sdma_cmdbuf_map = NULL;
+ ret = -ENOMEM;
+ goto fail_va;
+ }
return 0;
+fail_va:
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = 0;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+fail_bo:
+ gem_close.handle = priv->sdma_cmdbuf_bo;
+ drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+fail_ctx:
+ memset(&ctx_args, 0, sizeof(ctx_args));
+ ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ ctx_args.in.ctx_id = priv->sdma_ctx;
+ drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+ return ret;
}
-static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in)
-{
- return malloc(in->sizeInBytes);
-}
-
-static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in)
+static void sdma_finish(struct amdgpu_priv *priv, int fd)
{
- free(in->pVirtAddr);
- return ADDR_OK;
+ union drm_amdgpu_ctx ctx_args = { { 0 } };
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ struct drm_gem_close gem_close = { 0 };
+
+ if (!priv->sdma_cmdbuf_map)
+ return;
+
+ va_args.handle = priv->sdma_cmdbuf_bo;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = 0;
+ va_args.va_address = priv->sdma_cmdbuf_addr;
+ va_args.offset_in_bo = 0;
+ va_args.map_size = priv->sdma_cmdbuf_size;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+ gem_close.handle = priv->sdma_cmdbuf_bo;
+ drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+
+ ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+ ctx_args.in.ctx_id = priv->sdma_ctx;
+ drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
}
-static int amdgpu_addrlib_compute(void *addrlib, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags, uint32_t *tiling_flags,
- ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out)
+static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
+ uint64_t size)
{
- ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = { 0 };
- ADDR_TILEINFO addr_tile_info = { 0 };
- ADDR_TILEINFO addr_tile_info_out = { 0 };
- uint32_t bits_per_pixel;
-
- addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
-
- /* Set the requested tiling mode. */
- addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1;
- if (use_flags &
- (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
- addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED;
- else if (width <= 16 || height <= 16)
- addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1;
-
- bits_per_pixel = drv_stride_from_format(format, 1, 0) * 8;
- /* Bits per pixel should be calculated from format*/
- addr_surf_info_in.bpp = bits_per_pixel;
- addr_surf_info_in.numSamples = 1;
- addr_surf_info_in.width = width;
- addr_surf_info_in.height = height;
- addr_surf_info_in.numSlices = 1;
- addr_surf_info_in.pTileInfo = &addr_tile_info;
- addr_surf_info_in.tileIndex = -1;
-
- /* This disables incorrect calculations (hacks) in addrlib. */
- addr_surf_info_in.flags.noStencil = 1;
-
- /* Set the micro tile type. */
- if (use_flags & BO_USE_SCANOUT)
- addr_surf_info_in.tileType = ADDR_DISPLAYABLE;
- else
- addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE;
-
- addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
- addr_out->pTileInfo = &addr_tile_info_out;
+ const uint64_t max_size_per_cmd = 0x3fff00;
+ const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
+ const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
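+ /* VA layout: the source and destination windows directly follow the command buffer. */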
+ uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
+ uint64_t dst_addr = src_addr + size;
+ struct drm_amdgpu_gem_va va_args = { 0 };
+ unsigned cmd = 0;
+ uint64_t remaining_size = size;
+ uint64_t cur_src_addr = src_addr;
+ uint64_t cur_dst_addr = dst_addr;
+ struct drm_amdgpu_cs_chunk_ib ib = { 0 };
+ struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
+ uint64_t chunk_ptrs[2];
+ union drm_amdgpu_cs cs = { { 0 } };
+ struct drm_amdgpu_bo_list_in bo_list = { 0 };
+ struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
+ union drm_amdgpu_wait_cs wait_cs = { { 0 } };
+ int ret = 0;
+
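+ /* Reject copies that would overflow the VA window or exceed the command buffer capacity. */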
+ if (size > UINT64_MAX - max_size_per_cmd ||
+ DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)
+ return -ENOMEM;
+
+ /* Map both buffers into the GPU address space so we can access them from the GPU. */
+ va_args.handle = src_handle;
+ va_args.operation = AMDGPU_VA_OP_MAP;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = src_addr;
+ va_args.map_size = size;
+
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ return ret;
- if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in, addr_out) != ADDR_OK)
- return -EINVAL;
+ va_args.handle = dst_handle;
+ va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = dst_addr;
- ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = { 0 };
- ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = { 0 };
- ADDR_TILEINFO s_tile_hw_info_out = { 0 };
+ ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+ if (ret)
+ goto unmap_src;
+
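+ /*
+ * Build SDMA linear-copy packets, 7 dwords each: opcode, byte count
+ * (encoded as size - 1 on GFX9 and newer), parameters, then the source
+ * and destination addresses split into lo/hi dwords.
+ */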
+ while (remaining_size) {
+ uint64_t cur_size = remaining_size;
+ if (cur_size > max_size_per_cmd)
+ cur_size = max_size_per_cmd;
+
+ priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
+ priv->sdma_cmdbuf_map[cmd++] =
+ priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size;
+ priv->sdma_cmdbuf_map[cmd++] = 0;
+ priv->sdma_cmdbuf_map[cmd++] = cur_src_addr;
+ priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32;
+ priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr;
+ priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32;
+
+ remaining_size -= cur_size;
+ cur_src_addr += cur_size;
+ cur_dst_addr += cur_size;
+ }
- s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT);
- /* Convert from real value to HW value */
- s_in.reverse = 0;
- s_in.pTileInfo = &addr_tile_info_out;
- s_in.tileIndex = -1;
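+ /* Submit the packets as a single indirect buffer on the SDMA ring. */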
+ ib.va_start = priv->sdma_cmdbuf_addr;
+ ib.ib_bytes = cmd * 4;
+ ib.ip_type = AMDGPU_HW_IP_DMA;
- s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT);
- s_out.pTileInfo = &s_tile_hw_info_out;
+ chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
+ chunks[1].length_dw = sizeof(ib) / 4;
+ chunks[1].chunk_data = (uintptr_t)&ib;
- if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK)
- return -EINVAL;
+ bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
+ bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
+ bo_list_entries[1].bo_handle = src_handle;
+ bo_list_entries[1].bo_priority = 8;
+ bo_list_entries[2].bo_handle = dst_handle;
+ bo_list_entries[2].bo_priority = 8;
- if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1)
- /* 2D_TILED_THIN1 */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);
- else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1)
- /* 1D_TILED_THIN1 */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2);
- else
- /* LINEAR_ALIGNED */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1);
-
- *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, drv_log_base2(addr_tile_info_out.bankWidth));
- *tiling_flags |=
- AMDGPU_TILING_SET(BANK_HEIGHT, drv_log_base2(addr_tile_info_out.bankHeight));
- *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, s_tile_hw_info_out.tileSplitBytes);
- *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT,
- drv_log_base2(addr_tile_info_out.macroAspectRatio));
- *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, s_tile_hw_info_out.pipeConfig);
- *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks);
+ bo_list.bo_number = 3;
+ bo_list.bo_info_size = sizeof(bo_list_entries[0]);
+ bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;
- return 0;
-}
+ chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
+ chunks[0].length_dw = sizeof(bo_list) / 4;
+ chunks[0].chunk_data = (uintptr_t)&bo_list;
-static void *amdgpu_addrlib_init(int fd)
-{
- int ret;
- ADDR_CREATE_INPUT addr_create_input = { 0 };
- ADDR_CREATE_OUTPUT addr_create_output = { 0 };
- ADDR_REGISTER_VALUE reg_value = { 0 };
- ADDR_CREATE_FLAGS create_flags = { { 0 } };
- ADDR_E_RETURNCODE addr_ret;
+ chunk_ptrs[0] = (uintptr_t)&chunks[0];
+ chunk_ptrs[1] = (uintptr_t)&chunks[1];
- addr_create_input.size = sizeof(ADDR_CREATE_INPUT);
- addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT);
+ cs.in.ctx_id = priv->sdma_ctx;
+ cs.in.num_chunks = 2;
+ cs.in.chunks = (uintptr_t)chunk_ptrs;
- struct amdgpu_gpu_info gpu_info = { 0 };
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
+ if (ret) {
+ drv_log("SDMA copy command buffer submission failed %d\n", ret);
+ goto unmap_dst;
+ }
- ret = amdgpu_query_gpu(fd, &gpu_info);
+ wait_cs.in.handle = cs.out.handle;
+ wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
+ wait_cs.in.ctx_id = priv->sdma_ctx;
+ wait_cs.in.timeout = INT64_MAX;
+ ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
if (ret) {
- fprintf(stderr, "[%s]failed with error =%d\n", __func__, ret);
- return NULL;
+ drv_log("Could not wait for CS to finish\n");
+ } else if (wait_cs.out.status) {
+ drv_log("Infinite wait timed out, likely GPU hang.\n");
+ ret = -ENODEV;
}
- reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3;
- reg_value.gbAddrConfig = gpu_info.gb_addr_cfg;
- reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2;
-
- reg_value.backendDisables = gpu_info.backend_disable[0];
- reg_value.pTileConfig = gpu_info.gb_tile_mode;
- reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode) / sizeof(gpu_info.gb_tile_mode[0]);
- reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode;
- reg_value.noOfMacroEntries =
- sizeof(gpu_info.gb_macro_tile_mode) / sizeof(gpu_info.gb_macro_tile_mode[0]);
- create_flags.value = 0;
- create_flags.useTileIndex = 1;
-
- addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
-
- addr_create_input.chipFamily = FAMILY_CZ;
- addr_create_input.createFlags = create_flags;
- addr_create_input.callbacks.allocSysMem = alloc_sys_mem;
- addr_create_input.callbacks.freeSysMem = free_sys_mem;
- addr_create_input.callbacks.debugPrint = 0;
- addr_create_input.regValue = reg_value;
-
- addr_ret = AddrCreate(&addr_create_input, &addr_create_output);
-
- if (addr_ret != ADDR_OK) {
- fprintf(stderr, "[%s]failed error =%d\n", __func__, addr_ret);
- return NULL;
- }
+unmap_dst:
+ va_args.handle = dst_handle;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = dst_addr;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
- return addr_create_output.hLib;
+unmap_src:
+ va_args.handle = src_handle;
+ va_args.operation = AMDGPU_VA_OP_UNMAP;
+ va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+ va_args.va_address = src_addr;
+ drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+ return ret;
}
static int amdgpu_init(struct driver *drv)
{
- int ret;
- void *addrlib;
+ struct amdgpu_priv *priv;
+ drmVersionPtr drm_version;
struct format_metadata metadata;
uint64_t use_flags = BO_USE_RENDER_MASK;
- addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
- if (!addrlib)
- return -1;
+ priv = calloc(1, sizeof(struct amdgpu_priv));
+ if (!priv)
+ return -ENOMEM;
- drv->priv = addrlib;
+ drm_version = drmGetVersion(drv_get_fd(drv));
+ if (!drm_version) {
+ free(priv);
+ return -ENODEV;
+ }
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
+ priv->drm_version = drm_version->version_minor;
+ drmFreeVersion(drm_version);
- drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_SCANOUT);
+ drv->priv = priv;
- metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
- metadata.priority = 2;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
+ if (query_dev_info(drv_get_fd(drv), &priv->dev_info)) {
+ free(priv);
+ drv->priv = NULL;
+ return -ENODEV;
+ }
+ if (dri_init(drv, DRI_PATH, "radeonsi")) {
+ free(priv);
+ drv->priv = NULL;
+ return -ENODEV;
+ }
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
+ if (sdma_init(priv, drv_get_fd(drv))) {
+ drv_log("SDMA init failed\n");
+
+ /* Continue, as we can still successfully map things without SDMA. */
+ }
+
+ metadata.tiling = TILE_TYPE_LINEAR;
+ metadata.priority = 1;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
+
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &metadata, BO_USE_TEXTURE_MASK);
+
+ /* NV12 format for camera, display, decoding and encoding. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+
+ /* Android CTS tests require this. */
+ drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
+
+ /* Linear formats supported by display. */
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
- metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
- metadata.priority = 3;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
-
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
-
+ drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
+
+ drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
+
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
+ * from camera and input/output from hardware decoder/encoder.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
+
+ /*
+ * The following formats will be allocated by the DRI backend and may be tiled.
+ * Since format modifier support hasn't been fully implemented yet, it's not
+ * possible to enumerate the different types of buffers (like i915 can).
+ */
+ use_flags &= ~BO_USE_RENDERSCRIPT;
use_flags &= ~BO_USE_SW_WRITE_OFTEN;
use_flags &= ~BO_USE_SW_READ_OFTEN;
use_flags &= ~BO_USE_LINEAR;
- metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
- metadata.priority = 4;
+ metadata.tiling = TILE_TYPE_DRI;
+ metadata.priority = 2;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
+ /* Potentially tiled formats supported by display. */
+ drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
- metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
- metadata.priority = 5;
-
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
-
- return ret;
+ drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
+ return 0;
}
static void amdgpu_close(struct driver *drv)
{
- AddrDestroy(drv->priv);
+ sdma_finish(drv->priv, drv_get_fd(drv));
+ dri_close(drv);
+ free(drv->priv);
drv->priv = NULL;
}
-static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint64_t use_flags)
+static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
- void *addrlib = bo->drv->priv;
- union drm_amdgpu_gem_create gem_create;
- struct amdgpu_bo_metadata metadata = { 0 };
- ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = { 0 };
- uint32_t tiling_flags = 0;
- size_t plane;
int ret;
+ size_t num_planes;
+ uint32_t plane, stride;
+ union drm_amdgpu_gem_create gem_create = { { 0 } };
+ struct amdgpu_priv *priv = bo->drv->priv;
+
+ stride = drv_stride_from_format(format, width, 0);
+ num_planes = drv_num_planes_from_format(format);
+
+ /*
+ * For multiplane formats, align the stride to 512 to ensure that subsampled plane
+ * strides are 256-aligned. This uses more memory than necessary since the first plane
+ * only needs to be 256-aligned, but it's acceptable as a short-term fix. It's probably
+ * safe for other GPU families, but let's restrict it to Raven for now (b/171013552).
+ */
+ if (priv->dev_info.family == AMDGPU_FAMILY_RV && num_planes > 1)
+ stride = ALIGN(stride, 512);
+ else
+ stride = ALIGN(stride, 256);
- if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21) {
- drv_bo_from_format(bo, ALIGN(width, 64), height, format);
- } else {
- if (amdgpu_addrlib_compute(addrlib, width, height, format, use_flags, &tiling_flags,
- &addr_out) < 0)
- return -EINVAL;
+ /*
+ * Currently, the allocator used by Chrome aligns the height for
+ * encoder/decoder buffers, while the allocator used by Android
+ * (gralloc/minigbm) doesn't provide any alignment.
+ *
+ * See b/153130069
+ */
+ if (use_flags & (BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER))
+ height = ALIGN(height, CHROME_HEIGHT_ALIGN);
- bo->tiling = tiling_flags;
- /* RGB has 1 plane only */
- bo->offsets[0] = 0;
- bo->total_size = bo->sizes[0] = addr_out.surfSize;
- bo->strides[0] = addr_out.pixelPitch * DIV_ROUND_UP(addr_out.pixelBits, 8);
- }
+ drv_bo_from_format(bo, stride, height, format);
+
+ gem_create.in.bo_size =
+ ALIGN(bo->meta.total_size, priv->dev_info.virtual_address_alignment);
+ gem_create.in.alignment = 256;
+ gem_create.in.domain_flags = 0;
+
+ if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- memset(&gem_create, 0, sizeof(gem_create));
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ /* Scanout in GTT requires USWC; otherwise try to use cacheable memory
+ * for buffers that are read often, because uncacheable reads can be
+ * very slow. USWC should be faster on the GPU, though. */
+ if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- gem_create.in.bo_size = bo->total_size;
- gem_create.in.alignment = addr_out.baseAlign;
- /* Set the placement. */
- gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
- gem_create.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
/* Allocate the buffer with the preferred heap. */
ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
sizeof(gem_create));
-
if (ret < 0)
return ret;
- metadata.tiling_info = tiling_flags;
-
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->meta.num_planes; plane++)
bo->handles[plane].u32 = gem_create.out.handle;
- ret = amdgpu_set_metadata(drv_get_fd(bo->drv), bo->handles[0].u32, &metadata);
+ bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR;
- return ret;
+ return 0;
+}
+
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ struct combination *combo;
+
+ combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ if (combo->metadata.tiling == TILE_TYPE_DRI) {
+ bool needs_alignment = false;
+#ifdef __ANDROID__
+ /*
+ * Currently, the gralloc API doesn't differentiate between allocation-time
+ * and map-time strides. A workaround for amdgpu DRI buffers is to always
+ * align to 256 at allocation time.
+ *
+ * See b/115946221, b/117942643
+ */
+ if (use_flags & (BO_USE_SW_MASK))
+ needs_alignment = true;
+#endif
+ // See b/122049612
+ if (use_flags & (BO_USE_SCANOUT))
+ needs_alignment = true;
+
+ if (needs_alignment) {
+ uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
+ width = ALIGN(width, 256 / bytes_per_pixel);
+ }
+
+ return dri_bo_create(bo, width, height, format, use_flags);
+ }
+
+ return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
}
-static void *amdgpu_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags)
+static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, const uint64_t *modifiers,
+ uint32_t count)
{
+ bool only_use_linear = true;
+
+ for (uint32_t i = 0; i < count; ++i)
+ if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
+ only_use_linear = false;
+
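+ /*
+ * If the caller only allows linear, allocate directly (scanout usage is
+ * assumed here, since no use flags are passed in); otherwise let the DRI
+ * backend pick from the modifier list.
+ */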
+ if (only_use_linear)
+ return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);
+
+ return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count);
+}
+
+static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+{
+ bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR;
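+ /* Without an explicit modifier, fall back to the tiling type negotiated for
+ * this format/use_flags combination. */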
+ if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+ struct combination *combo;
+ combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI;
+ }
+
+ if (dri_tiling)
+ return dri_bo_import(bo, data);
+ else
+ return drv_prime_bo_import(bo, data);
+}
+
+static int amdgpu_destroy_bo(struct bo *bo)
+{
+ if (bo->priv)
+ return dri_bo_destroy(bo);
+ else
+ return drv_gem_bo_destroy(bo);
+}
+
+static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ void *addr = MAP_FAILED;
int ret;
- union drm_amdgpu_gem_mmap gem_map;
+ union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+ struct drm_amdgpu_gem_create_in bo_info = { 0 };
+ struct drm_amdgpu_gem_op gem_op = { 0 };
+ uint32_t handle = bo->handles[plane].u32;
+ struct amdgpu_linear_vma_priv *priv = NULL;
+ struct amdgpu_priv *drv_priv;
+
+ if (bo->priv)
+ return dri_bo_map(bo, vma, plane, map_flags);
+
+ drv_priv = bo->drv->priv;
+ gem_op.handle = handle;
+ gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
+ gem_op.value = (uintptr_t)&bo_info;
+
+ ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
+ if (ret)
+ return MAP_FAILED;
- memset(&gem_map, 0, sizeof(gem_map));
- gem_map.in.handle = bo->handles[plane].u32;
+ vma->length = bo_info.bo_size;
+
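+ /*
+ * CPU reads from VRAM or write-combined (USWC) memory are very slow, so
+ * stage the contents in a cacheable GTT buffer with an SDMA copy and map
+ * that instead.
+ */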
+ if (((bo_info.domains & AMDGPU_GEM_DOMAIN_VRAM) ||
+ (bo_info.domain_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)) &&
+ drv_priv->sdma_cmdbuf_map) {
+ union drm_amdgpu_gem_create gem_create = { { 0 } };
+
+ priv = calloc(1, sizeof(struct amdgpu_linear_vma_priv));
+ if (!priv)
+ return MAP_FAILED;
+
+ gem_create.in.bo_size = bo_info.bo_size;
+ gem_create.in.alignment = 4096;
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_CREATE, &gem_create,
+ sizeof(gem_create));
+ if (ret < 0) {
+ drv_log("GEM create failed\n");
+ free(priv);
+ return MAP_FAILED;
+ }
+
+ priv->map_flags = map_flags;
+ handle = priv->handle = gem_create.out.handle;
+
+ ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handles[0].u32, priv->handle,
+ bo_info.bo_size);
+ if (ret) {
+ drv_log("SDMA copy for read failed\n");
+ goto fail;
+ }
+ }
+ gem_map.in.handle = handle;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
- return MAP_FAILED;
+ drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
+ goto fail;
}
- data->length = bo->total_size;
-
- return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.out.addr_ptr);
+ if (addr == MAP_FAILED)
+ goto fail;
+
+ vma->priv = priv;
+ return addr;
+
+fail:
+ if (priv) {
+ struct drm_gem_close gem_close = { 0 };
+ gem_close.handle = priv->handle;
+ drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ free(priv);
+ }
+ return MAP_FAILED;
+}
+
+static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
+{
+ if (bo->priv)
+ return dri_bo_unmap(bo, vma);
+ else {
+ int r = munmap(vma->addr, vma->length);
+ if (r)
+ return r;
+
+ if (vma->priv) {
+ struct amdgpu_linear_vma_priv *priv = vma->priv;
+ struct drm_gem_close gem_close = { 0 };
+
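+ /* The mapping was writable, so flush the staged data back to the original BO. */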
+ if (BO_MAP_WRITE & priv->map_flags) {
+ r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
+ bo->handles[0].u32, vma->length);
+ if (r)
+ return r;
+ }
+
+ gem_close.handle = priv->handle;
+ r = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ free(priv);
+ }
+
+ return 0;
+ }
+}
+
+static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ int ret;
+ union drm_amdgpu_gem_wait_idle wait_idle = { { 0 } };
+
+ if (bo->priv)
+ return 0;
+
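+ /* Wait for in-flight GPU work on this BO (e.g. an SDMA staging copy) to
+ * finish before the CPU accesses it. */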
+ wait_idle.in.handle = bo->handles[0].u32;
+ wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE;
+
+ ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &wait_idle,
+ sizeof(wait_idle));
+
+ if (ret < 0) {
+ drv_log("DRM_AMDGPU_GEM_WAIT_IDLE failed with %d\n", ret);
+ return ret;
+ }
+
+ if (ret == 0 && wait_idle.out.status)
+ drv_log("DRM_AMDGPU_GEM_WAIT_IDLE BO is busy\n");
+
+ return 0;
}
-static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags)
+static uint32_t amdgpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
switch (format) {
+ case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ /* Camera subsystem requires NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ return DRM_FORMAT_NV12;
+ /* HACK: See b/28671744 */
+ return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
return DRM_FORMAT_NV12;
default:
return format;
}
}
-struct backend backend_amdgpu = {
+const struct backend backend_amdgpu = {
.name = "amdgpu",
.init = amdgpu_init,
.close = amdgpu_close,
- .bo_create = amdgpu_bo_create,
- .bo_destroy = drv_gem_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = amdgpu_bo_map,
- .bo_unmap = drv_bo_munmap,
+ .bo_create = amdgpu_create_bo,
+ .bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
+ .bo_destroy = amdgpu_destroy_bo,
+ .bo_import = amdgpu_import_bo,
+ .bo_map = amdgpu_map_bo,
+ .bo_unmap = amdgpu_unmap_bo,
+ .bo_invalidate = amdgpu_bo_invalidate,
.resolve_format = amdgpu_resolve_format,
+ .num_planes_from_modifier = dri_num_planes_from_modifier,
};
#endif