};
// clang-format on
+/* Per-driver private state stored in drv->priv. */
+struct amdgpu_priv {
+ /* Opaque AMD addrlib handle from amdgpu_addrlib_init(); torn down via AddrDestroy() in amdgpu_close(). */
+ void *addrlib;
+ /* DRM minor version from drmGetVersion(); >= 21 enables AMDGPU_GEM_CREATE_EXPLICIT_SYNC in amdgpu_bo_create(). */
+ int drm_version;
+};
+
const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21,
- DRM_FORMAT_NV12 };
+ /* DRM_FORMAT_YVU420_ANDROID gets a dedicated 128-byte-aligned linear path in amdgpu_bo_create(). */
+ DRM_FORMAT_NV12, DRM_FORMAT_YVU420_ANDROID };
static int amdgpu_set_metadata(int fd, uint32_t handle, struct amdgpu_bo_metadata *info)
{
ret = amdgpu_query_gpu(fd, &gpu_info);
if (ret) {
- fprintf(stderr, "[%s]failed with error =%d\n", __func__, ret);
+ /* NOTE(review): assumes drv_log() tags messages with the calling function —
+  * confirm, since the explicit "[%s]"/__func__ context is dropped here. */
+ drv_log("failed with error =%d\n", ret);
return NULL;
}
addr_ret = AddrCreate(&addr_create_input, &addr_create_output);
if (addr_ret != ADDR_OK) {
- fprintf(stderr, "[%s]failed error =%d\n", __func__, addr_ret);
+ drv_log("failed error =%d\n", addr_ret);
return NULL;
}
static int amdgpu_init(struct driver *drv)
{
- int ret;
- void *addrlib;
+ struct amdgpu_priv *priv;
+ drmVersionPtr drm_version;
struct format_metadata metadata;
uint64_t use_flags = BO_USE_RENDER_MASK;
- addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
- if (!addrlib)
+ /* drv->priv now wraps both addrlib and the DRM minor version; freed in amdgpu_close(). */
+ priv = calloc(1, sizeof(struct amdgpu_priv));
+ if (!priv)
return -1;
- drv->priv = addrlib;
+ /* Cache the DRM minor version; amdgpu_bo_create() uses it to gate explicit sync. */
+ drm_version = drmGetVersion(drv_get_fd(drv));
+ if (!drm_version) {
+ free(priv);
+ return -1;
+ }
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
+ priv->drm_version = drm_version->version_minor;
+ drmFreeVersion(drm_version);
+
+ priv->addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
+ if (!priv->addrlib) {
+ free(priv);
+ return -1;
+ }
+
+ drv->priv = priv;
+
+ /* NOTE(review): drv_add_combinations() return values are now ignored throughout —
+  * presumably it can no longer fail; confirm before relying on this. */
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
/* YUV format for camera */
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
metadata.priority = 2;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
metadata.priority = 3;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
use_flags &= ~BO_USE_SW_WRITE_OFTEN;
use_flags &= ~BO_USE_SW_READ_OFTEN;
metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
metadata.priority = 4;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
metadata.priority = 5;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, use_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
- return ret;
+ return 0;
}
static void amdgpu_close(struct driver *drv)
{
- AddrDestroy(drv->priv);
+ struct amdgpu_priv *priv = (struct amdgpu_priv *)drv->priv;
+ /* Tear down addrlib first, then release the wrapper allocated in amdgpu_init(). */
+ AddrDestroy(priv->addrlib);
+ free(priv);
drv->priv = NULL;
}
static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
- void *addrlib = bo->drv->priv;
+ struct amdgpu_priv *priv = (struct amdgpu_priv *)bo->drv->priv;
+ void *addrlib = priv->addrlib;
union drm_amdgpu_gem_create gem_create;
struct amdgpu_bo_metadata metadata = { 0 };
ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = { 0 };
if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21) {
drv_bo_from_format(bo, ALIGN(width, 64), height, format);
+ } else if (format == DRM_FORMAT_YVU420_ANDROID) {
+ /* NOTE(review): 128-byte stride alignment for Android flexible YUV —
+  * confirm this matches the gralloc consumer's expectations. */
+ drv_bo_from_format(bo, ALIGN(width, 128), height, format);
} else {
if (amdgpu_addrlib_compute(addrlib, width, height, format, use_flags, &tiling_flags,
&addr_out) < 0)
gem_create.in.bo_size = bo->total_size;
gem_create.in.alignment = addr_out.baseAlign;
/* Set the placement. */
+
+ gem_create.in.domain_flags = 0;
+ /* Request CPU-visible placement only when the buffer is actually CPU-mapped. */
+ if (use_flags & (BO_USE_LINEAR | BO_USE_SW))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ if (use_flags & (BO_USE_SCANOUT | BO_USE_CURSOR)) {
+ /* TODO(dbehr) do not use VRAM after we enable display VM */
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
+ } else {
+ /* Non-display buffers go to GTT; use write-combine unless the CPU reads often. */
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+ if (!(use_flags & BO_USE_SW_READ_OFTEN))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ }
+
+ /*
+  * If drm_version >= 21, everything exposes explicit synchronization
+  * primitives and chromeos/arc++ will use them. Disable implicit
+  * synchronization.
+  */
+ if (priv->drm_version >= 21) {
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+ }
+
/* Allocate the buffer with the preferred heap. */
ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
sizeof(gem_create));
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
+ /* drv_log() replaces the hand-rolled "drv:" stderr prefix for consistent logging. */
+ drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
return MAP_FAILED;
}