Update Android.bp after merge
[android-x86/external-minigbm.git] / amdgpu.c
index 3bf5eb2..93681cb 100644
--- a/amdgpu.c
+++ b/amdgpu.c
 #include "helpers.h"
 #include "util.h"
 
-#ifdef __ANDROID__
-#define DRI_PATH "/vendor/lib/dri/radeonsi_dri.so"
-#else
-#define DRI_PATH "/usr/lib64/dri/radeonsi_dri.so"
-#endif
+// clang-format off
+#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
+// clang-format on
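
The hardcoded paths are replaced by a build-time DRI_DRIVER_DIR define that is stringized at the use site. A minimal sketch of the two-level stringization this relies on (the real STRINGIZE presumably lives in util.h; the macro names here are illustrative):

#define STRINGIZE_NO_EXPANSION(x) #x
#define STRINGIZE(x) STRINGIZE_NO_EXPANSION(x)

/* With e.g. -DDRI_DRIVER_DIR=/vendor/lib/dri, DRI_PATH expands to
 * "/vendor/lib/dri/radeonsi_dri.so". A single-level #x would instead
 * produce the literal string "DRI_DRIVER_DIR/radeonsi_dri.so". */
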
 
 #define TILE_TYPE_LINEAR 0
 /* DRI backend decides tiling in this case. */
 #define TILE_TYPE_DRI 1
 
+/* Height alignment for encoder/decoder buffers */
+#define CHROME_HEIGHT_ALIGN 16
+
 struct amdgpu_priv {
        struct dri_driver dri;
        int drm_version;
+
+       /* sdma */
+       struct drm_amdgpu_info_device dev_info;
+       uint32_t sdma_ctx;
+       uint32_t sdma_cmdbuf_bo;
+       uint64_t sdma_cmdbuf_addr;
+       uint64_t sdma_cmdbuf_size;
+       uint32_t *sdma_cmdbuf_map;
 };
 
-const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
-                                                 DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
-                                                 DRM_FORMAT_XRGB8888 };
+struct amdgpu_linear_vma_priv {
+       uint32_t handle;
+       uint32_t map_flags;
+};
+
+const static uint32_t render_target_formats[] = {
+       DRM_FORMAT_ABGR8888,    DRM_FORMAT_ARGB8888,    DRM_FORMAT_RGB565,
+       DRM_FORMAT_XBGR8888,    DRM_FORMAT_XRGB8888,    DRM_FORMAT_ABGR2101010,
+       DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
+};
+
+const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88,          DRM_FORMAT_R8,
+                                                  DRM_FORMAT_NV21,           DRM_FORMAT_NV12,
+                                                  DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };
+
+static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
+{
+       struct drm_amdgpu_info info_args = { 0 };
+
+       info_args.return_pointer = (uintptr_t)dev_info;
+       info_args.return_size = sizeof(*dev_info);
+       info_args.query = AMDGPU_INFO_DEV_INFO;
+
+       return drmCommandWrite(fd, DRM_AMDGPU_INFO, &info_args, sizeof(info_args));
+}
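
A minimal usage sketch, assuming an already-open amdgpu render-node fd; the fields shown are the ones this change actually consumes:

struct drm_amdgpu_info_device dev_info = { 0 };

if (!query_dev_info(fd, &dev_info))
        drv_log("family %u, va offset 0x%llx, va alignment %u\n", dev_info.family,
                (unsigned long long)dev_info.virtual_address_offset,
                dev_info.virtual_address_alignment);
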
+
+static int sdma_init(struct amdgpu_priv *priv, int fd)
+{
+       union drm_amdgpu_ctx ctx_args = { { 0 } };
+       union drm_amdgpu_gem_create gem_create = { { 0 } };
+       struct drm_amdgpu_gem_va va_args = { 0 };
+       union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+       struct drm_gem_close gem_close = { 0 };
+       int ret;
+
+       /* Ensure we can make a submission without BO lists. */
+       if (priv->drm_version < 27)
+               return 0;
+
+       /* Anything outside this range needs adjustments to the SDMA copy commands */
+       if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)
+               return 0;
+
+       ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
+
+       ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+       if (ret < 0)
+               return ret;
+
+       priv->sdma_ctx = ctx_args.out.alloc.ctx_id;
+
+       priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
+       gem_create.in.bo_size = priv->sdma_cmdbuf_size;
+       gem_create.in.alignment = 4096;
+       gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+       ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));
+       if (ret < 0)
+               goto fail_ctx;
+
+       priv->sdma_cmdbuf_bo = gem_create.out.handle;
+
+       priv->sdma_cmdbuf_addr =
+           ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);
+
+       /* Map the buffer into the GPU address space so we can use it from the GPU */
+       va_args.handle = priv->sdma_cmdbuf_bo;
+       va_args.operation = AMDGPU_VA_OP_MAP;
+       va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
+       va_args.va_address = priv->sdma_cmdbuf_addr;
+       va_args.offset_in_bo = 0;
+       va_args.map_size = priv->sdma_cmdbuf_size;
+
+       ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+       if (ret)
+               goto fail_bo;
+
+       gem_map.in.handle = priv->sdma_cmdbuf_bo;
+       ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
+       if (ret)
+               goto fail_va;
+
+       priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                                    fd, gem_map.out.addr_ptr);
+       if (priv->sdma_cmdbuf_map == MAP_FAILED) {
+               priv->sdma_cmdbuf_map = NULL;
+               ret = -ENOMEM;
+               goto fail_va;
+       }
+
+       return 0;
+fail_va:
+       va_args.operation = AMDGPU_VA_OP_UNMAP;
+       va_args.flags = 0;
+       drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+fail_bo:
+       gem_close.handle = priv->sdma_cmdbuf_bo;
+       drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+fail_ctx:
+       memset(&ctx_args, 0, sizeof(ctx_args));
+       ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+       ctx_args.in.ctx_id = priv->sdma_ctx;
+       drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+       return ret;
+}
+
+static void sdma_finish(struct amdgpu_priv *priv, int fd)
+{
+       union drm_amdgpu_ctx ctx_args = { { 0 } };
+       struct drm_amdgpu_gem_va va_args = { 0 };
+       struct drm_gem_close gem_close = { 0 };
+
+       if (!priv->sdma_cmdbuf_map)
+               return;
+
+       va_args.handle = priv->sdma_cmdbuf_bo;
+       va_args.operation = AMDGPU_VA_OP_UNMAP;
+       va_args.flags = 0;
+       va_args.va_address = priv->sdma_cmdbuf_addr;
+       va_args.offset_in_bo = 0;
+       va_args.map_size = priv->sdma_cmdbuf_size;
+       drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+       gem_close.handle = priv->sdma_cmdbuf_bo;
+       drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+
+       ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+       ctx_args.in.ctx_id = priv->sdma_ctx;
+       drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+}
+
+static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
+                    uint64_t size)
+{
+       const uint64_t max_size_per_cmd = 0x3fff00;
+       const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
+       const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
+       uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
+       uint64_t dst_addr = src_addr + size;
+       struct drm_amdgpu_gem_va va_args = { 0 };
+       unsigned cmd = 0;
+       uint64_t remaining_size = size;
+       uint64_t cur_src_addr = src_addr;
+       uint64_t cur_dst_addr = dst_addr;
+       struct drm_amdgpu_cs_chunk_ib ib = { 0 };
+       struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
+       uint64_t chunk_ptrs[2];
+       union drm_amdgpu_cs cs = { { 0 } };
+       struct drm_amdgpu_bo_list_in bo_list = { 0 };
+       struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
+       union drm_amdgpu_wait_cs wait_cs = { { 0 } };
+       int ret = 0;
+
+       if (size > UINT64_MAX - max_size_per_cmd ||
+           DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)
+               return -ENOMEM;
 
-const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21,
-                                                  DRM_FORMAT_NV12, DRM_FORMAT_YVU420_ANDROID };
+       /* Map both buffers into the GPU address space so we can access them from the GPU. */
+       va_args.handle = src_handle;
+       va_args.operation = AMDGPU_VA_OP_MAP;
+       va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
+       va_args.va_address = src_addr;
+       va_args.map_size = size;
+
+       ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+       if (ret)
+               return ret;
+
+       va_args.handle = dst_handle;
+       va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
+       va_args.va_address = dst_addr;
+
+       ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+       if (ret)
+               goto unmap_src;
+
+       while (remaining_size) {
+               uint64_t cur_size = remaining_size;
+               if (cur_size > max_size_per_cmd)
+                       cur_size = max_size_per_cmd;
+
+               priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
+               priv->sdma_cmdbuf_map[cmd++] =
+                   priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size;
+               priv->sdma_cmdbuf_map[cmd++] = 0;
+               priv->sdma_cmdbuf_map[cmd++] = cur_src_addr;
+               priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32;
+               priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr;
+               priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32;
+
+               remaining_size -= cur_size;
+               cur_src_addr += cur_size;
+               cur_dst_addr += cur_size;
+       }
+
+       ib.va_start = priv->sdma_cmdbuf_addr;
+       ib.ib_bytes = cmd * 4;
+       ib.ip_type = AMDGPU_HW_IP_DMA;
+
+       chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
+       chunks[1].length_dw = sizeof(ib) / 4;
+       chunks[1].chunk_data = (uintptr_t)&ib;
+
+       bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
+       bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
+       bo_list_entries[1].bo_handle = src_handle;
+       bo_list_entries[1].bo_priority = 8;
+       bo_list_entries[2].bo_handle = dst_handle;
+       bo_list_entries[2].bo_priority = 8;
+
+       bo_list.bo_number = 3;
+       bo_list.bo_info_size = sizeof(bo_list_entries[0]);
+       bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;
+
+       chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
+       chunks[0].length_dw = sizeof(bo_list) / 4;
+       chunks[0].chunk_data = (uintptr_t)&bo_list;
+
+       chunk_ptrs[0] = (uintptr_t)&chunks[0];
+       chunk_ptrs[1] = (uintptr_t)&chunks[1];
+
+       cs.in.ctx_id = priv->sdma_ctx;
+       cs.in.num_chunks = 2;
+       cs.in.chunks = (uintptr_t)chunk_ptrs;
+
+       ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
+       if (ret) {
+               drv_log("SDMA copy command buffer submission failed %d\n", ret);
+               goto unmap_dst;
+       }
+
+       wait_cs.in.handle = cs.out.handle;
+       wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
+       wait_cs.in.ctx_id = priv->sdma_ctx;
+       wait_cs.in.timeout = INT64_MAX;
+
+       ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
+       if (ret) {
+               drv_log("Could not wait for CS to finish\n");
+       } else if (wait_cs.out.status) {
+               drv_log("Infinite wait timed out, likely GPU hang.\n");
+               ret = -ENODEV;
+       }
+
+unmap_dst:
+       va_args.handle = dst_handle;
+       va_args.operation = AMDGPU_VA_OP_UNMAP;
+       va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+       va_args.va_address = dst_addr;
+       drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+unmap_src:
+       va_args.handle = src_handle;
+       va_args.operation = AMDGPU_VA_OP_UNMAP;
+       va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+       va_args.va_address = src_addr;
+       drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+       return ret;
+}
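
For reference, the seven dwords written per iteration of the copy loop line up with the SDMA linear-copy packet roughly as follows. This is a sketch inferred from the loop above, not an authoritative packet definition; the 0x3fff00 cap presumably keeps the byte count within the packet's count field:

struct sdma_copy_linear_packet {
        uint32_t header;      /* 0x01 = SDMA_OP_COPY, linear sub-op/flags zeroed */
        uint32_t count;       /* byte count; count - 1 on AMDGPU_FAMILY_AI and newer */
        uint32_t parameter;   /* unused here */
        uint32_t src_addr_lo; /* low 32 bits of the source GPU VA */
        uint32_t src_addr_hi;
        uint32_t dst_addr_lo; /* low 32 bits of the destination GPU VA */
        uint32_t dst_addr_hi;
};
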
 
 static int amdgpu_init(struct driver *drv)
 {
@@ -62,12 +325,23 @@ static int amdgpu_init(struct driver *drv)
 
        drv->priv = priv;
 
+       if (query_dev_info(drv_get_fd(drv), &priv->dev_info)) {
+               free(priv);
+               drv->priv = NULL;
+               return -ENODEV;
+       }
        if (dri_init(drv, DRI_PATH, "radeonsi")) {
                free(priv);
                drv->priv = NULL;
                return -ENODEV;
        }
 
+       if (sdma_init(priv, drv_get_fd(drv))) {
+               drv_log("SDMA init failed\n");
+
+               /* Continue, as we can still successfully map things without SDMA. */
+       }
+
        metadata.tiling = TILE_TYPE_LINEAR;
        metadata.priority = 1;
        metadata.modifier = DRM_FORMAT_MOD_LINEAR;
@@ -78,23 +352,34 @@ static int amdgpu_init(struct driver *drv)
        drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
                             &metadata, BO_USE_TEXTURE_MASK);
 
+       /* NV12 format for camera, display, decoding and encoding. */
+       drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+                              BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+                                  BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
+
+       /* Android CTS tests require this. */
+       drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
+
        /* Linear formats supported by display. */
        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
 
-       /* YUV formats for camera and display. */
-       drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
-                              BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
 
        drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
 
        /*
         * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
-        * from camera.
+        * from camera and input/output from hardware decoder/encoder.
         */
        drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
-                              BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+                              BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+                                  BO_USE_HW_VIDEO_ENCODER);
 
        /*
         * The following formats will be allocated by the DRI backend and may be potentially tiled.
@@ -115,63 +400,74 @@ static int amdgpu_init(struct driver *drv)
        /* Potentially tiled formats supported by display. */
        drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
+
+       drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+       drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
        return 0;
 }
 
 static void amdgpu_close(struct driver *drv)
 {
+       sdma_finish(drv->priv, drv_get_fd(drv));
        dri_close(drv);
        free(drv->priv);
        drv->priv = NULL;
 }
 
-static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
-                           uint64_t use_flags)
+static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+                                  uint64_t use_flags)
 {
        int ret;
+       size_t num_planes;
        uint32_t plane, stride;
-       struct combination *combo;
-       union drm_amdgpu_gem_create gem_create;
+       union drm_amdgpu_gem_create gem_create = { { 0 } };
        struct amdgpu_priv *priv = bo->drv->priv;
 
-       combo = drv_get_combination(bo->drv, format, use_flags);
-       if (!combo)
-               return -EINVAL;
-
-       if (combo->metadata.tiling == TILE_TYPE_DRI)
-               return dri_bo_create(bo, width, height, format, use_flags);
-
        stride = drv_stride_from_format(format, width, 0);
-       if (format == DRM_FORMAT_YVU420_ANDROID)
-               stride = ALIGN(stride, 128);
+       num_planes = drv_num_planes_from_format(format);
+
+       /*
+        * For multi-plane formats, align the stride to 512 so that the subsampled plane strides
+        * stay 256-aligned. This uses more memory than necessary since only the first plane needs
+        * to be 256-aligned, but it's acceptable as a short-term fix. It's probably safe for other
+        * GPU families, but let's restrict it to Raven for now (b/171013552).
+        */
+       if (priv->dev_info.family == AMDGPU_FAMILY_RV && num_planes > 1)
+               stride = ALIGN(stride, 512);
        else
-               stride = ALIGN(stride, 64);
+               stride = ALIGN(stride, 256);
+
+       /*
+        * Currently, the allocator used by Chrome aligns the height of
+        * encoder/decoder buffers, while the allocator used by Android
+        * (gralloc/minigbm) doesn't provide any alignment.
+        *
+        * See b/153130069
+        */
+       if (use_flags & (BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER))
+               height = ALIGN(height, CHROME_HEIGHT_ALIGN);
 
        drv_bo_from_format(bo, stride, height, format);
 
-       memset(&gem_create, 0, sizeof(gem_create));
-       gem_create.in.bo_size = bo->total_size;
+       gem_create.in.bo_size =
+           ALIGN(bo->meta.total_size, priv->dev_info.virtual_address_alignment);
        gem_create.in.alignment = 256;
        gem_create.in.domain_flags = 0;
 
-       if (use_flags & (BO_USE_LINEAR | BO_USE_SW))
+       if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
                gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-       if (use_flags & (BO_USE_SCANOUT | BO_USE_CURSOR)) {
-               /* TODO(dbehr) do not use VRAM after we enable display VM */
-               gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
-       } else {
-               gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
-               if (!(use_flags & BO_USE_SW_READ_OFTEN))
-                       gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-       }
+       gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 
-       /* If drm_version >= 21 everything exposes explicit synchronization primitives
-          and chromeos/arc++ will use them. Disable implicit synchronization. */
-       if (priv->drm_version >= 21) {
-               gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
-       }
+       /* Scanout in GTT requires USWC, otherwise try to use cacheable memory
+        * for buffers that are read often, because uncacheable reads can be
+        * very slow. USWC should be faster on the GPU though. */
+       if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
+               gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 
        /* Allocate the buffer with the preferred heap. */
        ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
@@ -179,20 +475,80 @@ static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint
        if (ret < 0)
                return ret;
 
-       for (plane = 0; plane < bo->num_planes; plane++)
+       for (plane = 0; plane < bo->meta.num_planes; plane++)
                bo->handles[plane].u32 = gem_create.out.handle;
 
+       bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+
        return 0;
 }
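
A worked example of the Raven multi-plane rule above, assuming three-plane YVU420 where each chroma stride is half the luma stride:

/* Width 1280: ALIGN(1280, 256) leaves the luma stride at 1280, but the
 * chroma stride 1280 / 2 = 640 is not a multiple of 256. The Raven rule
 * gives ALIGN(1280, 512) = 1536, so the chroma stride is 768 = 3 * 256. */
uint32_t luma_stride = ALIGN(1280, 512);  /* 1536 */
uint32_t chroma_stride = luma_stride / 2; /* 768, 256-aligned */
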
 
-static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+                           uint64_t use_flags)
 {
        struct combination *combo;
-       combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+
+       combo = drv_get_combination(bo->drv, format, use_flags);
        if (!combo)
                return -EINVAL;
 
-       if (combo->metadata.tiling == TILE_TYPE_DRI)
+       if (combo->metadata.tiling == TILE_TYPE_DRI) {
+               bool needs_alignment = false;
+#ifdef __ANDROID__
+               /*
+                * Currently, the gralloc API doesn't differentiate between allocation time and map
+                * time strides. A workaround for amdgpu DRI buffers is to always align to 256 at
+                * allocation time.
+                *
+                * See b/115946221, b/117942643
+                */
+               if (use_flags & (BO_USE_SW_MASK))
+                       needs_alignment = true;
+#endif
+               // See b/122049612
+               if (use_flags & (BO_USE_SCANOUT))
+                       needs_alignment = true;
+
+               if (needs_alignment) {
+                       uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
+                       width = ALIGN(width, 256 / bytes_per_pixel);
+               }
+
+               return dri_bo_create(bo, width, height, format, use_flags);
+       }
+
+       return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
+}
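
The 256 in the DRI path above is in bytes, so the width padding depends on the format's pixel size; a small worked example for four-byte DRM_FORMAT_ARGB8888:

uint32_t bytes_per_pixel = 4;                        /* ARGB8888 */
uint32_t width = ALIGN(1000, 256 / bytes_per_pixel); /* ALIGN(1000, 64) = 1024 */
/* 1024 pixels * 4 bytes = 4096-byte stride, a multiple of 256. */
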
+
+static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+                                          uint32_t format, const uint64_t *modifiers,
+                                          uint32_t count)
+{
+       bool only_use_linear = true;
+
+       for (uint32_t i = 0; i < count; ++i)
+               if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
+                       only_use_linear = false;
+
+       if (only_use_linear)
+               return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);
+
+       return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count);
+}
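
A caller-side sketch (hypothetical sizes): a modifier list containing only DRM_FORMAT_MOD_LINEAR takes the linear path with BO_USE_SCANOUT, while any other modifier defers to the DRI backend:

const uint64_t mods[] = { DRM_FORMAT_MOD_LINEAR };

/* Only linear requested -> amdgpu_create_bo_linear(..., BO_USE_SCANOUT). */
amdgpu_create_bo_with_modifiers(bo, 1920, 1080, DRM_FORMAT_XRGB8888, mods, 1);
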
+
+static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+{
+       bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR;
+       if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) {
+               struct combination *combo;
+               combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+               if (!combo)
+                       return -EINVAL;
+
+               dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI;
+       }
+
+       if (dri_tiling)
                return dri_bo_import(bo, data);
        else
                return drv_prime_bo_import(bo, data);
@@ -208,36 +564,140 @@ static int amdgpu_destroy_bo(struct bo *bo)
 
 static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
 {
+       void *addr = MAP_FAILED;
        int ret;
-       union drm_amdgpu_gem_mmap gem_map;
+       union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+       struct drm_amdgpu_gem_create_in bo_info = { 0 };
+       struct drm_amdgpu_gem_op gem_op = { 0 };
+       uint32_t handle = bo->handles[plane].u32;
+       struct amdgpu_linear_vma_priv *priv = NULL;
+       struct amdgpu_priv *drv_priv;
 
        if (bo->priv)
                return dri_bo_map(bo, vma, plane, map_flags);
 
-       memset(&gem_map, 0, sizeof(gem_map));
-       gem_map.in.handle = bo->handles[plane].u32;
+       drv_priv = bo->drv->priv;
+       gem_op.handle = handle;
+       gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
+       gem_op.value = (uintptr_t)&bo_info;
+
+       ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
+       if (ret)
+               return MAP_FAILED;
+
+       vma->length = bo_info.bo_size;
+
+       if (((bo_info.domains & AMDGPU_GEM_DOMAIN_VRAM) ||
+            (bo_info.domain_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)) &&
+           drv_priv->sdma_cmdbuf_map) {
+               union drm_amdgpu_gem_create gem_create = { { 0 } };
+
+               priv = calloc(1, sizeof(struct amdgpu_linear_vma_priv));
+               if (!priv)
+                       return MAP_FAILED;
 
+               gem_create.in.bo_size = bo_info.bo_size;
+               gem_create.in.alignment = 4096;
+               gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+               ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_CREATE, &gem_create,
+                                         sizeof(gem_create));
+               if (ret < 0) {
+                       drv_log("GEM create failed\n");
+                       free(priv);
+                       return MAP_FAILED;
+               }
+
+               priv->map_flags = map_flags;
+               handle = priv->handle = gem_create.out.handle;
+
+               ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handles[0].u32, priv->handle,
+                               bo_info.bo_size);
+               if (ret) {
+                       drv_log("SDMA copy for read failed\n");
+                       goto fail;
+               }
+       }
+
+       gem_map.in.handle = handle;
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
-               return MAP_FAILED;
+               goto fail;
        }
 
-       vma->length = bo->total_size;
-
-       return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+       addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.out.addr_ptr);
+       if (addr == MAP_FAILED)
+               goto fail;
+
+       vma->priv = priv;
+       return addr;
+
+fail:
+       if (priv) {
+               struct drm_gem_close gem_close = { 0 };
+               gem_close.handle = priv->handle;
+               drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+               free(priv);
+       }
+       return MAP_FAILED;
 }
 
 static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
 {
        if (bo->priv)
                return dri_bo_unmap(bo, vma);
-       else
-               return munmap(vma->addr, vma->length);
+       else {
+               int r = munmap(vma->addr, vma->length);
+               if (r)
+                       return r;
+
+               if (vma->priv) {
+                       struct amdgpu_linear_vma_priv *priv = vma->priv;
+                       struct drm_gem_close gem_close = { 0 };
+
+                       if (BO_MAP_WRITE & priv->map_flags) {
+                               r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
+                                             bo->handles[0].u32, vma->length);
+                               if (r)
+                                       return r;
+                       }
+
+                       gem_close.handle = priv->handle;
+                       r = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+               }
+
+               return 0;
+       }
+}
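
Taken together with amdgpu_map_bo() above, the staging path for USWC or VRAM buffers works out as follows (a summary of the code above, not new behavior):

/*
 * map:   query the BO's domain and flags (AMDGPU_GEM_OP_GET_GEM_CREATE_INFO)
 *        -> VRAM or USWC, and SDMA available: allocate a cacheable GTT
 *           staging BO, sdma_copy(bo -> staging), mmap the staging BO
 *        -> otherwise: mmap the BO directly
 * unmap: munmap
 *        -> if a staging BO exists and the mapping was writable:
 *           sdma_copy(staging -> bo), then close the staging BO
 */
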
+
+static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+       int ret;
+       union drm_amdgpu_gem_wait_idle wait_idle = { { 0 } };
+
+       if (bo->priv)
+               return 0;
+
+       wait_idle.in.handle = bo->handles[0].u32;
+       wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE;
+
+       ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &wait_idle,
+                                 sizeof(wait_idle));
+
+       if (ret < 0) {
+               drv_log("DRM_AMDGPU_GEM_WAIT_IDLE failed with %d\n", ret);
+               return ret;
+       }
+
+       if (ret == 0 && wait_idle.out.status)
+               drv_log("DRM_AMDGPU_GEM_WAIT_IDLE BO is busy\n");
+
+       return 0;
 }
 
-static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags)
+static uint32_t amdgpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
 {
        switch (format) {
        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
@@ -258,11 +718,14 @@ const struct backend backend_amdgpu = {
        .init = amdgpu_init,
        .close = amdgpu_close,
        .bo_create = amdgpu_create_bo,
+       .bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
        .bo_destroy = amdgpu_destroy_bo,
        .bo_import = amdgpu_import_bo,
        .bo_map = amdgpu_map_bo,
        .bo_unmap = amdgpu_unmap_bo,
+       .bo_invalidate = amdgpu_bo_invalidate,
        .resolve_format = amdgpu_resolve_format,
+       .num_planes_from_modifier = dri_num_planes_from_modifier,
 };
 
 #endif