ifdef DRV_MESON
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_meson)
endif
+ifdef DRV_MSM
+# -ldl requests the libdl library (dlopen/dlsym) at link time; it is a
+# linker input, not a compiler flag, so it belongs in LDLIBS, not CFLAGS.
+LDLIBS += -ldl
+endif
ifdef DRV_RADEON
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_radeon)
endif
/* DRI backend decides tiling in this case. */
#define TILE_TYPE_DRI 1
+/* Height alignment for encoder/decoder buffers */
+#define CHROME_HEIGHT_ALIGN 16
+
struct amdgpu_priv {
	struct dri_driver dri;
	int drm_version;
+
+	/* State for the SDMA engine used to copy between linear staging BOs
+	 * and VRAM/USWC allocations (see sdma_init()/sdma_copy()). */
+	struct drm_amdgpu_info_device dev_info;
+	uint32_t sdma_ctx;		/* GPU context used for SDMA submissions */
+	uint32_t sdma_cmdbuf_bo;	/* GEM handle of the SDMA command buffer */
+	uint64_t sdma_cmdbuf_addr;	/* GPU virtual address of the command buffer */
+	uint64_t sdma_cmdbuf_size;	/* command buffer size in bytes */
+	uint32_t *sdma_cmdbuf_map;	/* CPU mapping; NULL when SDMA is unavailable */
};
+/* Per-mapping state for CPU mappings routed through a linear staging BO. */
+struct amdgpu_linear_vma_priv {
+	uint32_t handle;	/* GEM handle of the staging BO */
+	uint32_t map_flags;	/* BO_MAP_* flags the caller mapped with */
+};
+
+
+const static uint32_t render_target_formats[] = {
+ DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
+};
const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
DRM_FORMAT_NV21, DRM_FORMAT_NV12,
DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 };
+/* Fetch the device-info blob for this GPU via the generic AMDGPU info ioctl. */
+static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
+{
+	struct drm_amdgpu_info request = { 0 };
+
+	request.query = AMDGPU_INFO_DEV_INFO;
+	request.return_pointer = (uintptr_t)dev_info;
+	request.return_size = sizeof(*dev_info);
+
+	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
+}
+
+/*
+ * Set up everything needed for SDMA copies: a GPU context plus a small GTT
+ * command buffer mapped for CPU write (via mmap) and GPU execute (via the
+ * VA mapping). Returns 0 and leaves priv->sdma_cmdbuf_map NULL when SDMA
+ * cannot be used on this kernel/GPU; callers key off that pointer.
+ */
+static int sdma_init(struct amdgpu_priv *priv, int fd)
+{
+	union drm_amdgpu_ctx ctx_args = { { 0 } };
+	union drm_amdgpu_gem_create gem_create = { { 0 } };
+	struct drm_amdgpu_gem_va va_args = { 0 };
+	union drm_amdgpu_gem_mmap gem_map = { { 0 } };
+	struct drm_gem_close gem_close = { 0 };
+	int ret;
+
+	/* Ensure we can make a submission without BO lists. */
+	if (priv->drm_version < 27)
+		return 0;
+
+	/* Anything outside this range needs adjustments to the SDMA copy commands */
+	if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)
+		return 0;
+
+	ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
+
+	ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+	if (ret < 0)
+		return ret;
+
+	priv->sdma_ctx = ctx_args.out.alloc.ctx_id;
+
+	/* One page is plenty for the copy packets; round up to the VA granule. */
+	priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
+	gem_create.in.bo_size = priv->sdma_cmdbuf_size;
+	gem_create.in.alignment = 4096;
+	gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+	ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));
+	if (ret < 0)
+		goto fail_ctx;
+
+	priv->sdma_cmdbuf_bo = gem_create.out.handle;
+
+	/* Place the command buffer at the start of the usable VA range. */
+	priv->sdma_cmdbuf_addr =
+	    ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);
+
+	/* Map the buffer into the GPU address space so we can use it from the GPU */
+	va_args.handle = priv->sdma_cmdbuf_bo;
+	va_args.operation = AMDGPU_VA_OP_MAP;
+	va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
+	va_args.va_address = priv->sdma_cmdbuf_addr;
+	va_args.offset_in_bo = 0;
+	va_args.map_size = priv->sdma_cmdbuf_size;
+
+	ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+	if (ret)
+		goto fail_bo;
+
+	gem_map.in.handle = priv->sdma_cmdbuf_bo;
+	ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
+	if (ret)
+		goto fail_va;
+
+	priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+				     fd, gem_map.out.addr_ptr);
+	if (priv->sdma_cmdbuf_map == MAP_FAILED) {
+		priv->sdma_cmdbuf_map = NULL;
+		ret = -ENOMEM;
+		goto fail_va;
+	}
+
+	return 0;
+/* Unwind in reverse acquisition order: VA mapping, BO, then context. */
+fail_va:
+	va_args.operation = AMDGPU_VA_OP_UNMAP;
+	va_args.flags = 0;
+	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+fail_bo:
+	gem_close.handle = priv->sdma_cmdbuf_bo;
+	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+fail_ctx:
+	memset(&ctx_args, 0, sizeof(ctx_args));
+	ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+	ctx_args.in.ctx_id = priv->sdma_ctx;
+	drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+	return ret;
+}
+
+/*
+ * Tear down the SDMA state created by sdma_init(): unmap the command
+ * buffer's GPU VA, close its GEM handle and free the GPU context.
+ * No-op when sdma_init() did not complete (sdma_cmdbuf_map is NULL).
+ *
+ * NOTE(review): the CPU mapping (sdma_cmdbuf_map) is not munmap()ed here;
+ * confirm process teardown is the only caller so the mapping dies with it.
+ */
+static void sdma_finish(struct amdgpu_priv *priv, int fd)
+{
+	union drm_amdgpu_ctx ctx_args = { { 0 } };
+	struct drm_amdgpu_gem_va va_args = { 0 };
+	struct drm_gem_close gem_close = { 0 };
+
+	if (!priv->sdma_cmdbuf_map)
+		return;
+
+	va_args.handle = priv->sdma_cmdbuf_bo;
+	va_args.operation = AMDGPU_VA_OP_UNMAP;
+	va_args.flags = 0;
+	va_args.va_address = priv->sdma_cmdbuf_addr;
+	va_args.offset_in_bo = 0;
+	va_args.map_size = priv->sdma_cmdbuf_size;
+	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+	gem_close.handle = priv->sdma_cmdbuf_bo;
+	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+
+	ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
+	ctx_args.in.ctx_id = priv->sdma_ctx;
+	drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
+}
+
+/*
+ * Copy 'size' bytes from src_handle to dst_handle with the SDMA engine.
+ * Both BOs are temporarily mapped into the GPU VA space just after the
+ * command buffer, copy packets are written into the CPU-visible command
+ * buffer, and the submission is waited on synchronously before returning.
+ */
+static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
+		     uint64_t size)
+{
+	const uint64_t max_size_per_cmd = 0x3fff00;
+	const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
+	const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
+	uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
+	uint64_t dst_addr = src_addr + size;
+	struct drm_amdgpu_gem_va va_args = { 0 };
+	unsigned cmd = 0;
+	uint64_t remaining_size = size;
+	uint64_t cur_src_addr = src_addr;
+	uint64_t cur_dst_addr = dst_addr;
+	struct drm_amdgpu_cs_chunk_ib ib = { 0 };
+	struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
+	uint64_t chunk_ptrs[2];
+	union drm_amdgpu_cs cs = { { 0 } };
+	struct drm_amdgpu_bo_list_in bo_list = { 0 };
+	struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
+	union drm_amdgpu_wait_cs wait_cs = { { 0 } };
+	int ret = 0;
+
+	/* Reject sizes whose packet stream would overflow the command buffer
+	 * (the first clause also guards the dst_addr sum against overflow). */
+	if (size > UINT64_MAX - max_size_per_cmd ||
+	    DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)
+		return -ENOMEM;
+
+	/* Map both buffers into the GPU address space so we can access them from the GPU. */
+	va_args.handle = src_handle;
+	va_args.operation = AMDGPU_VA_OP_MAP;
+	va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
+	va_args.va_address = src_addr;
+	va_args.map_size = size;
+
+	ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+	if (ret)
+		return ret;
+
+	va_args.handle = dst_handle;
+	va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
+	va_args.va_address = dst_addr;
+
+	ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+	if (ret)
+		goto unmap_src;
+
+	/* Emit one 7-dword linear-copy packet per max_size_per_cmd chunk. */
+	while (remaining_size) {
+		uint64_t cur_size = remaining_size;
+		if (cur_size > max_size_per_cmd)
+			cur_size = max_size_per_cmd;
+
+		priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
+		priv->sdma_cmdbuf_map[cmd++] =
+		    priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size;
+		priv->sdma_cmdbuf_map[cmd++] = 0;
+		priv->sdma_cmdbuf_map[cmd++] = cur_src_addr;
+		priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32;
+		priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr;
+		priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32;
+
+		remaining_size -= cur_size;
+		cur_src_addr += cur_size;
+		cur_dst_addr += cur_size;
+	}
+
+	ib.va_start = priv->sdma_cmdbuf_addr;
+	ib.ib_bytes = cmd * 4;
+	ib.ip_type = AMDGPU_HW_IP_DMA;
+
+	chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
+	chunks[1].length_dw = sizeof(ib) / 4;
+	chunks[1].chunk_data = (uintptr_t)&ib;
+
+	/* Reference all three BOs in the submission's inline BO list. */
+	bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
+	bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
+	bo_list_entries[1].bo_handle = src_handle;
+	bo_list_entries[1].bo_priority = 8;
+	bo_list_entries[2].bo_handle = dst_handle;
+	bo_list_entries[2].bo_priority = 8;
+
+	bo_list.bo_number = 3;
+	bo_list.bo_info_size = sizeof(bo_list_entries[0]);
+	bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;
+
+	chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
+	chunks[0].length_dw = sizeof(bo_list) / 4;
+	chunks[0].chunk_data = (uintptr_t)&bo_list;
+
+	chunk_ptrs[0] = (uintptr_t)&chunks[0];
+	chunk_ptrs[1] = (uintptr_t)&chunks[1];
+
+	cs.in.ctx_id = priv->sdma_ctx;
+	cs.in.num_chunks = 2;
+	cs.in.chunks = (uintptr_t)chunk_ptrs;
+
+	ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
+	if (ret) {
+		drv_log("SDMA copy command buffer submission failed %d\n", ret);
+		goto unmap_dst;
+	}
+
+	/* Block until the copy finishes; the VA mappings must outlive it. */
+	wait_cs.in.handle = cs.out.handle;
+	wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
+	wait_cs.in.ctx_id = priv->sdma_ctx;
+	wait_cs.in.timeout = INT64_MAX;
+
+	ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
+	if (ret) {
+		drv_log("Could not wait for CS to finish\n");
+	} else if (wait_cs.out.status) {
+		drv_log("Infinite wait timed out, likely GPU hang.\n");
+		ret = -ENODEV;
+	}
+
+unmap_dst:
+	va_args.handle = dst_handle;
+	va_args.operation = AMDGPU_VA_OP_UNMAP;
+	va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+	va_args.va_address = dst_addr;
+	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+unmap_src:
+	va_args.handle = src_handle;
+	va_args.operation = AMDGPU_VA_OP_UNMAP;
+	va_args.flags = AMDGPU_VM_DELAY_UPDATE;
+	va_args.va_address = src_addr;
+	drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
+
+	return ret;
+}
+
static int amdgpu_init(struct driver *drv)
{
struct amdgpu_priv *priv;
drv->priv = priv;
+ if (query_dev_info(drv_get_fd(drv), &priv->dev_info)) {
+ free(priv);
+ drv->priv = NULL;
+ return -ENODEV;
+ }
if (dri_init(drv, DRI_PATH, "radeonsi")) {
free(priv);
drv->priv = NULL;
return -ENODEV;
}
+ if (sdma_init(priv, drv_get_fd(drv))) {
+ drv_log("SDMA init failed\n");
+
+		/* Continue, as we can still successfully map things without SDMA. */
+ }
+
metadata.tiling = TILE_TYPE_LINEAR;
metadata.priority = 1;
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&metadata, BO_USE_TEXTURE_MASK);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_ENCODER);
+ /* NV12 format for camera, display, decoding and encoding. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
- /* YUV formats for camera and display. */
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
- BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
/*
* The following formats will be allocated by the DRI backend and may be potentially tiled.
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
+
+ drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
return 0;
}
static void amdgpu_close(struct driver *drv)
{
+ sdma_finish(drv->priv, drv_get_fd(drv));
dri_close(drv);
free(drv->priv);
drv->priv = NULL;
int ret;
uint32_t plane, stride;
union drm_amdgpu_gem_create gem_create;
+ struct amdgpu_priv *priv = bo->drv->priv;
stride = drv_stride_from_format(format, width, 0);
stride = ALIGN(stride, 256);
+	/*
+	 * Currently, the allocator used by Chrome aligns the height for
+	 * encoder/decoder buffers, while the allocator used by Android
+	 * (gralloc/minigbm) doesn't provide any alignment.
+	 *
+	 * See b/153130069
+	 */
+ if (use_flags & (BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER))
+ height = ALIGN(height, CHROME_HEIGHT_ALIGN);
+
drv_bo_from_format(bo, stride, height, format);
memset(&gem_create, 0, sizeof(gem_create));
- gem_create.in.bo_size = bo->meta.total_size;
+ gem_create.in.bo_size =
+ ALIGN(bo->meta.total_size, priv->dev_info.virtual_address_alignment);
gem_create.in.alignment = 256;
gem_create.in.domain_flags = 0;
gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
- if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
+
+ /* Scanout in GTT requires USWC, otherwise try to use cachable memory
+ * for buffers that are read often, because uncacheable reads can be
+ * very slow. USWC should be faster on the GPU though. */
+ if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
/* Allocate the buffer with the preferred heap. */
+/*
+ * CPU-map one plane of a buffer. BOs in VRAM or USWC GTT are slow for CPU
+ * reads, so when SDMA is available they are mapped through a cached linear
+ * staging BO filled by an SDMA copy; amdgpu_unmap_bo() writes it back.
+ */
static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
+	void *addr = MAP_FAILED;
int ret;
union drm_amdgpu_gem_mmap gem_map;
+	struct drm_amdgpu_gem_create_in bo_info = { 0 };
+	struct drm_amdgpu_gem_op gem_op = { 0 };
+	uint32_t handle = bo->handles[plane].u32;
+	struct amdgpu_linear_vma_priv *priv = NULL;
+	struct amdgpu_priv *drv_priv;
if (bo->priv)
return dri_bo_map(bo, vma, plane, map_flags);
+	drv_priv = bo->drv->priv;
+	/* Query the BO's creation domains/flags to decide whether staging is needed. */
+	gem_op.handle = handle;
+	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
+	gem_op.value = (uintptr_t)&bo_info;
+
+	ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
+	if (ret)
+		return MAP_FAILED;
+
+	vma->length = bo_info.bo_size;
+
+	if (((bo_info.domains & AMDGPU_GEM_DOMAIN_VRAM) ||
+	     (bo_info.domain_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)) &&
+	    drv_priv->sdma_cmdbuf_map) {
+		union drm_amdgpu_gem_create gem_create = { { 0 } };
+
+		priv = calloc(1, sizeof(struct amdgpu_linear_vma_priv));
+		if (!priv)
+			return MAP_FAILED;
+
+		/* Allocate a cached GTT staging BO of the same size. */
+		gem_create.in.bo_size = bo_info.bo_size;
+		gem_create.in.alignment = 4096;
+		gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+
+		ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_CREATE, &gem_create,
+					  sizeof(gem_create));
+		if (ret < 0) {
+			drv_log("GEM create failed\n");
+			free(priv);
+			return MAP_FAILED;
+		}
+
+		priv->map_flags = map_flags;
+		handle = priv->handle = gem_create.out.handle;
+
+		/* NOTE(review): the staging copy always sources bo->handles[0],
+		 * regardless of 'plane' — confirm all non-DRI BOs are single-handle. */
+		ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handles[0].u32, priv->handle,
+				bo_info.bo_size);
+		if (ret) {
+			drv_log("SDMA copy for read failed\n");
+			goto fail;
+		}
+	}
+
memset(&gem_map, 0, sizeof(gem_map));
-	gem_map.in.handle = bo->handles[plane].u32;
+	gem_map.in.handle = handle;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
if (ret) {
drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
-	return MAP_FAILED;
+	goto fail;
}
-	vma->length = bo->meta.total_size;
-
-	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+	addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.out.addr_ptr);
+	if (addr == MAP_FAILED)
+		goto fail;
+
+	vma->priv = priv;
+	return addr;
+
+/* On failure, destroy the staging BO (if any) so nothing leaks. */
+fail:
+	if (priv) {
+		struct drm_gem_close gem_close = { 0 };
+		gem_close.handle = priv->handle;
+		drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+		free(priv);
+	}
+	return MAP_FAILED;
}
static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
{
if (bo->priv)
return dri_bo_unmap(bo, vma);
- else
- return munmap(vma->addr, vma->length);
+ else {
+ int r = munmap(vma->addr, vma->length);
+ if (r)
+ return r;
+
+ if (vma->priv) {
+ struct amdgpu_linear_vma_priv *priv = vma->priv;
+ struct drm_gem_close gem_close = { 0 };
+
+ if (BO_MAP_WRITE & priv->map_flags) {
+ r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
+ bo->handles[0].u32, vma->length);
+ if (r)
+ return r;
+ }
+
+ gem_close.handle = priv->handle;
+ r = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ }
+
+ return 0;
+ }
}
static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
LIBDRM_CFLAGS := $(shell $(PKG_CONFIG) --cflags libdrm)
LIBDRM_LIBS := $(shell $(PKG_CONFIG) --libs libdrm)
-CPPFLAGS += -Wall -fPIC -Werror -flto $(LIBDRM_CFLAGS)
+CPPFLAGS += -Wall -fPIC -Werror -flto $(LIBDRM_CFLAGS) -D_GNU_SOURCE=1
CXXFLAGS += -std=c++14
-CFLAGS += -std=c99
+CFLAGS += -std=c99 -D_GNU_SOURCE=1
LIBS += -shared -lcutils -lhardware -lsync $(LIBDRM_LIBS)
OBJS = $(foreach source, $(SOURCES), $(addsuffix .o, $(basename $(source))))
struct mapping *lock_data_[DRV_MAX_PLANES];
- /* Optional additional shared memory region attached to some gralloc4 buffers. */
+ /* Optional additional shared memory region attached to some gralloc buffers. */
int32_t reserved_region_fd_;
uint64_t reserved_region_size_;
void *reserved_region_addr_;
if (asprintf(&node, str, DRM_DIR_NAME, j) < 0)
continue;
- fd = open(node, O_RDWR, 0);
+ fd = open(node, O_RDWR | O_CLOEXEC);
free(node);
if (fd < 0)
int32_t create_reserved_region(const std::string &buffer_name, uint64_t reserved_region_size)
{
- int32_t reserved_region_fd;
std::string reserved_region_name = buffer_name + " reserved region";
- reserved_region_fd = memfd_create(reserved_region_name.c_str(), FD_CLOEXEC);
+#ifdef __NR_memfd_create
+ int32_t reserved_region_fd = memfd_create(reserved_region_name.c_str(), FD_CLOEXEC);
if (reserved_region_fd == -1) {
drv_log("Failed to create reserved region fd: %s.\n", strerror(errno));
return -errno;
}
return reserved_region_fd;
+#else
+ drv_log("Failed to create reserved region '%s': memfd_create not available.",
+ reserved_region_name.c_str());
+ return -1;
+#endif
}
int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descriptor *descriptor,
num_bytes = ALIGN(num_bytes, sizeof(int));
num_ints = num_bytes - sizeof(native_handle_t) - num_fds;
/*
- * Malloc is used as handles are ultimetly destroyed via free in
+ * Malloc is used as handles are ultimately destroyed via free in
* native_handle_delete().
*/
hnd = static_cast<struct cros_gralloc_handle *>(malloc(num_bytes));
hnd->width = drv_bo_get_width(bo);
hnd->height = drv_bo_get_height(bo);
hnd->format = drv_bo_get_format(bo);
+ hnd->tiling = bo->meta.tiling;
hnd->format_modifier = drv_bo_get_plane_format_modifier(bo, 0);
hnd->use_flags = descriptor->use_flags;
bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0);
struct bo *bo;
struct drv_import_fd_data data;
data.format = hnd->format;
+ data.tiling = hnd->tiling;
data.width = hnd->width;
data.height = hnd->height;
for (const auto &pair : handles_) {
function(pair.first);
}
-}
\ No newline at end of file
+}
uint32_t width;
uint32_t height;
uint32_t format; /* DRM format */
+ uint32_t tiling;
uint64_t format_modifier;
uint64_t use_flags; /* Buffer creation flags */
uint32_t magic;
std::mutex initialization_mutex;
};
+/* Out-parameter layout for the GRALLOC_DRM_GET_BUFFER_INFO perform() op:
+ * fourcc/modifier of the buffer plus per-plane fds, offsets and strides.
+ * NOTE(review): external callers must use a byte-identical definition. */
+struct cros_gralloc0_buffer_info {
+	uint32_t drm_fourcc;
+	int num_fds;
+	int fds[4];
+	uint64_t modifier;
+	uint32_t offset[4];
+	uint32_t stride[4];
+};
+
/* This enumeration must match the one in <gralloc_drm.h>.
* The functions supported by this gralloc's temporary private API are listed
* below. Use of these functions is highly discouraged and should only be
GRALLOC_DRM_GET_FORMAT,
GRALLOC_DRM_GET_DIMENSIONS,
GRALLOC_DRM_GET_BACKING_STORE,
+ GRALLOC_DRM_GET_BUFFER_INFO,
};
// clang-format on
uint32_t *out_width, *out_height, *out_stride;
uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 };
+ struct cros_gralloc0_buffer_info *info;
auto mod = (struct gralloc0_module const *)module;
switch (op) {
case GRALLOC_DRM_GET_FORMAT:
case GRALLOC_DRM_GET_DIMENSIONS:
case GRALLOC_DRM_GET_BACKING_STORE:
+ case GRALLOC_DRM_GET_BUFFER_INFO:
break;
default:
return -EINVAL;
out_store = va_arg(args, uint64_t *);
ret = mod->driver->get_backing_store(handle, out_store);
break;
+ case GRALLOC_DRM_GET_BUFFER_INFO:
+ info = va_arg(args, struct cros_gralloc0_buffer_info *);
+ info->drm_fourcc = hnd->format;
+ info->num_fds = hnd->num_planes;
+ info->modifier = hnd->format_modifier;
+ for (uint32_t i = 0; i < hnd->num_planes; i++) {
+ info->fds[i] = hnd->fds[i];
+ info->offset[i] = hnd->offsets[i];
+ info->stride[i] = hnd->strides[i];
+ }
+ break;
default:
ret = -EINVAL;
}
--- /dev/null
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This directory is formatted to match the format of the interfaces implemented.
+
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Left
+TabWidth: 4
+UseTab: Never
\ No newline at end of file
--- /dev/null
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_binary {
+ name: "android.hardware.graphics.allocator@3.0-service.minigbm",
+ relative_install_path: "hw",
+ vendor: true,
+ init_rc: ["android.hardware.graphics.allocator@3.0-service.minigbm.rc"],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
+ shared_libs: [
+ "android.hardware.graphics.allocator@3.0",
+ "android.hardware.graphics.mapper@3.0",
+ "libbase",
+ "libcutils",
+ "libhidlbase",
+ "liblog",
+ "libsync",
+ "libutils",
+ ],
+
+ static_libs: [
+ "libdrm",
+ "libminigbm_cros_gralloc",
+ ],
+
+ srcs: [
+ "CrosGralloc3Allocator.cc",
+ "CrosGralloc3AllocatorService.cc",
+ "CrosGralloc3Utils.cc",
+ ],
+}
+
+cc_library_shared {
+ name: "android.hardware.graphics.mapper@3.0-impl.minigbm",
+ relative_install_path: "hw",
+ vendor: true,
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
+ shared_libs: [
+ "android.hardware.graphics.mapper@3.0",
+ "libbase",
+ "libcutils",
+ "libhidlbase",
+ "liblog",
+ "libsync",
+ "libutils",
+ ],
+
+ static_libs: [
+ "libdrm",
+ "libminigbm_cros_gralloc",
+ ],
+
+ srcs: [
+ "CrosGralloc3Mapper.cc",
+ "CrosGralloc3Utils.cc",
+ ],
+}
--- /dev/null
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Allocator.h"
+
+#include <optional>
+
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+#include "cros_gralloc/gralloc3/CrosGralloc3Utils.h"
+
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+using android::hardware::graphics::mapper::V3_0::Error;
+
+using BufferDescriptorInfo =
+ android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo;
+
+/* Creates the minigbm driver backend; mDriver is reset to null on init
+ * failure so every entry point can detect the uninitialized state. */
+CrosGralloc3Allocator::CrosGralloc3Allocator() : mDriver(std::make_unique<cros_gralloc_driver>()) {
+    if (mDriver->init()) {
+        drv_log("Failed to initialize driver.\n");
+        mDriver = nullptr;
+    }
+}
+
+/*
+ * Allocates one buffer described by 'descriptor'. On success the handle and
+ * its pixel stride are returned; the caller owns the handle and must release
+ * it through the driver.
+ */
+Error CrosGralloc3Allocator::allocate(const BufferDescriptorInfo& descriptor, uint32_t* outStride,
+                                      hidl_handle* outHandle) {
+    if (!mDriver) {
+        drv_log("Failed to allocate. Driver is uninitialized.\n");
+        return Error::NO_RESOURCES;
+    }
+
+    if (!outStride || !outHandle) {
+        return Error::NO_RESOURCES;
+    }
+
+    struct cros_gralloc_buffer_descriptor crosDescriptor;
+    if (convertToCrosDescriptor(descriptor, &crosDescriptor)) {
+        return Error::UNSUPPORTED;
+    }
+
+    /* Overlay placement is best-effort: retry without scanout before failing. */
+    bool supported = mDriver->is_supported(&crosDescriptor);
+    if (!supported && (descriptor.usage & BufferUsage::COMPOSER_OVERLAY)) {
+        crosDescriptor.use_flags &= ~BO_USE_SCANOUT;
+        supported = mDriver->is_supported(&crosDescriptor);
+    }
+
+    if (!supported) {
+        std::string drmFormatString = getDrmFormatString(crosDescriptor.drm_format);
+        std::string pixelFormatString = getPixelFormatString(descriptor.format);
+        std::string usageString = getUsageString(descriptor.usage);
+        drv_log("Unsupported combination -- pixel format: %s, drm format:%s, usage: %s\n",
+                pixelFormatString.c_str(), drmFormatString.c_str(), usageString.c_str());
+        return Error::UNSUPPORTED;
+    }
+
+    buffer_handle_t handle;
+    int ret = mDriver->allocate(&crosDescriptor, &handle);
+    if (ret) {
+        return Error::NO_RESOURCES;
+    }
+
+    cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(handle);
+    if (!crosHandle) {
+        /* Fix: release the just-allocated buffer instead of leaking it. */
+        mDriver->release(handle);
+        return Error::NO_RESOURCES;
+    }
+
+    *outHandle = handle;
+    *outStride = crosHandle->pixel_stride;
+
+    return Error::NONE;
+}
+
+/*
+ * HIDL allocate entry point: decodes the encoded descriptor, allocates
+ * 'count' buffers and returns them through hidlCb. On any failure the
+ * already-allocated buffers are rolled back and an empty vector is returned.
+ */
+Return<void> CrosGralloc3Allocator::allocate(const hidl_vec<uint32_t>& encoded, uint32_t count,
+                                             allocate_cb hidlCb) {
+    hidl_vec<hidl_handle> handles;
+
+    if (!mDriver) {
+        drv_log("Failed to allocate. Driver is uninitialized.\n");
+        hidlCb(Error::NO_RESOURCES, 0, handles);
+        return Void();
+    }
+
+    auto descriptor_opt = decodeBufferDescriptorInfo(encoded);
+    if (!descriptor_opt) {
+        drv_log("Failed to allocate. Failed to decode buffer descriptor.\n");
+        hidlCb(Error::BAD_DESCRIPTOR, 0, handles);
+        return Void();
+    }
+
+    BufferDescriptorInfo descriptor = *descriptor_opt;
+
+    handles.resize(count);
+
+    uint32_t stride = 0;
+    /* Fix: use size_t indices — hidl_vec::size() is unsigned, and a signed
+     * 'int' index trips -Wsign-compare, fatal under this module's -Werror. */
+    for (size_t i = 0; i < handles.size(); i++) {
+        Error err = allocate(descriptor, &stride, &(handles[i]));
+        if (err != Error::NONE) {
+            /* Roll back the buffers allocated so far. */
+            for (size_t j = 0; j < i; j++) {
+                mDriver->release(handles[j].getNativeHandle());
+            }
+            handles.resize(0);
+            hidlCb(err, 0, handles);
+            return Void();
+        }
+    }
+
+    hidlCb(Error::NONE, stride, handles);
+
+    /* The HIDL transport has copied the handles out; drop our references. */
+    for (const hidl_handle& handle : handles) {
+        mDriver->release(handle.getNativeHandle());
+    }
+
+    return Void();
+}
+
+/* Debug dump is not implemented; report that through the callback. */
+Return<void> CrosGralloc3Allocator::dumpDebugInfo(dumpDebugInfo_cb hidl_cb) {
+    const char* kUnimplemented = "CrosGralloc3Allocator::dumpDebugInfo unimplemented.";
+    hidl_cb(kUnimplemented);
+    return Void();
+}
--- /dev/null
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <android/hardware/graphics/allocator/3.0/IAllocator.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+#include "cros_gralloc/cros_gralloc_driver.h"
+
+/* IAllocator 3.0 HIDL implementation backed by the minigbm cros_gralloc driver. */
+class CrosGralloc3Allocator : public android::hardware::graphics::allocator::V3_0::IAllocator {
+  public:
+    CrosGralloc3Allocator();
+
+    /* HIDL entry point: allocates 'count' buffers from an encoded descriptor. */
+    android::hardware::Return<void> allocate(
+        const android::hardware::hidl_vec<uint32_t>& descriptor, uint32_t count,
+        allocate_cb hidl_cb) override;
+
+    android::hardware::Return<void> dumpDebugInfo(dumpDebugInfo_cb hidl_cb) override;
+
+  private:
+    /* Allocates one buffer, returning its handle and pixel stride. */
+    android::hardware::graphics::mapper::V3_0::Error allocate(
+        const android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo&
+            description,
+        uint32_t* outStride, android::hardware::hidl_handle* outHandle);
+
+    /* Null when driver initialization failed (see constructor). */
+    std::unique_ptr<cros_gralloc_driver> mDriver;
+};
--- /dev/null
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#define LOG_TAG "AllocatorService"
+
+#include <hidl/LegacySupport.h>
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Allocator.h"
+
+using android::sp;
+using android::hardware::configureRpcThreadpool;
+using android::hardware::joinRpcThreadpool;
+using android::hardware::graphics::allocator::V3_0::IAllocator;
+
+/* Registers the minigbm-backed graphics IAllocator 3.0 HIDL service and
+ * joins the RPC threadpool (does not return in normal operation). */
+int main(int, char**) {
+    sp<IAllocator> allocator = new CrosGralloc3Allocator();
+    configureRpcThreadpool(4, true /* callerWillJoin */);
+    if (allocator->registerAsService() != android::NO_ERROR) {
+        ALOGE("failed to register graphics IAllocator 3.0 service");
+        return -EINVAL;
+    }
+
+    ALOGI("graphics IAllocator 3.0 service is initialized");
+    android::hardware::joinRpcThreadpool();
+    ALOGI("graphics IAllocator 3.0 service is terminating");
+    return 0;
+}
--- /dev/null
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Mapper.h"
+
+#include <cutils/native_handle.h>
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Utils.h"
+#include "helpers.h"
+
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::Return;
+using android::hardware::Void;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+using android::hardware::graphics::mapper::V3_0::Error;
+using android::hardware::graphics::mapper::V3_0::IMapper;
+using android::hardware::graphics::mapper::V3_0::YCbCrLayout;
+
+/* Creates the minigbm driver backend; mDriver is reset to null on init
+ * failure so every mapper entry point can detect the uninitialized state. */
+CrosGralloc3Mapper::CrosGralloc3Mapper() : mDriver(std::make_unique<cros_gralloc_driver>()) {
+    if (mDriver->init()) {
+        drv_log("Failed to initialize driver.\n");
+        mDriver = nullptr;
+    }
+}
+
+/*
+ * Validates the BufferDescriptorInfo and returns it encoded as a
+ * hidl_vec<uint32_t> for later use with allocate()/importBuffer().
+ */
+Return<void> CrosGralloc3Mapper::createDescriptor(const BufferDescriptorInfo& description,
+                                                  createDescriptor_cb hidlCb) {
+    hidl_vec<uint32_t> descriptor;
+
+    /* Fix: width/height/layerCount are uint32_t — log them with %u, not %d
+     * (a mismatched printf conversion specifier is undefined behavior). */
+    if (description.width == 0) {
+        drv_log("Failed to createDescriptor. Bad width: %u.\n", description.width);
+        hidlCb(Error::BAD_VALUE, descriptor);
+        return Void();
+    }
+
+    if (description.height == 0) {
+        drv_log("Failed to createDescriptor. Bad height: %u.\n", description.height);
+        hidlCb(Error::BAD_VALUE, descriptor);
+        return Void();
+    }
+
+    if (description.layerCount == 0) {
+        drv_log("Failed to createDescriptor. Bad layer count: %u.\n", description.layerCount);
+        hidlCb(Error::BAD_VALUE, descriptor);
+        return Void();
+    }
+
+    auto descriptor_opt = encodeBufferDescriptorInfo(description);
+    if (!descriptor_opt) {
+        drv_log("Failed to createDescriptor. Failed to encodeBufferDescriptorInfo\n");
+        hidlCb(Error::BAD_VALUE, descriptor);
+        return Void();
+    }
+
+    descriptor = *descriptor_opt;
+    hidlCb(Error::NONE, descriptor);
+    return Void();
+}
+
+/*
+ * Imports a buffer into this process: clones the incoming native handle and
+ * registers the clone with the driver. The clone (not the input handle) is
+ * returned; the caller owns it and frees it via freeBuffer().
+ */
+Return<void> CrosGralloc3Mapper::importBuffer(const hidl_handle& handle, importBuffer_cb hidlCb) {
+    if (!mDriver) {
+        drv_log("Failed to import buffer. Driver is uninitialized.\n");
+        hidlCb(Error::NO_RESOURCES, nullptr);
+        return Void();
+    }
+
+    const native_handle_t* bufferHandle = handle.getNativeHandle();
+    if (!bufferHandle || bufferHandle->numFds == 0) {
+        drv_log("Failed to importBuffer. Bad handle.\n");
+        hidlCb(Error::BAD_BUFFER, nullptr);
+        return Void();
+    }
+
+    native_handle_t* importedBufferHandle = native_handle_clone(bufferHandle);
+    if (!importedBufferHandle) {
+        drv_log("Failed to importBuffer. Handle clone failed.\n");
+        hidlCb(Error::NO_RESOURCES, nullptr);
+        return Void();
+    }
+
+    int ret = mDriver->retain(importedBufferHandle);
+    if (ret) {
+        /* Retain failed: close and delete the clone so its fds don't leak. */
+        native_handle_close(importedBufferHandle);
+        native_handle_delete(importedBufferHandle);
+        hidlCb(Error::NO_RESOURCES, nullptr);
+        return Void();
+    }
+
+    hidlCb(Error::NONE, importedBufferHandle);
+    return Void();
+}
+
+Return<Error> CrosGralloc3Mapper::freeBuffer(void* rawHandle) {
+    // Drops the driver's reference taken in importBuffer(), then closes the
+    // cloned fds and frees the handle itself.
+    if (!mDriver) {
+        drv_log("Failed to freeBuffer. Driver is uninitialized.\n");
+        return Error::NO_RESOURCES;
+    }
+
+    native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+    if (!bufferHandle) {
+        drv_log("Failed to freeBuffer. Empty handle.\n");
+        return Error::BAD_BUFFER;
+    }
+
+    int ret = mDriver->release(bufferHandle);
+    if (ret) {
+        drv_log("Failed to freeBuffer.\n");
+        return Error::BAD_BUFFER;
+    }
+
+    // The handle (and its fds) were cloned at import time, so they are owned
+    // here and must be destroyed now.
+    native_handle_close(bufferHandle);
+    native_handle_delete(bufferHandle);
+    return Error::NONE;
+}
+
+Return<Error> CrosGralloc3Mapper::validateBufferSize(void* rawHandle,
+                                                     const BufferDescriptorInfo& descriptor,
+                                                     uint32_t stride) {
+    // Checks that an imported buffer matches the descriptor it is claimed to
+    // satisfy. Only format, width, height, and pixel stride are compared;
+    // layerCount and usage are not validated here.
+    if (!mDriver) {
+        drv_log("Failed to validateBufferSize. Driver is uninitialized.\n");
+        return Error::NO_RESOURCES;
+    }
+
+    native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+    if (!bufferHandle) {
+        drv_log("Failed to validateBufferSize. Empty handle.\n");
+        return Error::BAD_BUFFER;
+    }
+
+    cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+    if (!crosHandle) {
+        drv_log("Failed to validateBufferSize. Invalid handle.\n");
+        return Error::BAD_BUFFER;
+    }
+
+    // The handle stores the Android (droid) format; compare in PixelFormat
+    // space rather than DRM fourcc space.
+    PixelFormat crosHandleFormat = static_cast<PixelFormat>(crosHandle->droid_format);
+    if (descriptor.format != crosHandleFormat) {
+        drv_log("Failed to validateBufferSize. Format mismatch.\n");
+        return Error::BAD_BUFFER;
+    }
+
+    if (descriptor.width != crosHandle->width) {
+        drv_log("Failed to validateBufferSize. Width mismatch (%d vs %d).\n", descriptor.width,
+                crosHandle->width);
+        return Error::BAD_VALUE;
+    }
+
+    if (descriptor.height != crosHandle->height) {
+        drv_log("Failed to validateBufferSize. Height mismatch (%d vs %d).\n", descriptor.height,
+                crosHandle->height);
+        return Error::BAD_VALUE;
+    }
+
+    // |stride| is in pixels (per the mapper HAL), matching pixel_stride.
+    if (stride != crosHandle->pixel_stride) {
+        drv_log("Failed to validateBufferSize. Stride mismatch (%d vs %d).\n", stride,
+                crosHandle->pixel_stride);
+        return Error::BAD_VALUE;
+    }
+
+    return Error::NONE;
+}
+
+Return<void> CrosGralloc3Mapper::getTransportSize(void* rawHandle, getTransportSize_cb hidlCb) {
+    // Reports how much of the native handle must cross process boundaries.
+    // Since no process-local data lives on the handle, the full fd/int counts
+    // are returned.
+    if (!mDriver) {
+        drv_log("Failed to getTransportSize. Driver is uninitialized.\n");
+        hidlCb(Error::BAD_BUFFER, 0, 0);
+        return Void();
+    }
+
+    native_handle_t* bufferHandle = reinterpret_cast<native_handle_t*>(rawHandle);
+    if (!bufferHandle) {
+        drv_log("Failed to getTransportSize. Bad handle.\n");
+        hidlCb(Error::BAD_BUFFER, 0, 0);
+        return Void();
+    }
+
+    // No local process data is currently stored on the native handle.
+    hidlCb(Error::NONE, bufferHandle->numFds, bufferHandle->numInts);
+    return Void();
+}
+
+Return<void> CrosGralloc3Mapper::lock(void* rawHandle, uint64_t cpuUsage, const Rect& accessRegion,
+                                      const hidl_handle& acquireFence, lock_cb hidlCb) {
+    // Maps the buffer for CPU access and returns the plane-0 pointer plus
+    // plane-0 bytes-per-pixel/stride. Validation and the actual driver lock
+    // are shared with lockYCbCr() via lockInternal().
+    if (!mDriver) {
+        drv_log("Failed to lock. Driver is uninitialized.\n");
+        hidlCb(Error::NO_RESOURCES, nullptr, 0, 0);
+        return Void();
+    }
+
+    buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+    if (!bufferHandle) {
+        drv_log("Failed to lock. Empty handle.\n");
+        hidlCb(Error::BAD_BUFFER, nullptr, 0, 0);
+        return Void();
+    }
+
+    cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+    if (crosHandle == nullptr) {
+        drv_log("Failed to lock. Invalid handle.\n");
+        hidlCb(Error::BAD_BUFFER, nullptr, 0, 0);
+        return Void();
+    }
+
+    LockResult result = lockInternal(crosHandle, cpuUsage, accessRegion, acquireFence);
+    if (result.error != Error::NONE) {
+        drv_log("Failed to lock. Failed to lockInternal.\n");
+        hidlCb(result.error, nullptr, 0, 0);
+        return Void();
+    }
+
+    // Plane 0 only: second argument to drv_bytes_per_pixel_from_format is the
+    // plane index.
+    int32_t bytesPerPixel = drv_bytes_per_pixel_from_format(crosHandle->format, 0);
+    int32_t bytesPerStride = static_cast<int32_t>(crosHandle->strides[0]);
+
+    hidlCb(Error::NONE, result.mapped[0], bytesPerPixel, bytesPerStride);
+    return Void();
+}
+
+Return<void> CrosGralloc3Mapper::lockYCbCr(void* rawHandle, uint64_t cpuUsage,
+                                           const Rect& accessRegion,
+                                           const android::hardware::hidl_handle& acquireFence,
+                                           lockYCbCr_cb hidlCb) {
+    // Maps a YUV buffer for CPU access and fills in a YCbCrLayout describing
+    // the per-plane pointers, strides, and chroma step for the handle's DRM
+    // format. Shares validation/locking with lock() via lockInternal().
+    YCbCrLayout ycbcr = {};
+
+    if (!mDriver) {
+        drv_log("Failed to lockYCbCr. Driver is uninitialized.\n");
+        hidlCb(Error::NO_RESOURCES, ycbcr);
+        return Void();
+    }
+
+    buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+    if (!bufferHandle) {
+        drv_log("Failed to lockYCbCr. Empty handle.\n");
+        hidlCb(Error::BAD_BUFFER, ycbcr);
+        return Void();
+    }
+
+    cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle);
+    if (crosHandle == nullptr) {
+        drv_log("Failed to lockYCbCr. Invalid handle.\n");
+        hidlCb(Error::BAD_BUFFER, ycbcr);
+        return Void();
+    }
+
+    LockResult result = lockInternal(crosHandle, cpuUsage, accessRegion, acquireFence);
+    if (result.error != Error::NONE) {
+        drv_log("Failed to lockYCbCr. Failed to lockInternal.\n");
+        hidlCb(result.error, ycbcr);
+        return Void();
+    }
+
+    switch (crosHandle->format) {
+        case DRM_FORMAT_NV12: {
+            // Semi-planar, CbCr interleaved: Cr sits one byte after Cb.
+            ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+            ycbcr.cb = result.mapped[0] + crosHandle->offsets[1];
+            ycbcr.cr = result.mapped[0] + crosHandle->offsets[1] + 1;
+            ycbcr.yStride = crosHandle->strides[0];
+            ycbcr.cStride = crosHandle->strides[1];
+            ycbcr.chromaStep = 2;
+            break;
+        }
+        case DRM_FORMAT_NV21: {
+            // Semi-planar, CrCb interleaved: Cb sits one byte after Cr.
+            ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+            ycbcr.cb = result.mapped[0] + crosHandle->offsets[1] + 1;
+            ycbcr.cr = result.mapped[0] + crosHandle->offsets[1];
+            ycbcr.yStride = crosHandle->strides[0];
+            ycbcr.cStride = crosHandle->strides[1];
+            ycbcr.chromaStep = 2;
+            break;
+        }
+        case DRM_FORMAT_YVU420: {
+            // Fully planar; plane order in the handle is Y, Cb, Cr.
+            ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+            ycbcr.cb = result.mapped[0] + crosHandle->offsets[1];
+            ycbcr.cr = result.mapped[0] + crosHandle->offsets[2];
+            ycbcr.yStride = crosHandle->strides[0];
+            ycbcr.cStride = crosHandle->strides[1];
+            ycbcr.chromaStep = 1;
+            break;
+        }
+        case DRM_FORMAT_YVU420_ANDROID: {
+            // Android YV12 layout: Cr plane precedes Cb (offsets swapped
+            // relative to DRM_FORMAT_YVU420 above).
+            ycbcr.y = result.mapped[0] + crosHandle->offsets[0];
+            ycbcr.cb = result.mapped[0] + crosHandle->offsets[2];
+            ycbcr.cr = result.mapped[0] + crosHandle->offsets[1];
+            ycbcr.yStride = crosHandle->strides[0];
+            ycbcr.cStride = crosHandle->strides[1];
+            ycbcr.chromaStep = 1;
+            break;
+        }
+        default: {
+            // NOTE(review): lockInternal() succeeded above, so on this path
+            // the buffer is left locked with no matching unlock — confirm
+            // whether the driver lock should be rolled back here.
+            std::string format = getDrmFormatString(crosHandle->format);
+            drv_log("Failed to lockYCbCr. Unhandled format: %s\n", format.c_str());
+            hidlCb(Error::BAD_BUFFER, ycbcr);
+            return Void();
+        }
+    }
+
+    hidlCb(Error::NONE, ycbcr);
+    return Void();
+}
+
+CrosGralloc3Mapper::LockResult CrosGralloc3Mapper::lockInternal(
+    cros_gralloc_handle_t crosHandle, uint64_t cpuUsage, const Rect& region,
+    const android::hardware::hidl_handle& acquireFence) {
+    // Shared worker for lock()/lockYCbCr(): validates usage and the access
+    // region, converts the acquire fence, then asks the driver to map the
+    // buffer. On success, result.mapped holds the per-plane CPU pointers.
+    LockResult result = {};
+
+    if (!mDriver) {
+        drv_log("Failed to lock. Driver is uninitialized.\n");
+        result.error = Error::NO_RESOURCES;
+        return result;
+    }
+
+    if (cpuUsage == 0) {
+        drv_log("Failed to lock. Bad cpu usage: %" PRIu64 ".\n", cpuUsage);
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    uint32_t mapUsage = 0;
+    int ret = convertToMapUsage(cpuUsage, &mapUsage);
+    if (ret) {
+        drv_log("Failed to lock. Convert usage failed.\n");
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    if (region.left < 0) {
+        drv_log("Failed to lock. Invalid region: negative left value %d.\n", region.left);
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    if (region.top < 0) {
+        drv_log("Failed to lock. Invalid region: negative top value %d.\n", region.top);
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    if (region.width < 0) {
+        drv_log("Failed to lock. Invalid region: negative width value %d.\n", region.width);
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    if (region.height < 0) {
+        drv_log("Failed to lock. Invalid region: negative height value %d.\n", region.height);
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    if (region.width > crosHandle->width) {
+        drv_log("Failed to lock. Invalid region: width greater than buffer width (%d vs %d).\n",
+                region.width, crosHandle->width);
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    // NOTE(review): only width/height are bounded individually; left+width
+    // and top+height are never checked against the buffer extents, so a
+    // sub-rect can extend past the edge — confirm the driver tolerates this.
+    if (region.height > crosHandle->height) {
+        drv_log("Failed to lock. Invalid region: height greater than buffer height (%d vs %d).\n",
+                region.height, crosHandle->height);
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    struct rectangle rect = {static_cast<uint32_t>(region.left), static_cast<uint32_t>(region.top),
+                             static_cast<uint32_t>(region.width),
+                             static_cast<uint32_t>(region.height)};
+
+    // An access region of all zeros means the entire buffer.
+    if (rect.x == 0 && rect.y == 0 && rect.width == 0 && rect.height == 0) {
+        rect.width = crosHandle->width;
+        rect.height = crosHandle->height;
+    }
+
+    int acquireFenceFd = -1;
+    ret = convertToFenceFd(acquireFence, &acquireFenceFd);
+    if (ret) {
+        drv_log("Failed to lock. Bad acquire fence.\n");
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(crosHandle);
+    ret = mDriver->lock(bufferHandle, acquireFenceFd, false, &rect, mapUsage, result.mapped);
+    if (ret) {
+        result.error = Error::BAD_VALUE;
+        return result;
+    }
+
+    result.error = Error::NONE;
+    return result;
+}
+
+Return<void> CrosGralloc3Mapper::unlock(void* rawHandle, unlock_cb hidlCb) {
+    // Unlocks a buffer previously mapped by lock()/lockYCbCr() and hands the
+    // driver's release fence back to the caller as a hidl_handle.
+    if (!mDriver) {
+        drv_log("Failed to unlock. Driver is uninitialized.\n");
+        // NOTE(review): other entry points report NO_RESOURCES for an
+        // uninitialized driver; BAD_BUFFER here looks inconsistent — confirm.
+        hidlCb(Error::BAD_BUFFER, nullptr);
+        return Void();
+    }
+
+    buffer_handle_t bufferHandle = reinterpret_cast<buffer_handle_t>(rawHandle);
+    if (!bufferHandle) {
+        drv_log("Failed to unlock. Empty handle.\n");
+        hidlCb(Error::BAD_BUFFER, nullptr);
+        return Void();
+    }
+
+    int releaseFenceFd = -1;
+    int ret = mDriver->unlock(bufferHandle, &releaseFenceFd);
+    if (ret) {
+        drv_log("Failed to unlock.\n");
+        hidlCb(Error::BAD_BUFFER, nullptr);
+        return Void();
+    }
+
+    // Wrap the fd (if any) in a handle for transport; a -1 fd yields an empty
+    // handle.
+    hidl_handle releaseFenceHandle;
+    ret = convertToFenceHandle(releaseFenceFd, &releaseFenceHandle);
+    if (ret) {
+        drv_log("Failed to unlock. Failed to convert release fence to handle.\n");
+        hidlCb(Error::BAD_BUFFER, nullptr);
+        return Void();
+    }
+
+    hidlCb(Error::NONE, releaseFenceHandle);
+    return Void();
+}
+
+Return<void> CrosGralloc3Mapper::isSupported(const BufferDescriptorInfo& descriptor,
+                                             isSupported_cb hidlCb) {
+    // Answers whether a buffer with this descriptor could be allocated.
+    // A descriptor that fails conversion is reported as unsupported (NONE +
+    // false), not as an error.
+    if (!mDriver) {
+        drv_log("Failed to isSupported. Driver is uninitialized.\n");
+        hidlCb(Error::BAD_VALUE, false);
+        return Void();
+    }
+
+    struct cros_gralloc_buffer_descriptor crosDescriptor;
+    if (convertToCrosDescriptor(descriptor, &crosDescriptor)) {
+        hidlCb(Error::NONE, false);
+        return Void();
+    }
+
+    bool supported = mDriver->is_supported(&crosDescriptor);
+    if (!supported) {
+        // Retry without the scanout requirement; scanout may be the only
+        // blocking flag.
+        crosDescriptor.use_flags &= ~BO_USE_SCANOUT;
+        supported = mDriver->is_supported(&crosDescriptor);
+    }
+
+    hidlCb(Error::NONE, supported);
+    return Void();
+}
+
+int CrosGralloc3Mapper::getResolvedDrmFormat(PixelFormat pixelFormat, uint64_t bufferUsage,
+                                             uint32_t* outDrmFormat) {
+    // Translates an Android pixel format + usage into the concrete DRM fourcc
+    // the driver would pick (resolving flexible/implementation-defined
+    // formats). Returns 0 on success, -1 on any failure.
+    // NOTE(review): unlike the public entry points, this helper dereferences
+    // mDriver without a null check — callers must ensure the driver is up.
+    uint32_t drmFormat;
+    if (convertToDrmFormat(pixelFormat, &drmFormat)) {
+        std::string pixelFormatString = getPixelFormatString(pixelFormat);
+        drv_log("Failed to getResolvedDrmFormat. Failed to convert format %s\n",
+                pixelFormatString.c_str());
+        return -1;
+    }
+
+    uint64_t usage;
+    if (convertToBufferUsage(bufferUsage, &usage)) {
+        std::string usageString = getUsageString(bufferUsage);
+        drv_log("Failed to getResolvedDrmFormat. Failed to convert usage %s\n",
+                usageString.c_str());
+        return -1;
+    }
+
+    uint32_t resolvedDrmFormat = mDriver->get_resolved_drm_format(drmFormat, usage);
+    if (resolvedDrmFormat == DRM_FORMAT_INVALID) {
+        std::string drmFormatString = getDrmFormatString(drmFormat);
+        drv_log("Failed to getResolvedDrmFormat. Failed to resolve drm format %s\n",
+                drmFormatString.c_str());
+        return -1;
+    }
+
+    *outDrmFormat = resolvedDrmFormat;
+
+    return 0;
+}
+
+// Factory entry point for the HIDL passthrough loader (looked up by symbol
+// name); ownership of the new mapper passes to the caller.
+android::hardware::graphics::mapper::V3_0::IMapper* HIDL_FETCH_IMapper(const char* /*name*/) {
+    return static_cast<android::hardware::graphics::mapper::V3_0::IMapper*>(new CrosGralloc3Mapper);
+}
--- /dev/null
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+#include <optional>
+
+#include "cros_gralloc/cros_gralloc_driver.h"
+#include "cros_gralloc/cros_gralloc_handle.h"
+
+// Passthrough implementation of the graphics.mapper@3.0 HAL backed by the
+// cros_gralloc (minigbm) driver.
+class CrosGralloc3Mapper : public android::hardware::graphics::mapper::V3_0::IMapper {
+  public:
+    CrosGralloc3Mapper();
+
+    android::hardware::Return<void> createDescriptor(const BufferDescriptorInfo& description,
+                                                     createDescriptor_cb hidlCb) override;
+
+    android::hardware::Return<void> importBuffer(const android::hardware::hidl_handle& rawHandle,
+                                                 importBuffer_cb hidlCb) override;
+
+    android::hardware::Return<android::hardware::graphics::mapper::V3_0::Error> freeBuffer(
+        void* rawHandle) override;
+
+    android::hardware::Return<android::hardware::graphics::mapper::V3_0::Error> validateBufferSize(
+        void* rawHandle, const BufferDescriptorInfo& descriptor, uint32_t stride) override;
+
+    android::hardware::Return<void> getTransportSize(void* rawHandle,
+                                                     getTransportSize_cb hidlCb) override;
+
+    android::hardware::Return<void> lock(void* rawHandle, uint64_t cpuUsage,
+                                         const Rect& accessRegion,
+                                         const android::hardware::hidl_handle& acquireFence,
+                                         lock_cb hidlCb) override;
+
+    android::hardware::Return<void> lockYCbCr(void* rawHandle, uint64_t cpuUsage,
+                                              const Rect& accessRegion,
+                                              const android::hardware::hidl_handle& acquireFence,
+                                              lockYCbCr_cb _hidl_cb) override;
+
+    android::hardware::Return<void> unlock(void* rawHandle, unlock_cb hidlCb) override;
+
+    android::hardware::Return<void> isSupported(const BufferDescriptorInfo& descriptor,
+                                                isSupported_cb hidlCb) override;
+
+  private:
+    // Resolves an Android format/usage pair to the concrete DRM fourcc the
+    // driver would allocate; returns 0 on success, -1 on failure.
+    int getResolvedDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat pixelFormat,
+                             uint64_t bufferUsage, uint32_t* outDrmFormat);
+
+    // Outcome of a driver lock: an error code plus per-plane CPU pointers.
+    struct LockResult {
+        android::hardware::graphics::mapper::V3_0::Error error;
+
+        uint8_t* mapped[DRV_MAX_PLANES];
+    };
+    // Shared validation + driver-lock worker used by lock() and lockYCbCr().
+    LockResult lockInternal(cros_gralloc_handle_t crosHandle, uint64_t cpuUsage,
+                            const Rect& accessRegion,
+                            const android::hardware::hidl_handle& acquireFence);
+
+    // Null when driver initialization failed; checked by every entry point.
+    std::unique_ptr<cros_gralloc_driver> mDriver;
+};
+
+extern "C" android::hardware::graphics::mapper::V3_0::IMapper* HIDL_FETCH_IMapper(const char* name);
--- /dev/null
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc/gralloc3/CrosGralloc3Utils.h"
+
+#include <array>
+#include <limits>
+#include <unordered_map>
+
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <cutils/native_handle.h>
+
+#include "cros_gralloc/cros_gralloc_helpers.h"
+
+using android::hardware::hidl_bitfield;
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::common::V1_2::PixelFormat;
+
+using BufferDescriptorInfo =
+ android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo;
+
+// Returns a human-readable name for a DRM fourcc, for logging only.
+std::string getDrmFormatString(uint32_t drmFormat) {
+    switch (drmFormat) {
+        case DRM_FORMAT_ABGR1555:
+            return "DRM_FORMAT_ABGR1555";
+        case DRM_FORMAT_ABGR2101010:
+            return "DRM_FORMAT_ABGR2101010";
+        case DRM_FORMAT_ABGR4444:
+            return "DRM_FORMAT_ABGR4444";
+        case DRM_FORMAT_ABGR8888:
+            return "DRM_FORMAT_ABGR8888";
+        case DRM_FORMAT_ARGB1555:
+            return "DRM_FORMAT_ARGB1555";
+        case DRM_FORMAT_ARGB2101010:
+            return "DRM_FORMAT_ARGB2101010";
+        case DRM_FORMAT_ARGB4444:
+            return "DRM_FORMAT_ARGB4444";
+        case DRM_FORMAT_ARGB8888:
+            return "DRM_FORMAT_ARGB8888";
+        case DRM_FORMAT_AYUV:
+            return "DRM_FORMAT_AYUV";
+        case DRM_FORMAT_BGR233:
+            return "DRM_FORMAT_BGR233";
+        case DRM_FORMAT_BGR565:
+            return "DRM_FORMAT_BGR565";
+        case DRM_FORMAT_BGR888:
+            return "DRM_FORMAT_BGR888";
+        case DRM_FORMAT_BGRA1010102:
+            return "DRM_FORMAT_BGRA1010102";
+        case DRM_FORMAT_BGRA4444:
+            return "DRM_FORMAT_BGRA4444";
+        case DRM_FORMAT_BGRA5551:
+            return "DRM_FORMAT_BGRA5551";
+        case DRM_FORMAT_BGRA8888:
+            return "DRM_FORMAT_BGRA8888";
+        case DRM_FORMAT_BGRX1010102:
+            return "DRM_FORMAT_BGRX1010102";
+        case DRM_FORMAT_BGRX4444:
+            return "DRM_FORMAT_BGRX4444";
+        case DRM_FORMAT_BGRX5551:
+            return "DRM_FORMAT_BGRX5551";
+        case DRM_FORMAT_BGRX8888:
+            return "DRM_FORMAT_BGRX8888";
+        case DRM_FORMAT_C8:
+            return "DRM_FORMAT_C8";
+        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+            return "DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED";
+        case DRM_FORMAT_GR88:
+            return "DRM_FORMAT_GR88";
+        case DRM_FORMAT_NV12:
+            return "DRM_FORMAT_NV12";
+        case DRM_FORMAT_NV21:
+            return "DRM_FORMAT_NV21";
+        case DRM_FORMAT_R8:
+            return "DRM_FORMAT_R8";
+        case DRM_FORMAT_RG88:
+            return "DRM_FORMAT_RG88";
+        case DRM_FORMAT_RGB332:
+            return "DRM_FORMAT_RGB332";
+        case DRM_FORMAT_RGB565:
+            return "DRM_FORMAT_RGB565";
+        case DRM_FORMAT_RGB888:
+            return "DRM_FORMAT_RGB888";
+        case DRM_FORMAT_RGBA1010102:
+            return "DRM_FORMAT_RGBA1010102";
+        case DRM_FORMAT_RGBA4444:
+            return "DRM_FORMAT_RGBA4444";
+        case DRM_FORMAT_RGBA5551:
+            return "DRM_FORMAT_RGBA5551";
+        case DRM_FORMAT_RGBA8888:
+            return "DRM_FORMAT_RGBA8888";
+        case DRM_FORMAT_RGBX1010102:
+            return "DRM_FORMAT_RGBX1010102";
+        case DRM_FORMAT_RGBX4444:
+            return "DRM_FORMAT_RGBX4444";
+        case DRM_FORMAT_RGBX5551:
+            return "DRM_FORMAT_RGBX5551";
+        case DRM_FORMAT_RGBX8888:
+            return "DRM_FORMAT_RGBX8888";
+        case DRM_FORMAT_UYVY:
+            return "DRM_FORMAT_UYVY";
+        case DRM_FORMAT_VYUY:
+            return "DRM_FORMAT_VYUY";
+        case DRM_FORMAT_XBGR1555:
+            return "DRM_FORMAT_XBGR1555";
+        case DRM_FORMAT_XBGR2101010:
+            return "DRM_FORMAT_XBGR2101010";
+        case DRM_FORMAT_XBGR4444:
+            return "DRM_FORMAT_XBGR4444";
+        case DRM_FORMAT_XBGR8888:
+            return "DRM_FORMAT_XBGR8888";
+        case DRM_FORMAT_XRGB1555:
+            return "DRM_FORMAT_XRGB1555";
+        case DRM_FORMAT_XRGB2101010:
+            return "DRM_FORMAT_XRGB2101010";
+        case DRM_FORMAT_XRGB4444:
+            return "DRM_FORMAT_XRGB4444";
+        case DRM_FORMAT_XRGB8888:
+            return "DRM_FORMAT_XRGB8888";
+        case DRM_FORMAT_YUYV:
+            return "DRM_FORMAT_YUYV";
+        case DRM_FORMAT_YVU420:
+            return "DRM_FORMAT_YVU420";
+        case DRM_FORMAT_YVU420_ANDROID:
+            /* Was "DRM_FORMAT_YVU420", hiding the Android variant in logs. */
+            return "DRM_FORMAT_YVU420_ANDROID";
+        case DRM_FORMAT_YVYU:
+            return "DRM_FORMAT_YVYU";
+    }
+    /* %u: drmFormat is unsigned; %d mismatched the type. */
+    return android::base::StringPrintf("Unknown(%u)", drmFormat);
+}
+
+// Returns a human-readable name for an Android PixelFormat, for logging only.
+std::string getPixelFormatString(PixelFormat format) {
+    switch (format) {
+        case PixelFormat::BGRA_8888:
+            return "PixelFormat::BGRA_8888";
+        case PixelFormat::BLOB:
+            return "PixelFormat::BLOB";
+        case PixelFormat::DEPTH_16:
+            return "PixelFormat::DEPTH_16";
+        case PixelFormat::DEPTH_24:
+            return "PixelFormat::DEPTH_24";
+        case PixelFormat::DEPTH_24_STENCIL_8:
+            return "PixelFormat::DEPTH_24_STENCIL_8";
+        case PixelFormat::DEPTH_32F:
+            /* Was "PixelFormat::DEPTH_24" — copy-paste from the case above. */
+            return "PixelFormat::DEPTH_32F";
+        case PixelFormat::DEPTH_32F_STENCIL_8:
+            /* Was "PixelFormat::DEPTH_24_STENCIL_8" — same copy-paste. */
+            return "PixelFormat::DEPTH_32F_STENCIL_8";
+        case PixelFormat::HSV_888:
+            return "PixelFormat::HSV_888";
+        case PixelFormat::IMPLEMENTATION_DEFINED:
+            return "PixelFormat::IMPLEMENTATION_DEFINED";
+        case PixelFormat::RAW10:
+            return "PixelFormat::RAW10";
+        case PixelFormat::RAW12:
+            return "PixelFormat::RAW12";
+        case PixelFormat::RAW16:
+            return "PixelFormat::RAW16";
+        case PixelFormat::RAW_OPAQUE:
+            return "PixelFormat::RAW_OPAQUE";
+        case PixelFormat::RGBA_1010102:
+            return "PixelFormat::RGBA_1010102";
+        case PixelFormat::RGBA_8888:
+            return "PixelFormat::RGBA_8888";
+        case PixelFormat::RGBA_FP16:
+            return "PixelFormat::RGBA_FP16";
+        case PixelFormat::RGBX_8888:
+            return "PixelFormat::RGBX_8888";
+        case PixelFormat::RGB_565:
+            return "PixelFormat::RGB_565";
+        case PixelFormat::RGB_888:
+            return "PixelFormat::RGB_888";
+        case PixelFormat::STENCIL_8:
+            return "PixelFormat::STENCIL_8";
+        case PixelFormat::Y16:
+            return "PixelFormat::Y16";
+        case PixelFormat::Y8:
+            return "PixelFormat::Y8";
+        case PixelFormat::YCBCR_420_888:
+            return "PixelFormat::YCBCR_420_888";
+        case PixelFormat::YCBCR_422_I:
+            return "PixelFormat::YCBCR_422_I";
+        case PixelFormat::YCBCR_422_SP:
+            return "PixelFormat::YCBCR_422_SP";
+        case PixelFormat::YCBCR_P010:
+            return "PixelFormat::YCBCR_P010";
+        case PixelFormat::YCRCB_420_SP:
+            return "PixelFormat::YCRCB_420_SP";
+        case PixelFormat::YV12:
+            return "PixelFormat::YV12";
+    }
+    /* %u: the cast yields uint32_t; %d mismatched the type. */
+    return android::base::StringPrintf("PixelFormat::Unknown(%u)", static_cast<uint32_t>(format));
+}
+
+// Renders a BufferUsage bitfield as a '|'-separated list of flag names, for
+// logging. Each recognized bit is cleared as it is named; anything left over
+// is reported numerically.
+std::string getUsageString(hidl_bitfield<BufferUsage> bufferUsage) {
+    using Underlying = typename std::underlying_type<BufferUsage>::type;
+
+    Underlying usage = static_cast<Underlying>(bufferUsage);
+
+    std::vector<std::string> usages;
+    if (usage & BufferUsage::CAMERA_INPUT) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CAMERA_INPUT);
+        usages.push_back("BufferUsage::CAMERA_INPUT");
+    }
+    if (usage & BufferUsage::CAMERA_OUTPUT) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CAMERA_OUTPUT);
+        usages.push_back("BufferUsage::CAMERA_OUTPUT");
+    }
+    if (usage & BufferUsage::COMPOSER_CURSOR) {
+        usage &= ~static_cast<Underlying>(BufferUsage::COMPOSER_CURSOR);
+        usages.push_back("BufferUsage::COMPOSER_CURSOR");
+    }
+    if (usage & BufferUsage::COMPOSER_OVERLAY) {
+        usage &= ~static_cast<Underlying>(BufferUsage::COMPOSER_OVERLAY);
+        usages.push_back("BufferUsage::COMPOSER_OVERLAY");
+    }
+    if (usage & BufferUsage::CPU_READ_OFTEN) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_OFTEN);
+        usages.push_back("BufferUsage::CPU_READ_OFTEN");
+    }
+    if (usage & BufferUsage::CPU_READ_NEVER) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_NEVER);
+        usages.push_back("BufferUsage::CPU_READ_NEVER");
+    }
+    if (usage & BufferUsage::CPU_READ_RARELY) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CPU_READ_RARELY);
+        usages.push_back("BufferUsage::CPU_READ_RARELY");
+    }
+    if (usage & BufferUsage::CPU_WRITE_NEVER) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_NEVER);
+        usages.push_back("BufferUsage::CPU_WRITE_NEVER");
+    }
+    if (usage & BufferUsage::CPU_WRITE_OFTEN) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_OFTEN);
+        usages.push_back("BufferUsage::CPU_WRITE_OFTEN");
+    }
+    if (usage & BufferUsage::CPU_WRITE_RARELY) {
+        usage &= ~static_cast<Underlying>(BufferUsage::CPU_WRITE_RARELY);
+        usages.push_back("BufferUsage::CPU_WRITE_RARELY");
+    }
+    if (usage & BufferUsage::GPU_RENDER_TARGET) {
+        usage &= ~static_cast<Underlying>(BufferUsage::GPU_RENDER_TARGET);
+        usages.push_back("BufferUsage::GPU_RENDER_TARGET");
+    }
+    if (usage & BufferUsage::GPU_TEXTURE) {
+        usage &= ~static_cast<Underlying>(BufferUsage::GPU_TEXTURE);
+        usages.push_back("BufferUsage::GPU_TEXTURE");
+    }
+    if (usage & BufferUsage::PROTECTED) {
+        usage &= ~static_cast<Underlying>(BufferUsage::PROTECTED);
+        usages.push_back("BufferUsage::PROTECTED");
+    }
+    if (usage & BufferUsage::RENDERSCRIPT) {
+        usage &= ~static_cast<Underlying>(BufferUsage::RENDERSCRIPT);
+        usages.push_back("BufferUsage::RENDERSCRIPT");
+    }
+    if (usage & BufferUsage::VIDEO_DECODER) {
+        usage &= ~static_cast<Underlying>(BufferUsage::VIDEO_DECODER);
+        usages.push_back("BufferUsage::VIDEO_DECODER");
+    }
+    if (usage & BufferUsage::VIDEO_ENCODER) {
+        usage &= ~static_cast<Underlying>(BufferUsage::VIDEO_ENCODER);
+        usages.push_back("BufferUsage::VIDEO_ENCODER");
+    }
+
+    // Any bits not matched above are unknown to this translator.
+    if (usage) {
+        usages.push_back(android::base::StringPrintf("UnknownUsageBits-%" PRIu64, usage));
+    }
+
+    return android::base::Join(usages, '|');
+}
+
+// Maps an Android PixelFormat to the DRM fourcc used for allocation.
+// Returns 0 and sets *outDrmFormat on success, -EINVAL for formats this
+// gralloc does not support (depth/stencil, HSV, RAW10/12, ...).
+int convertToDrmFormat(PixelFormat format, uint32_t* outDrmFormat) {
+    switch (format) {
+        case PixelFormat::BGRA_8888:
+            *outDrmFormat = DRM_FORMAT_ARGB8888;
+            return 0;
+        /**
+         * Choose DRM_FORMAT_R8 because <system/graphics.h> requires the buffers
+         * with a format HAL_PIXEL_FORMAT_BLOB have a height of 1, and width
+         * equal to their size in bytes.
+         */
+        case PixelFormat::BLOB:
+            *outDrmFormat = DRM_FORMAT_R8;
+            return 0;
+        case PixelFormat::DEPTH_16:
+            return -EINVAL;
+        case PixelFormat::DEPTH_24:
+            return -EINVAL;
+        case PixelFormat::DEPTH_24_STENCIL_8:
+            return -EINVAL;
+        case PixelFormat::DEPTH_32F:
+            return -EINVAL;
+        case PixelFormat::DEPTH_32F_STENCIL_8:
+            return -EINVAL;
+        case PixelFormat::HSV_888:
+            return -EINVAL;
+        case PixelFormat::IMPLEMENTATION_DEFINED:
+            // Resolved to a concrete format later by the driver.
+            *outDrmFormat = DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED;
+            return 0;
+        case PixelFormat::RAW10:
+            return -EINVAL;
+        case PixelFormat::RAW12:
+            return -EINVAL;
+        case PixelFormat::RAW16:
+            *outDrmFormat = DRM_FORMAT_R16;
+            return 0;
+        /* TODO use blob */
+        case PixelFormat::RAW_OPAQUE:
+            return -EINVAL;
+        case PixelFormat::RGBA_1010102:
+            *outDrmFormat = DRM_FORMAT_ABGR2101010;
+            return 0;
+        case PixelFormat::RGBA_8888:
+            *outDrmFormat = DRM_FORMAT_ABGR8888;
+            return 0;
+        case PixelFormat::RGBA_FP16:
+            *outDrmFormat = DRM_FORMAT_ABGR16161616F;
+            return 0;
+        case PixelFormat::RGBX_8888:
+            *outDrmFormat = DRM_FORMAT_XBGR8888;
+            return 0;
+        case PixelFormat::RGB_565:
+            *outDrmFormat = DRM_FORMAT_RGB565;
+            return 0;
+        case PixelFormat::RGB_888:
+            *outDrmFormat = DRM_FORMAT_RGB888;
+            return 0;
+        case PixelFormat::STENCIL_8:
+            return -EINVAL;
+        case PixelFormat::Y16:
+            *outDrmFormat = DRM_FORMAT_R16;
+            return 0;
+        case PixelFormat::Y8:
+            *outDrmFormat = DRM_FORMAT_R8;
+            return 0;
+        case PixelFormat::YCBCR_420_888:
+            // Flexible YUV; concrete layout chosen by the driver.
+            *outDrmFormat = DRM_FORMAT_FLEX_YCbCr_420_888;
+            return 0;
+        case PixelFormat::YCBCR_422_SP:
+            return -EINVAL;
+        case PixelFormat::YCBCR_422_I:
+            return -EINVAL;
+        case PixelFormat::YCBCR_P010:
+            *outDrmFormat = DRM_FORMAT_P010;
+            return 0;
+        case PixelFormat::YCRCB_420_SP:
+            *outDrmFormat = DRM_FORMAT_NV21;
+            return 0;
+        case PixelFormat::YV12:
+            *outDrmFormat = DRM_FORMAT_YVU420_ANDROID;
+            return 0;
+    };
+    return -EINVAL;
+}
+
+// Translates gralloc BufferUsage bits into minigbm BO_USE_* flags.
+// Always returns 0; unrecognized usage bits are silently ignored.
+int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage) {
+    uint64_t bufferUsage = BO_USE_NONE;
+
+    // CPU read/write levels are multi-bit fields, so compare against the
+    // masked value rather than testing individual bits.
+    if ((grallocUsage & BufferUsage::CPU_READ_MASK) ==
+        static_cast<uint64_t>(BufferUsage::CPU_READ_RARELY)) {
+        bufferUsage |= BO_USE_SW_READ_RARELY;
+    }
+    if ((grallocUsage & BufferUsage::CPU_READ_MASK) ==
+        static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN)) {
+        bufferUsage |= BO_USE_SW_READ_OFTEN;
+    }
+    if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) ==
+        static_cast<uint64_t>(BufferUsage::CPU_WRITE_RARELY)) {
+        bufferUsage |= BO_USE_SW_WRITE_RARELY;
+    }
+    if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) ==
+        static_cast<uint64_t>(BufferUsage::CPU_WRITE_OFTEN)) {
+        bufferUsage |= BO_USE_SW_WRITE_OFTEN;
+    }
+    if (grallocUsage & BufferUsage::GPU_TEXTURE) {
+        bufferUsage |= BO_USE_TEXTURE;
+    }
+    if (grallocUsage & BufferUsage::GPU_RENDER_TARGET) {
+        bufferUsage |= BO_USE_RENDERING;
+    }
+    if (grallocUsage & BufferUsage::COMPOSER_OVERLAY) {
+        /* HWC wants to use display hardware, but can defer to OpenGL. */
+        bufferUsage |= BO_USE_SCANOUT | BO_USE_TEXTURE;
+    }
+    if (grallocUsage & BufferUsage::PROTECTED) {
+        bufferUsage |= BO_USE_PROTECTED;
+    }
+    if (grallocUsage & BufferUsage::COMPOSER_CURSOR) {
+        // Deliberate no-op: cursor use adds no driver flags.
+        bufferUsage |= BO_USE_NONE;
+    }
+    if (grallocUsage & BufferUsage::VIDEO_ENCODER) {
+        /*HACK: See b/30054495 */
+        bufferUsage |= BO_USE_SW_READ_OFTEN;
+    }
+    if (grallocUsage & BufferUsage::CAMERA_OUTPUT) {
+        bufferUsage |= BO_USE_CAMERA_WRITE;
+    }
+    if (grallocUsage & BufferUsage::CAMERA_INPUT) {
+        bufferUsage |= BO_USE_CAMERA_READ;
+    }
+    if (grallocUsage & BufferUsage::RENDERSCRIPT) {
+        bufferUsage |= BO_USE_RENDERSCRIPT;
+    }
+    if (grallocUsage & BufferUsage::VIDEO_DECODER) {
+        bufferUsage |= BO_USE_HW_VIDEO_DECODER;
+    }
+
+    *outBufferUsage = bufferUsage;
+    return 0;
+}
+
+int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage) {
+    // Derive the minigbm mapping flags implied by the gralloc CPU usage bits.
+    // Always succeeds.
+    const bool wantsRead = (grallocUsage & BufferUsage::CPU_READ_MASK) != 0;
+    const bool wantsWrite = (grallocUsage & BufferUsage::CPU_WRITE_MASK) != 0;
+
+    uint32_t flags = BO_MAP_NONE;
+    if (wantsRead) {
+        flags |= BO_MAP_READ;
+    }
+    if (wantsWrite) {
+        flags |= BO_MAP_WRITE;
+    }
+
+    *outMapUsage = flags;
+    return 0;
+}
+
+// Converts a HIDL BufferDescriptorInfo into the driver's internal descriptor,
+// translating both the pixel format and the usage flags. Returns 0 on
+// success, -1 if either translation fails.
+int convertToCrosDescriptor(const BufferDescriptorInfo& descriptor,
+                            struct cros_gralloc_buffer_descriptor* outCrosDescriptor) {
+    outCrosDescriptor->width = descriptor.width;
+    outCrosDescriptor->height = descriptor.height;
+    outCrosDescriptor->droid_format = static_cast<int32_t>(descriptor.format);
+    outCrosDescriptor->droid_usage = descriptor.usage;
+    // mapper@3.0 has no reserved-region concept; always zero here.
+    outCrosDescriptor->reserved_region_size = 0;
+
+    if (convertToDrmFormat(descriptor.format, &outCrosDescriptor->drm_format)) {
+        std::string pixelFormatString = getPixelFormatString(descriptor.format);
+        drv_log("Failed to convert descriptor. Unsupported format %s\n", pixelFormatString.c_str());
+        return -1;
+    }
+    if (convertToBufferUsage(descriptor.usage, &outCrosDescriptor->use_flags)) {
+        std::string usageString = getUsageString(descriptor.usage);
+        drv_log("Failed to convert descriptor. Unsupported usage flags %s\n", usageString.c_str());
+        return -1;
+    }
+    return 0;
+}
+
+int convertToFenceFd(const hidl_handle& fenceHandle, int* outFenceFd) {
+    // Extract the (at most one) sync fd wrapped by |fenceHandle|; -1 means
+    // "no fence". The fd is not duplicated — it remains owned by the handle.
+    if (outFenceFd == nullptr) {
+        return -EINVAL;
+    }
+
+    int fd = -1;
+    if (const native_handle_t* native = fenceHandle.getNativeHandle()) {
+        if (native->numFds > 1) {
+            return -EINVAL;
+        }
+        if (native->numFds == 1) {
+            fd = native->data[0];
+        }
+    }
+
+    *outFenceFd = fd;
+    return 0;
+}
+
+// Wraps |fenceFd| in a single-fd native handle for HIDL transport. A negative
+// fd means "no fence" and leaves |outFenceHandle| untouched (success).
+int convertToFenceHandle(int fenceFd, hidl_handle* outFenceHandle) {
+    if (!outFenceHandle) {
+        return -EINVAL;
+    }
+    if (fenceFd < 0) {
+        return 0;
+    }
+
+    // NOTE(review): handleStorage lives on this stack frame; this relies on
+    // the hidl_handle assignment below copying the handle contents before the
+    // function returns — confirm, otherwise *outFenceHandle dangles.
+    NATIVE_HANDLE_DECLARE_STORAGE(handleStorage, 1, 0);
+    auto fenceHandle = native_handle_init(handleStorage, 1, 0);
+    fenceHandle->data[0] = fenceFd;
+
+    *outFenceHandle = fenceHandle;
+    return 0;
+}
+
+// Inverse of encodeBufferDescriptorInfo(): unpacks the 5-word wire format
+// [width, height, layerCount, format, usage]. Returns an empty optional when
+// the vector is malformed.
+std::optional<BufferDescriptorInfo> decodeBufferDescriptorInfo(const hidl_vec<uint32_t>& encoded) {
+    if (encoded.size() != 5) {
+        /* %zu: size() is unsigned — %zd mismatched the type. */
+        drv_log("Failed to decodeBufferDescriptorInfo. Invalid size: %zu.\n", encoded.size());
+        return {};
+    }
+
+    BufferDescriptorInfo descriptor;
+    descriptor.width = encoded[0];
+    descriptor.height = encoded[1];
+    descriptor.layerCount = encoded[2];
+    descriptor.format = static_cast<PixelFormat>(encoded[3]);
+    descriptor.usage = encoded[4];
+    return std::move(descriptor);
+}
+
+// Packs the descriptor into the 5-word wire format consumed by
+// decodeBufferDescriptorInfo(): [width, height, layerCount, format, usage].
+std::optional<hidl_vec<uint32_t>> encodeBufferDescriptorInfo(const BufferDescriptorInfo& info) {
+    hidl_vec<uint32_t> encoded;
+    encoded.resize(5);
+    encoded[0] = info.width;
+    encoded[1] = info.height;
+    encoded[2] = info.layerCount;
+    encoded[3] = static_cast<uint32_t>(info.format);
+    // NOTE(review): usage is 64-bit but only the low 32 bits fit in this
+    // slot — any BufferUsage bits above bit 31 are dropped in the round-trip.
+    // Widening the format requires a matching decodeBufferDescriptorInfo change.
+    encoded[4] = info.usage & std::numeric_limits<uint32_t>::max();
+    return std::move(encoded);
+}
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright 2020 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <optional>
+#include <string>
+#include <vector>
+
+#include <android/hardware/graphics/common/1.2/types.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+
+/* Log-friendly name for a DRM fourcc. */
+std::string getDrmFormatString(uint32_t drmFormat);
+
+/* Log-friendly name for an Android PixelFormat. */
+std::string getPixelFormatString(android::hardware::graphics::common::V1_2::PixelFormat format);
+
+/* '|'-separated list of BufferUsage flag names, for logging. */
+std::string getUsageString(
+    android::hardware::hidl_bitfield<android::hardware::graphics::common::V1_2::BufferUsage>
+        usage);
+
+/* PixelFormat -> DRM fourcc; returns 0 on success, -EINVAL if unsupported. */
+int convertToDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat format,
+                       uint32_t* outDrmFormat);
+
+/* gralloc usage bits -> minigbm BO_USE_* flags; always returns 0. */
+int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage);
+
+/* gralloc CPU usage bits -> minigbm BO_MAP_* flags; always returns 0. */
+int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage);
+
+/* HIDL descriptor -> driver descriptor; returns 0 on success, -1 on failure. */
+int convertToCrosDescriptor(
+    const android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo& descriptor,
+    struct cros_gralloc_buffer_descriptor* outCrosDescriptor);
+
+/* Extracts the sync fd from a fence handle (-1 = no fence); fd not dup'd. */
+int convertToFenceFd(const android::hardware::hidl_handle& fence_handle, int* out_fence_fd);
+
+/* Wraps a sync fd in a hidl_handle for transport; fd < 0 = no fence. */
+int convertToFenceHandle(int fence_fd, android::hardware::hidl_handle* out_fence_handle);
+
+/* Unpacks the 5-word descriptor wire format; empty optional on bad input. */
+std::optional<android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo>
+decodeBufferDescriptorInfo(const android::hardware::hidl_vec<uint32_t>& encoded);
+
+/* Packs a descriptor into the 5-word wire format read by the decoder above. */
+std::optional<android::hardware::hidl_vec<uint32_t>> encodeBufferDescriptorInfo(
+    const android::hardware::graphics::mapper::V3_0::IMapper::BufferDescriptorInfo& info);
--- /dev/null
+#
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+service vendor.graphics.allocator-3-0 /vendor/bin/hw/android.hardware.graphics.allocator@3.0-service.minigbm
+ interface android.hardware.graphics.allocator@3.0::IAllocator default
+ class hal animation
+ user system
+ group graphics drmrpc
+ capabilities SYS_NICE
+ onrestart restart surfaceflinger
+ writepid /dev/cpuset/system-background/tasks
-/*
- * Copyright 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+//
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
cc_binary {
name: "android.hardware.graphics.allocator@4.0-service.minigbm",
native_handle_t* importedBufferHandle = native_handle_clone(bufferHandle);
if (!importedBufferHandle) {
- drv_log("Failed to importBuffer. Handle clone failed.\n");
+ drv_log("Failed to importBuffer. Handle clone failed: %s.\n", strerror(errno));
hidlCb(Error::NO_RESOURCES, nullptr);
return Void();
}
PixelFormat pixelFormat = static_cast<PixelFormat>(crosHandle->droid_format);
status = android::gralloc4::encodePixelFormatRequested(pixelFormat, &encodedMetadata);
} else if (metadataType == android::gralloc4::MetadataType_PixelFormatFourCC) {
- status = android::gralloc4::encodePixelFormatFourCC(crosHandle->format, &encodedMetadata);
+ uint32_t format = crosHandle->format;
+ // Map internal fourcc codes back to standard fourcc codes.
+ if (format == DRM_FORMAT_YVU420_ANDROID) {
+ format = DRM_FORMAT_YVU420;
+ }
+ status = android::gralloc4::encodePixelFormatFourCC(format, &encodedMetadata);
} else if (metadataType == android::gralloc4::MetadataType_PixelFormatModifier) {
status = android::gralloc4::encodePixelFormatModifier(crosHandle->format_modifier,
&encodedMetadata);
{ DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888 },
{ DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888 },
{ DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010 },
+ { DRM_FORMAT_XBGR2101010, __DRI_IMAGE_FORMAT_XBGR2101010 },
{ DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010 },
+ { DRM_FORMAT_ABGR2101010, __DRI_IMAGE_FORMAT_ABGR2101010 },
};
static int drm_format_to_dri_format(uint32_t drm_format)
uint32_t width;
uint32_t height;
uint32_t format;
+ uint32_t tiling;
uint64_t use_flags;
};
#define VIRGL_BIND_LINEAR (1 << 22)
+#define VIRGL_BIND_SHARED_SUBFLAGS (0xffu << 24)
+
+#define VIRGL_BIND_MINIGBM_CAMERA_WRITE (1u << 24)
+#define VIRGL_BIND_MINIGBM_CAMERA_READ (1u << 25)
+#define VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER (1u << 26)
+#define VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER (1u << 27)
+#define VIRGL_BIND_MINIGBM_SW_READ_OFTEN (1u << 28)
+#define VIRGL_BIND_MINIGBM_SW_READ_RARELY (1u << 29)
+#define VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN (1u << 30)
+#define VIRGL_BIND_MINIGBM_SW_WRITE_RARELY (1u << 31)
+#define VIRGL_BIND_MINIGBM_PROTECTED (0xfu << 28) // Mutually exclusive with SW_ flags
+
struct virgl_caps_bool_set1 {
unsigned indep_blend_enable:1;
unsigned indep_blend_func:1;
--- /dev/null
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_PROTOCOL_H
+#define VIRGL_PROTOCOL_H
+
+#define VIRGL_QUERY_STATE_NEW 0
+#define VIRGL_QUERY_STATE_DONE 1
+#define VIRGL_QUERY_STATE_WAIT_HOST 2
+
+struct virgl_host_query_state {
+ uint32_t query_state;
+ uint32_t result_size;
+ uint64_t result;
+};
+
+enum virgl_object_type {
+ VIRGL_OBJECT_NULL,
+ VIRGL_OBJECT_BLEND,
+ VIRGL_OBJECT_RASTERIZER,
+ VIRGL_OBJECT_DSA,
+ VIRGL_OBJECT_SHADER,
+ VIRGL_OBJECT_VERTEX_ELEMENTS,
+ VIRGL_OBJECT_SAMPLER_VIEW,
+ VIRGL_OBJECT_SAMPLER_STATE,
+ VIRGL_OBJECT_SURFACE,
+ VIRGL_OBJECT_QUERY,
+ VIRGL_OBJECT_STREAMOUT_TARGET,
+ VIRGL_MAX_OBJECTS,
+};
+
+/* context cmds to be encoded in the command stream */
+enum virgl_context_cmd {
+ VIRGL_CCMD_NOP = 0,
+ VIRGL_CCMD_CREATE_OBJECT = 1,
+ VIRGL_CCMD_BIND_OBJECT,
+ VIRGL_CCMD_DESTROY_OBJECT,
+ VIRGL_CCMD_SET_VIEWPORT_STATE,
+ VIRGL_CCMD_SET_FRAMEBUFFER_STATE,
+ VIRGL_CCMD_SET_VERTEX_BUFFERS,
+ VIRGL_CCMD_CLEAR,
+ VIRGL_CCMD_DRAW_VBO,
+ VIRGL_CCMD_RESOURCE_INLINE_WRITE,
+ VIRGL_CCMD_SET_SAMPLER_VIEWS,
+ VIRGL_CCMD_SET_INDEX_BUFFER,
+ VIRGL_CCMD_SET_CONSTANT_BUFFER,
+ VIRGL_CCMD_SET_STENCIL_REF,
+ VIRGL_CCMD_SET_BLEND_COLOR,
+ VIRGL_CCMD_SET_SCISSOR_STATE,
+ VIRGL_CCMD_BLIT,
+ VIRGL_CCMD_RESOURCE_COPY_REGION,
+ VIRGL_CCMD_BIND_SAMPLER_STATES,
+ VIRGL_CCMD_BEGIN_QUERY,
+ VIRGL_CCMD_END_QUERY,
+ VIRGL_CCMD_GET_QUERY_RESULT,
+ VIRGL_CCMD_SET_POLYGON_STIPPLE,
+ VIRGL_CCMD_SET_CLIP_STATE,
+ VIRGL_CCMD_SET_SAMPLE_MASK,
+ VIRGL_CCMD_SET_STREAMOUT_TARGETS,
+ VIRGL_CCMD_SET_RENDER_CONDITION,
+ VIRGL_CCMD_SET_UNIFORM_BUFFER,
+
+ VIRGL_CCMD_SET_SUB_CTX,
+ VIRGL_CCMD_CREATE_SUB_CTX,
+ VIRGL_CCMD_DESTROY_SUB_CTX,
+ VIRGL_CCMD_BIND_SHADER,
+ VIRGL_CCMD_SET_TESS_STATE,
+ VIRGL_CCMD_SET_MIN_SAMPLES,
+ VIRGL_CCMD_SET_SHADER_BUFFERS,
+ VIRGL_CCMD_SET_SHADER_IMAGES,
+ VIRGL_CCMD_MEMORY_BARRIER,
+ VIRGL_CCMD_LAUNCH_GRID,
+ VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH,
+ VIRGL_CCMD_TEXTURE_BARRIER,
+ VIRGL_CCMD_SET_ATOMIC_BUFFERS,
+ VIRGL_CCMD_SET_DEBUG_FLAGS,
+ VIRGL_CCMD_GET_QUERY_RESULT_QBO,
+ VIRGL_CCMD_TRANSFER3D,
+ VIRGL_CCMD_END_TRANSFERS,
+ VIRGL_CCMD_COPY_TRANSFER3D,
+ VIRGL_CCMD_SET_TWEAKS,
+ VIRGL_CCMD_CLEAR_TEXTURE,
+ VIRGL_CCMD_PIPE_RESOURCE_CREATE,
+};
+
+/*
+ 8-bit cmd headers
+ 8-bit object type
+ 16-bit length
+*/
+
+#define VIRGL_CMD0(cmd, obj, len) ((cmd) | ((obj) << 8) | ((len) << 16))
+#define VIRGL_CMD0_MAX_DWORDS ((((1ULL << 16) - 1) / 4) * 4)
+
+/* hw specification */
+#define VIRGL_MAX_COLOR_BUFS 8
+#define VIRGL_MAX_CLIP_PLANES 8
+
+#define VIRGL_OBJ_CREATE_HEADER 0
+#define VIRGL_OBJ_CREATE_HANDLE 1
+
+#define VIRGL_OBJ_BIND_HEADER 0
+#define VIRGL_OBJ_BIND_HANDLE 1
+
+#define VIRGL_OBJ_DESTROY_HANDLE 1
+
+/* some of these defines are a specification - not used in the code */
+/* bit offsets for blend state object */
+#define VIRGL_OBJ_BLEND_SIZE (VIRGL_MAX_COLOR_BUFS + 3)
+#define VIRGL_OBJ_BLEND_HANDLE 1
+#define VIRGL_OBJ_BLEND_S0 2
+#define VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(x) ((x) & 0x1 << 0)
+#define VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_BLEND_S0_DITHER(x) (((x) & 0x1) << 2)
+#define VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(x) (((x) & 0x1) << 3)
+#define VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(x) (((x) & 0x1) << 4)
+#define VIRGL_OBJ_BLEND_S1 3
+#define VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(x) (((x) & 0xf) << 0)
+/* repeated once per number of cbufs */
+
+#define VIRGL_OBJ_BLEND_S2(cbuf) (4 + (cbuf))
+#define VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(x) (((x) & 0x7) << 1)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(x) (((x) & 0x1f) << 4)
+#define VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(x) (((x) & 0x1f) << 9)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(x) (((x) & 0x7) << 14)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(x) (((x) & 0x1f) << 17)
+#define VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(x) (((x) & 0x1f) << 22)
+#define VIRGL_OBJ_BLEND_S2_RT_COLORMASK(x) (((x) & 0xf) << 27)
+
+/* bit offsets for DSA state */
+#define VIRGL_OBJ_DSA_SIZE 5
+#define VIRGL_OBJ_DSA_HANDLE 1
+#define VIRGL_OBJ_DSA_S0 2
+#define VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_DSA_S0_DEPTH_FUNC(x) (((x) & 0x7) << 2)
+#define VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(x) (((x) & 0x1) << 8)
+#define VIRGL_OBJ_DSA_S0_ALPHA_FUNC(x) (((x) & 0x7) << 9)
+#define VIRGL_OBJ_DSA_S1 3
+#define VIRGL_OBJ_DSA_S2 4
+#define VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_DSA_S1_STENCIL_FUNC(x) (((x) & 0x7) << 1)
+#define VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(x) (((x) & 0x7) << 4)
+#define VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(x) (((x) & 0x7) << 7)
+#define VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(x) (((x) & 0x7) << 10)
+#define VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(x) (((x) & 0xff) << 13)
+#define VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(x) (((x) & 0xff) << 21)
+#define VIRGL_OBJ_DSA_ALPHA_REF 5
+
+/* offsets for rasterizer state */
+#define VIRGL_OBJ_RS_SIZE 9
+#define VIRGL_OBJ_RS_HANDLE 1
+#define VIRGL_OBJ_RS_S0 2
+#define VIRGL_OBJ_RS_S0_FLATSHADE(x) (((x) & 0x1) << 0)
+#define VIRGL_OBJ_RS_S0_DEPTH_CLIP(x) (((x) & 0x1) << 1)
+#define VIRGL_OBJ_RS_S0_CLIP_HALFZ(x) (((x) & 0x1) << 2)
+#define VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(x) (((x) & 0x1) << 3)
+#define VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(x) (((x) & 0x1) << 4)
+#define VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(x) (((x) & 0x1) << 5)
+#define VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(x) (((x) & 0x1) << 6)
+#define VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(x) (((x) & 0x1) << 7)
+#define VIRGL_OBJ_RS_S0_CULL_FACE(x) (((x) & 0x3) << 8)
+#define VIRGL_OBJ_RS_S0_FILL_FRONT(x) (((x) & 0x3) << 10)
+#define VIRGL_OBJ_RS_S0_FILL_BACK(x) (((x) & 0x3) << 12)
+#define VIRGL_OBJ_RS_S0_SCISSOR(x) (((x) & 0x1) << 14)
+#define VIRGL_OBJ_RS_S0_FRONT_CCW(x) (((x) & 0x1) << 15)
+#define VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(x) (((x) & 0x1) << 16)
+#define VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(x) (((x) & 0x1) << 17)
+#define VIRGL_OBJ_RS_S0_OFFSET_LINE(x) (((x) & 0x1) << 18)
+#define VIRGL_OBJ_RS_S0_OFFSET_POINT(x) (((x) & 0x1) << 19)
+#define VIRGL_OBJ_RS_S0_OFFSET_TRI(x) (((x) & 0x1) << 20)
+#define VIRGL_OBJ_RS_S0_POLY_SMOOTH(x) (((x) & 0x1) << 21)
+#define VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(x) (((x) & 0x1) << 22)
+#define VIRGL_OBJ_RS_S0_POINT_SMOOTH(x) (((x) & 0x1) << 23)
+#define VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(x) (((x) & 0x1) << 24)
+#define VIRGL_OBJ_RS_S0_MULTISAMPLE(x) (((x) & 0x1) << 25)
+#define VIRGL_OBJ_RS_S0_LINE_SMOOTH(x) (((x) & 0x1) << 26)
+#define VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(x) (((x) & 0x1) << 27)
+#define VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(x) (((x) & 0x1) << 28)
+#define VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(x) (((x) & 0x1) << 29)
+#define VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(x) (((x) & 0x1) << 30)
+#define VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(x) (((x) & 0x1) << 31)
+
+#define VIRGL_OBJ_RS_POINT_SIZE 3
+#define VIRGL_OBJ_RS_SPRITE_COORD_ENABLE 4
+#define VIRGL_OBJ_RS_S3 5
+
+#define VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(x) (((x) & 0xffff) << 0)
+#define VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(x) (((x) & 0xff) << 16)
+#define VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(x) (((x) & 0xff) << 24)
+#define VIRGL_OBJ_RS_LINE_WIDTH 6
+#define VIRGL_OBJ_RS_OFFSET_UNITS 7
+#define VIRGL_OBJ_RS_OFFSET_SCALE 8
+#define VIRGL_OBJ_RS_OFFSET_CLAMP 9
+
+#define VIRGL_OBJ_CLEAR_SIZE 8
+#define VIRGL_OBJ_CLEAR_BUFFERS 1
+#define VIRGL_OBJ_CLEAR_COLOR_0 2 /* color is 4 * u32/f32/i32 */
+#define VIRGL_OBJ_CLEAR_COLOR_1 3
+#define VIRGL_OBJ_CLEAR_COLOR_2 4
+#define VIRGL_OBJ_CLEAR_COLOR_3 5
+#define VIRGL_OBJ_CLEAR_DEPTH_0 6 /* depth is a double precision float */
+#define VIRGL_OBJ_CLEAR_DEPTH_1 7
+#define VIRGL_OBJ_CLEAR_STENCIL 8
+
+/* shader object */
+#define VIRGL_OBJ_SHADER_HDR_SIZE(nso) (5 + ((nso) ? (2 * nso) + 4 : 0))
+#define VIRGL_OBJ_SHADER_HANDLE 1
+#define VIRGL_OBJ_SHADER_TYPE 2
+#define VIRGL_OBJ_SHADER_OFFSET 3
+#define VIRGL_OBJ_SHADER_OFFSET_VAL(x) (((x) & 0x7fffffff) << 0)
+/* start contains full length in VAL - also implies continuations */
+/* continuation contains offset in VAL */
+#define VIRGL_OBJ_SHADER_OFFSET_CONT (0x1u << 31)
+#define VIRGL_OBJ_SHADER_NUM_TOKENS 4
+#define VIRGL_OBJ_SHADER_SO_NUM_OUTPUTS 5
+#define VIRGL_OBJ_SHADER_SO_STRIDE(x) (6 + (x))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0(x) (10 + (x * 2))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(x) (((x) & 0xff) << 0)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(x) (((x) & 0x3) << 8)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(x) (((x) & 0x7) << 10)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(x) (((x) & 0x7) << 13)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(x) (((x) & 0xffff) << 16)
+#define VIRGL_OBJ_SHADER_SO_OUTPUT0_SO(x) (11 + (x * 2))
+#define VIRGL_OBJ_SHADER_SO_OUTPUT_STREAM(x) (((x) & 0x03) << 0)
+
+/* viewport state */
+#define VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports) ((6 * num_viewports) + 1)
+#define VIRGL_SET_VIEWPORT_START_SLOT 1
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_0(x) (2 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_1(x) (3 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_SCALE_2(x) (4 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_0(x) (5 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_1(x) (6 + (x * 6))
+#define VIRGL_SET_VIEWPORT_STATE_TRANSLATE_2(x) (7 + (x * 6))
+
+/* framebuffer state */
+#define VIRGL_SET_FRAMEBUFFER_STATE_SIZE(nr_cbufs) (nr_cbufs + 2)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NR_CBUFS 1
+#define VIRGL_SET_FRAMEBUFFER_STATE_NR_ZSURF_HANDLE 2
+#define VIRGL_SET_FRAMEBUFFER_STATE_CBUF_HANDLE(x) ((x) + 3)
+
+/* vertex elements object */
+#define VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements) (((num_elements) * 4) + 1)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_HANDLE 1
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_OFFSET(x) (((x) * 4) + 2) /* repeated per VE */
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_INSTANCE_DIVISOR(x) (((x) * 4) + 3)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_VERTEX_BUFFER_INDEX(x) (((x) * 4) + 4)
+#define VIRGL_OBJ_VERTEX_ELEMENTS_V0_SRC_FORMAT(x) (((x) * 4) + 5)
+
+/* vertex buffers */
+#define VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers) ((num_buffers) * 3)
+#define VIRGL_SET_VERTEX_BUFFER_STRIDE(x) (((x) * 3) + 1)
+#define VIRGL_SET_VERTEX_BUFFER_OFFSET(x) (((x) * 3) + 2)
+#define VIRGL_SET_VERTEX_BUFFER_HANDLE(x) (((x) * 3) + 3)
+
+/* index buffer */
+#define VIRGL_SET_INDEX_BUFFER_SIZE(ib) (((ib) ? 2 : 0) + 1)
+#define VIRGL_SET_INDEX_BUFFER_HANDLE 1
+#define VIRGL_SET_INDEX_BUFFER_INDEX_SIZE 2 /* only if sending an IB handle */
+#define VIRGL_SET_INDEX_BUFFER_OFFSET 3 /* only if sending an IB handle */
+
+/* constant buffer */
+#define VIRGL_SET_CONSTANT_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_CONSTANT_BUFFER_INDEX 2
+#define VIRGL_SET_CONSTANT_BUFFER_DATA_START 3
+
+#define VIRGL_SET_UNIFORM_BUFFER_SIZE 5
+#define VIRGL_SET_UNIFORM_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_UNIFORM_BUFFER_INDEX 2
+#define VIRGL_SET_UNIFORM_BUFFER_OFFSET 3
+#define VIRGL_SET_UNIFORM_BUFFER_LENGTH 4
+#define VIRGL_SET_UNIFORM_BUFFER_RES_HANDLE 5
+
+/* draw VBO */
+#define VIRGL_DRAW_VBO_SIZE 12
+#define VIRGL_DRAW_VBO_SIZE_TESS 14
+#define VIRGL_DRAW_VBO_SIZE_INDIRECT 20
+#define VIRGL_DRAW_VBO_START 1
+#define VIRGL_DRAW_VBO_COUNT 2
+#define VIRGL_DRAW_VBO_MODE 3
+#define VIRGL_DRAW_VBO_INDEXED 4
+#define VIRGL_DRAW_VBO_INSTANCE_COUNT 5
+#define VIRGL_DRAW_VBO_INDEX_BIAS 6
+#define VIRGL_DRAW_VBO_START_INSTANCE 7
+#define VIRGL_DRAW_VBO_PRIMITIVE_RESTART 8
+#define VIRGL_DRAW_VBO_RESTART_INDEX 9
+#define VIRGL_DRAW_VBO_MIN_INDEX 10
+#define VIRGL_DRAW_VBO_MAX_INDEX 11
+#define VIRGL_DRAW_VBO_COUNT_FROM_SO 12
+/* tess packet */
+#define VIRGL_DRAW_VBO_VERTICES_PER_PATCH 13
+#define VIRGL_DRAW_VBO_DRAWID 14
+/* indirect packet */
+#define VIRGL_DRAW_VBO_INDIRECT_HANDLE 15
+#define VIRGL_DRAW_VBO_INDIRECT_OFFSET 16
+#define VIRGL_DRAW_VBO_INDIRECT_STRIDE 17
+#define VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT 18
+#define VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_OFFSET 19
+#define VIRGL_DRAW_VBO_INDIRECT_DRAW_COUNT_HANDLE 20
+
+/* create surface */
+#define VIRGL_OBJ_SURFACE_SIZE 5
+#define VIRGL_OBJ_SURFACE_HANDLE 1
+#define VIRGL_OBJ_SURFACE_RES_HANDLE 2
+#define VIRGL_OBJ_SURFACE_FORMAT 3
+#define VIRGL_OBJ_SURFACE_BUFFER_FIRST_ELEMENT 4
+#define VIRGL_OBJ_SURFACE_BUFFER_LAST_ELEMENT 5
+#define VIRGL_OBJ_SURFACE_TEXTURE_LEVEL 4
+#define VIRGL_OBJ_SURFACE_TEXTURE_LAYERS 5
+
+/* create streamout target */
+#define VIRGL_OBJ_STREAMOUT_SIZE 4
+#define VIRGL_OBJ_STREAMOUT_HANDLE 1
+#define VIRGL_OBJ_STREAMOUT_RES_HANDLE 2
+#define VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET 3
+#define VIRGL_OBJ_STREAMOUT_BUFFER_SIZE 4
+
+/* sampler state */
+#define VIRGL_OBJ_SAMPLER_STATE_SIZE 9
+#define VIRGL_OBJ_SAMPLER_STATE_HANDLE 1
+#define VIRGL_OBJ_SAMPLER_STATE_S0 2
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(x) (((x) & 0x7) << 0)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(x) (((x) & 0x7) << 3)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(x) (((x) & 0x7) << 6)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(x) (((x) & 0x3) << 9)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(x) (((x) & 0x3) << 11)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(x) (((x) & 0x3) << 13)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(x) (((x) & 0x1) << 15)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(x) (((x) & 0x7) << 16)
+#define VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(x) (((x) & 0x1) << 19)
+
+#define VIRGL_OBJ_SAMPLER_STATE_LOD_BIAS 3
+#define VIRGL_OBJ_SAMPLER_STATE_MIN_LOD 4
+#define VIRGL_OBJ_SAMPLER_STATE_MAX_LOD 5
+#define VIRGL_OBJ_SAMPLER_STATE_BORDER_COLOR(x) ((x) + 6) /* 6 - 9 */
+
+
+/* sampler view */
+#define VIRGL_OBJ_SAMPLER_VIEW_SIZE 6
+#define VIRGL_OBJ_SAMPLER_VIEW_HANDLE 1
+#define VIRGL_OBJ_SAMPLER_VIEW_RES_HANDLE 2
+#define VIRGL_OBJ_SAMPLER_VIEW_FORMAT 3
+#define VIRGL_OBJ_SAMPLER_VIEW_BUFFER_FIRST_ELEMENT 4
+#define VIRGL_OBJ_SAMPLER_VIEW_BUFFER_LAST_ELEMENT 5
+#define VIRGL_OBJ_SAMPLER_VIEW_TEXTURE_LAYER 4
+#define VIRGL_OBJ_SAMPLER_VIEW_TEXTURE_LEVEL 5
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE 6
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(x) (((x) & 0x7) << 0)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(x) (((x) & 0x7) << 3)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(x) (((x) & 0x7) << 6)
+#define VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(x) (((x) & 0x7) << 9)
+
+/* set sampler views */
+#define VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views) ((num_views) + 2)
+#define VIRGL_SET_SAMPLER_VIEWS_SHADER_TYPE 1
+#define VIRGL_SET_SAMPLER_VIEWS_START_SLOT 2
+#define VIRGL_SET_SAMPLER_VIEWS_V0_HANDLE 3
+
+/* bind sampler states */
+#define VIRGL_BIND_SAMPLER_STATES(num_states) ((num_states) + 2)
+#define VIRGL_BIND_SAMPLER_STATES_SHADER_TYPE 1
+#define VIRGL_BIND_SAMPLER_STATES_START_SLOT 2
+#define VIRGL_BIND_SAMPLER_STATES_S0_HANDLE 3
+
+/* set stencil reference */
+#define VIRGL_SET_STENCIL_REF_SIZE 1
+#define VIRGL_SET_STENCIL_REF 1
+#define VIRGL_STENCIL_REF_VAL(f, s) (((f) & 0xff) | (((s) & 0xff) << 8))
+
+/* set blend color */
+#define VIRGL_SET_BLEND_COLOR_SIZE 4
+#define VIRGL_SET_BLEND_COLOR(x) ((x) + 1)
+
+/* set scissor state */
+#define VIRGL_SET_SCISSOR_STATE_SIZE(x) (1 + 2 * x)
+#define VIRGL_SET_SCISSOR_START_SLOT 1
+#define VIRGL_SET_SCISSOR_MINX_MINY(x) (2 + (x * 2))
+#define VIRGL_SET_SCISSOR_MAXX_MAXY(x) (3 + (x * 2))
+
+/* resource copy region */
+#define VIRGL_CMD_RESOURCE_COPY_REGION_SIZE 13
+#define VIRGL_CMD_RCR_DST_RES_HANDLE 1
+#define VIRGL_CMD_RCR_DST_LEVEL 2
+#define VIRGL_CMD_RCR_DST_X 3
+#define VIRGL_CMD_RCR_DST_Y 4
+#define VIRGL_CMD_RCR_DST_Z 5
+#define VIRGL_CMD_RCR_SRC_RES_HANDLE 6
+#define VIRGL_CMD_RCR_SRC_LEVEL 7
+#define VIRGL_CMD_RCR_SRC_X 8
+#define VIRGL_CMD_RCR_SRC_Y 9
+#define VIRGL_CMD_RCR_SRC_Z 10
+#define VIRGL_CMD_RCR_SRC_W 11
+#define VIRGL_CMD_RCR_SRC_H 12
+#define VIRGL_CMD_RCR_SRC_D 13
+
+/* blit */
+#define VIRGL_CMD_BLIT_SIZE 21
+#define VIRGL_CMD_BLIT_S0 1
+#define VIRGL_CMD_BLIT_S0_MASK(x) (((x) & 0xff) << 0)
+#define VIRGL_CMD_BLIT_S0_FILTER(x) (((x) & 0x3) << 8)
+#define VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(x) (((x) & 0x1) << 10)
+#define VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(x) (((x) & 0x1) << 11)
+#define VIRGL_CMD_BLIT_S0_ALPHA_BLEND(x) (((x) & 0x1) << 12)
+#define VIRGL_CMD_BLIT_SCISSOR_MINX_MINY 2
+#define VIRGL_CMD_BLIT_SCISSOR_MAXX_MAXY 3
+#define VIRGL_CMD_BLIT_DST_RES_HANDLE 4
+#define VIRGL_CMD_BLIT_DST_LEVEL 5
+#define VIRGL_CMD_BLIT_DST_FORMAT 6
+#define VIRGL_CMD_BLIT_DST_X 7
+#define VIRGL_CMD_BLIT_DST_Y 8
+#define VIRGL_CMD_BLIT_DST_Z 9
+#define VIRGL_CMD_BLIT_DST_W 10
+#define VIRGL_CMD_BLIT_DST_H 11
+#define VIRGL_CMD_BLIT_DST_D 12
+#define VIRGL_CMD_BLIT_SRC_RES_HANDLE 13
+#define VIRGL_CMD_BLIT_SRC_LEVEL 14
+#define VIRGL_CMD_BLIT_SRC_FORMAT 15
+#define VIRGL_CMD_BLIT_SRC_X 16
+#define VIRGL_CMD_BLIT_SRC_Y 17
+#define VIRGL_CMD_BLIT_SRC_Z 18
+#define VIRGL_CMD_BLIT_SRC_W 19
+#define VIRGL_CMD_BLIT_SRC_H 20
+#define VIRGL_CMD_BLIT_SRC_D 21
+
+/* query object */
+#define VIRGL_OBJ_QUERY_SIZE 4
+#define VIRGL_OBJ_QUERY_HANDLE 1
+#define VIRGL_OBJ_QUERY_TYPE_INDEX 2
+#define VIRGL_OBJ_QUERY_TYPE(x) (x & 0xffff)
+#define VIRGL_OBJ_QUERY_INDEX(x) ((x & 0xffff) << 16)
+#define VIRGL_OBJ_QUERY_OFFSET 3
+#define VIRGL_OBJ_QUERY_RES_HANDLE 4
+
+#define VIRGL_QUERY_BEGIN_HANDLE 1
+
+#define VIRGL_QUERY_END_HANDLE 1
+
+#define VIRGL_QUERY_RESULT_SIZE 2
+#define VIRGL_QUERY_RESULT_HANDLE 1
+#define VIRGL_QUERY_RESULT_WAIT 2
+
+/* render condition */
+#define VIRGL_RENDER_CONDITION_SIZE 3
+#define VIRGL_RENDER_CONDITION_HANDLE 1
+#define VIRGL_RENDER_CONDITION_CONDITION 2
+#define VIRGL_RENDER_CONDITION_MODE 3
+
+/* resource inline write */
+#define VIRGL_RESOURCE_IW_RES_HANDLE 1
+#define VIRGL_RESOURCE_IW_LEVEL 2
+#define VIRGL_RESOURCE_IW_USAGE 3
+#define VIRGL_RESOURCE_IW_STRIDE 4
+#define VIRGL_RESOURCE_IW_LAYER_STRIDE 5
+#define VIRGL_RESOURCE_IW_X 6
+#define VIRGL_RESOURCE_IW_Y 7
+#define VIRGL_RESOURCE_IW_Z 8
+#define VIRGL_RESOURCE_IW_W 9
+#define VIRGL_RESOURCE_IW_H 10
+#define VIRGL_RESOURCE_IW_D 11
+#define VIRGL_RESOURCE_IW_DATA_START 12
+
+/* set streamout targets */
+#define VIRGL_SET_STREAMOUT_TARGETS_APPEND_BITMASK 1
+#define VIRGL_SET_STREAMOUT_TARGETS_H0 2
+
+/* set sample mask */
+#define VIRGL_SET_SAMPLE_MASK_SIZE 1
+#define VIRGL_SET_SAMPLE_MASK_MASK 1
+
+/* set clip state */
+#define VIRGL_SET_CLIP_STATE_SIZE 32
+#define VIRGL_SET_CLIP_STATE_C0 1
+
+/* polygon stipple */
+#define VIRGL_POLYGON_STIPPLE_SIZE 32
+#define VIRGL_POLYGON_STIPPLE_P0 1
+
+#define VIRGL_BIND_SHADER_SIZE 2
+#define VIRGL_BIND_SHADER_HANDLE 1
+#define VIRGL_BIND_SHADER_TYPE 2
+
+/* tess state */
+#define VIRGL_TESS_STATE_SIZE 6
+
+/* set min samples */
+#define VIRGL_SET_MIN_SAMPLES_SIZE 1
+#define VIRGL_SET_MIN_SAMPLES_MASK 1
+
+/* set shader buffers */
+#define VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE 3
+#define VIRGL_SET_SHADER_BUFFER_SIZE(x) ((VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE * (x)) + 2)
+#define VIRGL_SET_SHADER_BUFFER_SHADER_TYPE 1
+#define VIRGL_SET_SHADER_BUFFER_START_SLOT 2
+#define VIRGL_SET_SHADER_BUFFER_OFFSET(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 3)
+#define VIRGL_SET_SHADER_BUFFER_LENGTH(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 4)
+#define VIRGL_SET_SHADER_BUFFER_RES_HANDLE(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 5)
+
+/* set shader images */
+#define VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE 5
+#define VIRGL_SET_SHADER_IMAGE_SIZE(x) ((VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE * (x)) + 2)
+#define VIRGL_SET_SHADER_IMAGE_SHADER_TYPE 1
+#define VIRGL_SET_SHADER_IMAGE_START_SLOT 2
+#define VIRGL_SET_SHADER_IMAGE_FORMAT(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 3)
+#define VIRGL_SET_SHADER_IMAGE_ACCESS(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 4)
+#define VIRGL_SET_SHADER_IMAGE_LAYER_OFFSET(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 5)
+#define VIRGL_SET_SHADER_IMAGE_LEVEL_SIZE(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 6)
+#define VIRGL_SET_SHADER_IMAGE_RES_HANDLE(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 7)
+
+/* memory barrier */
+#define VIRGL_MEMORY_BARRIER_SIZE 1
+#define VIRGL_MEMORY_BARRIER_FLAGS 1
+
+/* launch grid */
+#define VIRGL_LAUNCH_GRID_SIZE 8
+#define VIRGL_LAUNCH_BLOCK_X 1
+#define VIRGL_LAUNCH_BLOCK_Y 2
+#define VIRGL_LAUNCH_BLOCK_Z 3
+#define VIRGL_LAUNCH_GRID_X 4
+#define VIRGL_LAUNCH_GRID_Y 5
+#define VIRGL_LAUNCH_GRID_Z 6
+#define VIRGL_LAUNCH_INDIRECT_HANDLE 7
+#define VIRGL_LAUNCH_INDIRECT_OFFSET 8
+
+/* framebuffer state no attachment */
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE 2
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH_HEIGHT 1
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_WIDTH(x) (x & 0xffff)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_HEIGHT(x) ((x >> 16) & 0xffff)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS_SAMPLES 2
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_LAYERS(x) (x & 0xffff)
+#define VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SAMPLES(x) ((x >> 16) & 0xff)
+
+/* texture barrier */
+#define VIRGL_TEXTURE_BARRIER_SIZE 1
+#define VIRGL_TEXTURE_BARRIER_FLAGS 1
+
+/* hw atomics */
+#define VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE 3
+#define VIRGL_SET_ATOMIC_BUFFER_SIZE(x) ((VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE * (x)) + 1)
+#define VIRGL_SET_ATOMIC_BUFFER_START_SLOT 1
+#define VIRGL_SET_ATOMIC_BUFFER_OFFSET(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2)
+#define VIRGL_SET_ATOMIC_BUFFER_LENGTH(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3)
+#define VIRGL_SET_ATOMIC_BUFFER_RES_HANDLE(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4)
+
+/* qbo */
+#define VIRGL_QUERY_RESULT_QBO_SIZE 6
+#define VIRGL_QUERY_RESULT_QBO_HANDLE 1
+#define VIRGL_QUERY_RESULT_QBO_QBO_HANDLE 2
+#define VIRGL_QUERY_RESULT_QBO_WAIT 3
+#define VIRGL_QUERY_RESULT_QBO_RESULT_TYPE 4
+#define VIRGL_QUERY_RESULT_QBO_OFFSET 5
+#define VIRGL_QUERY_RESULT_QBO_INDEX 6
+
+#define VIRGL_TRANSFER_TO_HOST 1
+#define VIRGL_TRANSFER_FROM_HOST 2
+
+/* Transfer */
+#define VIRGL_TRANSFER3D_SIZE 13
+/* The first 11 dwords are the same as VIRGL_RESOURCE_IW_* */
+#define VIRGL_TRANSFER3D_DATA_OFFSET 12
+#define VIRGL_TRANSFER3D_DIRECTION 13
+
+/* Copy transfer */
+#define VIRGL_COPY_TRANSFER3D_SIZE 14
+/* The first 11 dwords are the same as VIRGL_RESOURCE_IW_* */
+#define VIRGL_COPY_TRANSFER3D_SRC_RES_HANDLE 12
+#define VIRGL_COPY_TRANSFER3D_SRC_RES_OFFSET 13
+#define VIRGL_COPY_TRANSFER3D_SYNCHRONIZED 14
+
+/* set tweak flags */
+#define VIRGL_SET_TWEAKS_SIZE 2
+#define VIRGL_SET_TWEAKS_ID 1
+#define VIRGL_SET_TWEAKS_VALUE 2
+
+/* virgl create */
+#define VIRGL_PIPE_RES_CREATE_SIZE 11
+#define VIRGL_PIPE_RES_CREATE_TARGET 1
+#define VIRGL_PIPE_RES_CREATE_FORMAT 2
+#define VIRGL_PIPE_RES_CREATE_BIND 3
+#define VIRGL_PIPE_RES_CREATE_WIDTH 4
+#define VIRGL_PIPE_RES_CREATE_HEIGHT 5
+#define VIRGL_PIPE_RES_CREATE_DEPTH 6
+#define VIRGL_PIPE_RES_CREATE_ARRAY_SIZE 7
+#define VIRGL_PIPE_RES_CREATE_LAST_LEVEL 8
+#define VIRGL_PIPE_RES_CREATE_NR_SAMPLES 9
+#define VIRGL_PIPE_RES_CREATE_FLAGS 10
+#define VIRGL_PIPE_RES_CREATE_BLOB_ID 11
+
+enum vrend_tweak_type {
+ virgl_tweak_gles_brga_emulate,
+ virgl_tweak_gles_brga_apply_dest_swizzle,
+ virgl_tweak_gles_tf3_samples_passes_multiplier,
+ virgl_tweak_undefined
+};
+
+/* Clear texture */
+#define VIRGL_CLEAR_TEXTURE_SIZE 12
+#define VIRGL_TEXTURE_HANDLE 1
+#define VIRGL_TEXTURE_LEVEL 2
+#define VIRGL_TEXTURE_SRC_X 3
+#define VIRGL_TEXTURE_SRC_Y 4
+#define VIRGL_TEXTURE_SRC_Z 5
+#define VIRGL_TEXTURE_SRC_W 6
+#define VIRGL_TEXTURE_SRC_H 7
+#define VIRGL_TEXTURE_SRC_D 8
+#define VIRGL_TEXTURE_ARRAY_A 9
+#define VIRGL_TEXTURE_ARRAY_B 10
+#define VIRGL_TEXTURE_ARRAY_C 11
+#define VIRGL_TEXTURE_ARRAY_D 12
+
+#endif
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
struct drm_virtgpu_getparam {
__u64 param;
__u32 res_handle;
__u32 size;
union {
- __u32 stride;
+ __u32 blob_mem;
__u32 strides[4]; /* strides[0] is accessible with stride. */
- };
+ };
__u32 num_planes;
__u32 offsets[4];
__u64 format_modifier;
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+ * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+ * VIRTGPU_BLOB_MEM_HOST3D otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
+
#if defined(__cplusplus)
}
#endif
return bo;
}
+/*
+ * Map plane 0 of |bo|. Thin compatibility wrapper that forwards to
+ * gbm_bo_map2() with an implicit plane index of 0.
+ */
+PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t transfer_flags, uint32_t *stride, void **map_data)
+{
+ return gbm_bo_map2(bo, x, y, width, height, transfer_flags, stride, map_data, 0);
+}
+
PUBLIC void gbm_bo_unmap(struct gbm_bo *bo, void *map_data)
{
assert(bo);
return drv_bo_get_plane_fd(bo->bo, plane);
}
-PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t transfer_flags, uint32_t *stride, void **map_data, size_t plane)
-{
- return gbm_bo_map2(bo, x, y, width, height, transfer_flags, stride, map_data, plane);
-}
-
PUBLIC void *gbm_bo_map2(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
uint32_t transfer_flags, uint32_t *stride, void **map_data, int plane)
{
GBM_BO_TRANSFER_READ_WRITE = (GBM_BO_TRANSFER_READ | GBM_BO_TRANSFER_WRITE),
};
+void *
+gbm_bo_map(struct gbm_bo *bo,
+ uint32_t x, uint32_t y, uint32_t width, uint32_t height,
+ uint32_t flags, uint32_t *stride, void **map_data);
+
void
gbm_bo_unmap(struct gbm_bo *bo, void *map_data);
gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane);
void *
-gbm_bo_map(struct gbm_bo *bo,
- uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t flags, uint32_t *stride, void **map_data, size_t plane);
-void *
gbm_bo_map2(struct gbm_bo *bo,
uint32_t x, uint32_t y, uint32_t width, uint32_t height,
uint32_t flags, uint32_t *stride, void **map_data, int plane);
bo->handles[plane].u32 = prime_handle.handle;
}
+ bo->meta.tiling = data->tiling;
return 0;
}
static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+/*
+ * Per-generation modifier tables fed to drv_pick_modifier(); presumably
+ * listed most-preferred first — TODO confirm against drv_pick_modifier().
+ */
+static const uint64_t gen_modifier_order[] = { I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR };
+
+/* Gen 11 additionally supports Y-tiled with color control surfaces (CCS). */
+static const uint64_t gen11_modifier_order[] = { I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR };
+
+/* A modifier table plus its element count, selected per GPU generation. */
+struct modifier_support_t {
+ const uint64_t *order;
+ uint32_t count;
+};
+
struct i915_device {
uint32_t gen;
int32_t has_llc;
+ /* Modifier preference table chosen by i915_get_modifier_order(). */
+ struct modifier_support_t modifier;
};
+/*
+ * Map a PCI device id to a GPU generation. Ids not present in either
+ * lookup table fall through to the default of gen 4.
+ */
static uint32_t i915_get_gen(int device_id)
{
const uint16_t gen3_ids[] = { 0x2582, 0x2592, 0x2772, 0x27A2, 0x27AE,
0x29C2, 0x29B2, 0x29D2, 0xA001, 0xA011 };
+ const uint16_t gen11_ids[] = { 0x4E71, 0x4E61, 0x4E51, 0x4E55, 0x4E57 };
+
unsigned i;
for (i = 0; i < ARRAY_SIZE(gen3_ids); i++)
if (gen3_ids[i] == device_id)
return 3;
+ /* Gen 11 */
+ for (i = 0; i < ARRAY_SIZE(gen11_ids); i++)
+ if (gen11_ids[i] == device_id)
+ return 11;
return 4;
}
+/*
+ * Select the modifier preference table matching the detected generation;
+ * gen 11 gets the CCS-capable table, everything else the generic one.
+ */
+static void i915_get_modifier_order(struct i915_device *i915)
+{
+ if (i915->gen == 11) {
+ i915->modifier.order = gen11_modifier_order;
+ i915->modifier.count = ARRAY_SIZE(gen11_modifier_order);
+ } else {
+ i915->modifier.order = gen_modifier_order;
+ i915->modifier.count = ARRAY_SIZE(gen_modifier_order);
+ }
+}
+
static uint64_t unset_flags(uint64_t current_flags, uint64_t mask)
{
uint64_t value = current_flags & ~mask;
texture_only);
drv_modify_linear_combinations(drv);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
+
+ /* NV12 format for camera, display, decoding and encoding. */
/* IPU3 camera ISP supports only NV12 output. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER |
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
render = unset_flags(render, linear_mask);
scanout_and_render = unset_flags(scanout_and_render, linear_mask);
}
i915->gen = i915_get_gen(device_id);
+ i915_get_modifier_order(i915);
memset(&get_param, 0, sizeof(get_param));
get_param.param = I915_PARAM_HAS_LLC;
static int i915_bo_compute_metadata(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags, const uint64_t *modifiers, uint32_t count)
{
- static const uint64_t modifier_order[] = {
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- };
uint64_t modifier;
+ struct i915_device *i915 = bo->drv->priv;
+ bool huge_bo = (i915->gen <= 11) && (width > 4096);
if (modifiers) {
modifier =
- drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
+ drv_pick_modifier(modifiers, count, i915->modifier.order, i915->modifier.count);
} else {
struct combination *combo = drv_get_combination(bo->drv, format, use_flags);
if (!combo)
modifier = combo->metadata.modifier;
}
+ /*
+ * i915 only supports linear/x-tiled above 4096 wide
+ */
+ if (huge_bo && modifier != I915_FORMAT_MOD_X_TILED && modifier != DRM_FORMAT_MOD_LINEAR) {
+ uint32_t i;
+ for (i = 0; modifiers && i < count; i++) {
+ if (modifiers[i] == I915_FORMAT_MOD_X_TILED)
+ break;
+ }
+ if (i == count)
+ modifier = DRM_FORMAT_MOD_LINEAR;
+ else
+ modifier = I915_FORMAT_MOD_X_TILED;
+ }
+
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
bo->meta.tiling = I915_TILING_NONE;
drv_add_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
BO_USE_SW_MASK | BO_USE_LINEAR | BO_USE_PROTECTED);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
metadata.modifier = DRM_FORMAT_MOD_LINEAR;
drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_DECODER);
drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &metadata, BO_USE_HW_VIDEO_DECODER);
+#ifdef MTK_MT8183
+ // TODO(hiroh): Switch to use NV12 for video decoder on MT8173 as well.
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_DECODER);
+#endif
+
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB for input/output from
+ * hardware decoder/encoder.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+
+ /* NV12 format for encoding and display. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER | BO_USE_CAMERA_READ |
+ BO_USE_CAMERA_WRITE);
#ifdef MTK_MT8183
/* Only for MT8183 Camera subsystem */
- drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_YUYV, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
- drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
/* Private formats for private reprocessing in camera */
drv_add_combination(drv, DRM_FORMAT_MTISP_SXYZW10, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK);
* reprocessing and hence given the private format for MTK. */
if (use_flags & BO_USE_CAMERA_READ)
return DRM_FORMAT_MTISP_SXYZW10;
- /* For non-reprocessing uses, only MT8183 Camera subsystem
- * requires NV12. */
- else if (use_flags & BO_USE_CAMERA_WRITE)
- return DRM_FORMAT_NV12;
#endif
+ if (use_flags & BO_USE_CAMERA_WRITE)
+ return DRM_FORMAT_NV12;
+
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
#ifdef MTK_MT8183
- /* MT8183 camera and decoder subsystems require NV12. */
- if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
- BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER)) {
+ // TODO(hiroh): Switch to use NV12 for video decoder on MT8173 as well.
+ if (use_flags & (BO_USE_HW_VIDEO_DECODER)) {
return DRM_FORMAT_NV12;
}
#endif
+ if (use_flags &
+ (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_ENCODER)) {
+ return DRM_FORMAT_NV12;
+ }
return DRM_FORMAT_YVU420;
default:
return format;
static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_BGR888, DRM_FORMAT_BGR565};
+ DRM_FORMAT_BGR888, DRM_FORMAT_BGR565 };
+
+static const uint32_t texture_only_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
+ DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
static int meson_init(struct driver *drv)
{
drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
&LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT);
+ /* YUV formats are texture-only on this driver. */
+ drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
+
+ /* NV12 additionally serves camera and codec buffers; NV21 only the encoder. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
+
return drv_modify_linear_combinations(drv);
}
#ifdef DRV_MSM
#include <assert.h>
+#include <dlfcn.h>
#include <drm_fourcc.h>
#include <errno.h>
#include <inttypes.h>
y_stride = ALIGN(width, VENUS_STRIDE_ALIGN);
uv_stride = ALIGN(width, VENUS_STRIDE_ALIGN);
y_scanline = ALIGN(height, VENUS_SCANLINE_ALIGN * 2);
- uv_scanline = ALIGN(DIV_ROUND_UP(height, 2), VENUS_SCANLINE_ALIGN);
+ uv_scanline = ALIGN(DIV_ROUND_UP(height, 2),
+ VENUS_SCANLINE_ALIGN * (bo->meta.tiling ? 2 : 1));
y_plane = y_stride * y_scanline;
uv_plane = uv_stride * uv_scanline;
if (bo->meta.tiling == MSM_UBWC_TILING) {
+ y_plane = ALIGN(y_plane, PLANE_SIZE_ALIGN);
+ uv_plane = ALIGN(uv_plane, PLANE_SIZE_ALIGN);
y_plane += get_ubwc_meta_size(width, height, 32, 8);
uv_plane += get_ubwc_meta_size(width >> 1, height >> 1, 16, 8);
extra_padding = NV12_UBWC_PADDING(y_stride);
}
}
+/**
+ * Check for buggy apps that are known to not support modifiers, to avoid surprising them
+ * with a UBWC buffer. Returns true when UBWC should be disabled for this process.
+ */
+static bool should_avoid_ubwc(void)
+{
+#ifndef __ANDROID__
+ /* waffle is buggy: it requests a renderable buffer (which on qcom platforms, we
+ * want to use UBWC), and then passes it to the kernel discarding the modifier.
+ * So mesa ends up correctly rendering to it as tiled+compressed, but the kernel
+ * tries to display it as linear. Other platforms do not see this issue, simply
+ * because they only use compressed (ex, AFBC) with the BO_USE_SCANOUT flag.
+ *
+ * Probe the already-loaded symbols of this process for a waffle entry point.
+ * See b/163137550
+ */
+ if (dlsym(RTLD_DEFAULT, "waffle_display_connect")) {
+ drv_log("WARNING: waffle detected, disabling UBWC\n");
+ return true;
+ }
+#endif
+ return false;
+}
+
static int msm_init(struct driver *drv)
{
struct format_metadata metadata;
uint64_t render_use_flags = BO_USE_RENDER_MASK | BO_USE_SCANOUT;
uint64_t texture_use_flags = BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_DECODER;
- uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_OFTEN |
+ uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_MASK |
BO_USE_LINEAR | BO_USE_PROTECTED);
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
&LINEAR_METADATA, texture_use_flags);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER);
-
/* The camera stack standardizes on NV12 for YUV buffers. */
+ /* YVU420 and NV12 formats for camera, display and encoding. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_ENCODER);
+
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
/* Android CTS tests require this. */
drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
drv_modify_linear_combinations(drv);
+ if (should_avoid_ubwc())
+ return 0;
+
metadata.tiling = MSM_UBWC_TILING;
metadata.priority = 2;
metadata.modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED;
static uint32_t msm_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
switch (format) {
+ case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ /* Camera subsystem requires NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ return DRM_FORMAT_NV12;
+ /*HACK: See b/28671744 */
+ return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
return DRM_FORMAT_NV12;
default:
'(' -name '*.[ch]' -or -name '*.cc' ')' \
-not -name 'virtgpu_drm.h' \
-not -name 'gbm.h' -not -name 'virgl_hw.h' \
+ -not -name 'virgl_protocol.h' \
-exec clang-format -style=file -i {} +
drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
BO_USE_TEXTURE_MASK);
- /*
- * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the
- * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future.
- */
- drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
+ /* NV12 format for camera, display, decoding and encoding. */
/* Camera ISP supports only NV12 output. */
drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
- BO_USE_HW_VIDEO_ENCODER | BO_USE_SCANOUT);
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
+ BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
drv_modify_linear_combinations(drv);
/*
* R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
- * from camera.
+ * from camera and input/output from hardware decoder/encoder.
*/
drv_add_combination(drv, DRM_FORMAT_R8, &metadata,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK |
- BO_USE_LINEAR | BO_USE_PROTECTED);
+ BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
return 0;
}
#include "util.h"
static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888 };
+ DRM_FORMAT_XRGB8888 };
static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12,
- DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+ DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
static int synaptics_init(struct driver *drv)
{
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VC4_CREATE_BO, &bo_create);
if (ret) {
- drv_log("DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", bo->meta.total_size);
+ drv_log("DRM_IOCTL_VC4_CREATE_BO failed (size=%zu)\n", bo->meta.total_size);
return -errno;
}
#include <xf86drm.h>
#include "drv_priv.h"
+#include "external/virgl_hw.h"
+#include "external/virgl_protocol.h"
+#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"
-#include "virgl_hw.h"
-#include "virtgpu_drm.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
enum feature_id {
feat_3d,
feat_capset_fix,
+ feat_resource_blob,
+ feat_host_visible,
+ feat_host_cross_device,
feat_max,
};
x, #x, 0 \
}
-static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
- FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };
+static struct feature features[] = {
+ FEATURE(VIRTGPU_PARAM_3D_FEATURES), FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
+ FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
+ FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
+};
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);
+ if (use_flags & BO_USE_PROTECTED) {
+ handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
+ } else {
+ // Make sure we don't set both flags, since that could be mistaken for
+ // protected. Give OFTEN priority over RARELY.
+ if (use_flags & BO_USE_SW_READ_OFTEN) {
+ handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
+ VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
+ } else {
+ handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
+ VIRGL_BIND_MINIGBM_SW_READ_RARELY);
+ }
+ if (use_flags & BO_USE_SW_WRITE_OFTEN) {
+ handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
+ VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
+ } else {
+ handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
+ VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
+ }
+ }
- // All host drivers only support linear camera buffer formats. If
- // that changes, this will need to be modified.
- handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);
+ handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
+ handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
+ handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
+ VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
+ handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
+ VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);
if (use_flags) {
drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);
+ drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
- BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
+ BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
- BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
- drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
- drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
+ BO_USE_HW_VIDEO_ENCODER);
return drv_modify_linear_combinations(drv);
}
drv->priv = NULL;
}
+/*
+ * Allocate |bo| as a host blob resource (VIRTGPU_BLOB_MEM_HOST3D), sending a
+ * VIRGL_CCMD_PIPE_RESOURCE_CREATE command so the host creates the backing
+ * store. Returns 0 on success or -errno if the ioctl fails.
+ */
+static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
+{
+ int ret;
+ uint32_t stride;
+ uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
+ struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+
+ uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ /* Buffers consumed by non-GPU hardware also need cross-device sharing. */
+ if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
+ blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
+ }
+
+ stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
+ drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
+ bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
+ /* NOTE: meta.tiling is repurposed to stash the blob flags; the transfer
+ * paths test it against VIRTGPU_BLOB_FLAG_USE_MAPPABLE later. */
+ bo->meta.tiling = blob_flags;
+
+ /* Build the virgl command the host executes to create the resource. */
+ cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
+ cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
+ cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
+ cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
+ cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
+ cmd[VIRGL_PIPE_RES_CREATE_BIND] = use_flags_to_bind(bo->meta.use_flags);
+ cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
+
+ drm_rc_blob.cmd = (uint64_t)&cmd;
+ drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
+ drm_rc_blob.size = bo->meta.total_size;
+ drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
+ drm_rc_blob.blob_flags = blob_flags;
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+ if (ret < 0) {
+ drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
+ return -errno;
+ }
+
+ /* All planes of a blob share the single GEM handle returned by the kernel. */
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+
+ return 0;
+}
+
+/*
+ * Decide whether a buffer with the given format/use flags should be allocated
+ * via the blob path. Without VIRTIO_GPU_NEXT the early return compiles the
+ * whole blob path out and this always answers false.
+ */
+static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ // TODO(gurchetansingh): remove once all minigbm users are blob-safe
+#ifndef VIRTIO_GPU_NEXT
+ return false;
+#endif
+
+ // Only use blob when host gbm is available
+ if (!priv->host_gbm_enabled)
+ return false;
+
+ // Focus on non-GPU apps for now
+ if (use_flags & (BO_USE_RENDERING | BO_USE_TEXTURE))
+ return false;
+
+ // Simple, strictly defined formats for now
+ if (format != DRM_FORMAT_YVU420_ANDROID && format != DRM_FORMAT_R8)
+ return false;
+
+ if (use_flags &
+ (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR | BO_USE_NON_GPU_HW))
+ return true;
+
+ return false;
+}
+
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
+ if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
+ should_use_blob(bo->drv, format, use_flags))
+ return virtio_gpu_bo_create_blob(bo->drv, bo);
+
if (features[feat_3d].enabled)
return virtio_virgl_bo_create(bo, width, height, format, use_flags);
else
BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
return 0;
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;
+ if (mapping->rect.x || mapping->rect.y) {
+ /*
+ * virglrenderer uses the box parameters and assumes that offset == 0 for planar
+ * images
+ */
+ if (bo->meta.num_planes == 1) {
+ xfer.offset =
+ (bo->meta.strides[0] * mapping->rect.y) +
+ drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
+ }
+ }
+
if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
// Unfortunately, the kernel doesn't actually pass the guest layer_stride
// and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
if (!(mapping->vma->map_flags & BO_MAP_WRITE))
return 0;
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;
+ if (mapping->rect.x || mapping->rect.y) {
+ /*
+ * virglrenderer uses the box parameters and assumes that offset == 0 for planar
+ * images
+ */
+ if (bo->meta.num_planes == 1) {
+ xfer.offset =
+ (bo->meta.strides[0] * mapping->rect.y) +
+ drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
+ }
+ }
+
// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
// the level to work around this.