LOCAL_CPP_EXTENSION := .cc
LOCAL_SRC_FILES += \
- cros_gralloc/cros_alloc_device.cc \
+ cros_gralloc/cros_gralloc_buffer.cc \
+ cros_gralloc/cros_gralloc_driver.cc \
cros_gralloc/cros_gralloc_helpers.cc \
- cros_gralloc/cros_gralloc_module.cc
+ cros_gralloc/gralloc0/gralloc0.cc
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
MINIGBM_GRALLOC_MK := $(call my-dir)/Android.gralloc.mk
LOCAL_PATH := $(call my-dir)
intel_drivers := i915 i965
-include $(CLEAR_VARS)
-
-SUBDIRS := cros_gralloc
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libdrm
-LOCAL_SRC_FILES := \
+MINIGBM_SRC := \
amdgpu.c \
- cirrus.c \
drv.c \
evdi.c \
exynos.c \
- gma500.c \
+ helpers_array.c \
helpers.c \
i915.c \
marvell.c \
mediatek.c \
+ meson.c \
+ msm.c \
nouveau.c \
+ radeon.c \
rockchip.c \
tegra.c \
udl.c \
vgem.c \
virtio_gpu.c
-include $(MINIGBM_GRALLOC_MK)
-
-LOCAL_CPPFLAGS += -std=c++11 -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64
-LOCAL_CFLAGS += -Wall -Wsign-compare -Wpointer-arith \
- -Wcast-qual -Wcast-align \
- -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64
+MINIGBM_CPPFLAGS := -std=c++14
+MINIGBM_CFLAGS := \
+ -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64 \
+ -Wall -Wsign-compare -Wpointer-arith \
+ -Wcast-qual -Wcast-align \
+ -Wno-unused-parameter
ifneq ($(filter $(intel_drivers), $(BOARD_GPU_DRIVERS)),)
-LOCAL_CPPFLAGS += -DDRV_I915
-LOCAL_CFLAGS += -DDRV_I915
+MINIGBM_CPPFLAGS += -DDRV_I915
+MINIGBM_CFLAGS += -DDRV_I915
LOCAL_SHARED_LIBRARIES += libdrm_intel
endif
+ifneq ($(filter meson, $(BOARD_GPU_DRIVERS)),)
+MINIGBM_CPPFLAGS += -DDRV_MESON
+MINIGBM_CFLAGS += -DDRV_MESON
+endif
+
+include $(CLEAR_VARS)
+
+SUBDIRS := cros_gralloc
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libdrm
+
+LOCAL_SRC_FILES := $(MINIGBM_SRC)
+
+include $(MINIGBM_GRALLOC_MK)
+
+LOCAL_CFLAGS := $(MINIGBM_CFLAGS)
+LOCAL_CPPFLAGS := $(MINIGBM_CPPFLAGS)
+
LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
LOCAL_MODULE_TAGS := optional
# The preferred path for vendor HALs is /vendor/lib/hw
LOCAL_MODULE_RELATIVE_PATH := hw
LOCAL_MODULE_CLASS := SHARED_LIBRARIES
LOCAL_MODULE_SUFFIX := $(TARGET_SHLIB_SUFFIX)
+LOCAL_HEADER_LIBRARIES += \
+ libhardware_headers libnativebase_headers libsystem_headers
+LOCAL_SHARED_LIBRARIES += libnativewindow libsync liblog
+LOCAL_STATIC_LIBRARIES += libarect
+include $(BUILD_SHARED_LIBRARY)
+
+
+include $(CLEAR_VARS)
+LOCAL_SHARED_LIBRARIES := liblog
+LOCAL_STATIC_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES += $(MINIGBM_SRC) gbm.c gbm_helpers.c
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+LOCAL_CFLAGS := $(MINIGBM_CFLAGS)
+LOCAL_CPPFLAGS := $(MINIGBM_CPPFLAGS)
+
+LOCAL_MODULE := libminigbm
+LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)
-#endif
+endif
value: "https://chromium.googlesource.com/chromiumos/platform/minigbm/"
}
version: ""
- last_upgrade_date { year: 2017 month: 6 day: 12 }
+ last_upgrade_date { year: 2018 month: 6 day: 25 }
license_type: NOTICE
}
ifdef DRV_AMDGPU
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_amdgpu)
- LDLIBS += -lamdgpuaddr
+ LDLIBS += -ldrm_amdgpu -ldl
endif
ifdef DRV_EXYNOS
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_exynos)
ifdef DRV_I915
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_intel)
endif
+ifdef DRV_MESON
+ CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_meson)
+endif
+ifdef DRV_RADEON
+ CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_radeon)
+endif
ifdef DRV_ROCKCHIP
CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_rockchip)
endif
+ifdef DRV_VC4
+ CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_vc4)
+endif
CPPFLAGS += $(PC_CFLAGS)
LDLIBS += $(PC_LIBS)
#include <sys/mman.h>
#include <xf86drm.h>
-#include "addrinterface.h"
+#include "dri.h"
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
-#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
-#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
+#ifdef __ANDROID__
+#define DRI_PATH "/vendor/lib/dri/radeonsi_dri.so"
+#else
+#define DRI_PATH "/usr/lib64/dri/radeonsi_dri.so"
#endif
-// clang-format off
-#define mmCC_RB_BACKEND_DISABLE 0x263d
-#define mmGB_TILE_MODE0 0x2644
-#define mmGB_MACROTILE_MODE0 0x2664
-#define mmGB_ADDR_CONFIG 0x263e
-#define mmMC_ARB_RAMCFG 0x9d8
-
-enum {
- FAMILY_UNKNOWN,
- FAMILY_SI,
- FAMILY_CI,
- FAMILY_KV,
- FAMILY_VI,
- FAMILY_CZ,
- FAMILY_PI,
- FAMILY_LAST,
+#define TILE_TYPE_LINEAR 0
+/* DRI backend decides tiling in this case. */
+#define TILE_TYPE_DRI 1
+
+struct amdgpu_priv {
+ struct dri_driver dri;
+ int drm_version;
};
-// clang-format on
-const static uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XBGR8888,
+const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
-const static uint32_t texture_source_formats[] = { DRM_FORMAT_NV21, DRM_FORMAT_NV12 };
+const static uint32_t texture_source_formats[] = { DRM_FORMAT_BGR888, DRM_FORMAT_GR88,
+ DRM_FORMAT_R8, DRM_FORMAT_NV21,
+ DRM_FORMAT_NV12, DRM_FORMAT_YVU420_ANDROID };
-static int amdgpu_set_metadata(int fd, uint32_t handle, struct amdgpu_bo_metadata *info)
+static int amdgpu_init(struct driver *drv)
{
- struct drm_amdgpu_gem_metadata args = { 0 };
-
- if (!info)
- return -EINVAL;
-
- args.handle = handle;
- args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
- args.data.flags = info->flags;
- args.data.tiling_info = info->tiling_info;
+ struct amdgpu_priv *priv;
+ drmVersionPtr drm_version;
+ struct format_metadata metadata;
+ uint64_t use_flags = BO_USE_RENDER_MASK;
- if (info->size_metadata > sizeof(args.data.data))
- return -EINVAL;
+ priv = calloc(1, sizeof(struct amdgpu_priv));
+ if (!priv)
+ return -ENOMEM;
- if (info->size_metadata) {
- args.data.data_size_bytes = info->size_metadata;
- memcpy(args.data.data, info->umd_metadata, info->size_metadata);
+ drm_version = drmGetVersion(drv_get_fd(drv));
+ if (!drm_version) {
+ free(priv);
+ return -ENODEV;
}
- return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args));
-}
-
-static int amdgpu_read_mm_regs(int fd, unsigned dword_offset, unsigned count, uint32_t instance,
- uint32_t flags, uint32_t *values)
-{
- struct drm_amdgpu_info request;
-
- memset(&request, 0, sizeof(request));
- request.return_pointer = (uintptr_t)values;
- request.return_size = count * sizeof(uint32_t);
- request.query = AMDGPU_INFO_READ_MMR_REG;
- request.read_mmr_reg.dword_offset = dword_offset;
- request.read_mmr_reg.count = count;
- request.read_mmr_reg.instance = instance;
- request.read_mmr_reg.flags = flags;
-
- return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info));
-}
-
-static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info)
-{
- int ret;
- uint32_t instance;
-
- if (!gpu_info)
- return -EINVAL;
-
- instance = AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT;
-
- ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0,
- &gpu_info->backend_disable[0]);
- if (ret)
- return ret;
- /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */
- gpu_info->backend_disable[0] = (gpu_info->backend_disable[0] >> 16) & 0xff;
-
- ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0, gpu_info->gb_tile_mode);
- if (ret)
- return ret;
-
- ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0,
- gpu_info->gb_macro_tile_mode);
- if (ret)
- return ret;
-
- ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0, &gpu_info->gb_addr_cfg);
- if (ret)
- return ret;
-
- ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0, &gpu_info->mc_arb_ramcfg);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in)
-{
- return malloc(in->sizeInBytes);
-}
-
-static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in)
-{
- free(in->pVirtAddr);
- return ADDR_OK;
-}
-
-static int amdgpu_addrlib_compute(void *addrlib, uint32_t width, uint32_t height, uint32_t format,
- uint32_t usage, uint32_t *tiling_flags,
- ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out)
-{
- ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = { 0 };
- ADDR_TILEINFO addr_tile_info = { 0 };
- ADDR_TILEINFO addr_tile_info_out = { 0 };
- uint32_t bits_per_pixel;
-
- addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
-
- /* Set the requested tiling mode. */
- addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1;
- if (usage & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
- addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED;
- else if (width <= 16 || height <= 16)
- addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1;
-
- bits_per_pixel = drv_stride_from_format(format, 1, 0) * 8;
- /* Bits per pixel should be calculated from format*/
- addr_surf_info_in.bpp = bits_per_pixel;
- addr_surf_info_in.numSamples = 1;
- addr_surf_info_in.width = width;
- addr_surf_info_in.height = height;
- addr_surf_info_in.numSlices = 1;
- addr_surf_info_in.pTileInfo = &addr_tile_info;
- addr_surf_info_in.tileIndex = -1;
-
- /* This disables incorrect calculations (hacks) in addrlib. */
- addr_surf_info_in.flags.noStencil = 1;
-
- /* Set the micro tile type. */
- if (usage & BO_USE_SCANOUT)
- addr_surf_info_in.tileType = ADDR_DISPLAYABLE;
- else
- addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE;
-
- addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
- addr_out->pTileInfo = &addr_tile_info_out;
-
- if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in, addr_out) != ADDR_OK)
- return -EINVAL;
-
- ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = { 0 };
- ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = { 0 };
- ADDR_TILEINFO s_tile_hw_info_out = { 0 };
-
- s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT);
- /* Convert from real value to HW value */
- s_in.reverse = 0;
- s_in.pTileInfo = &addr_tile_info_out;
- s_in.tileIndex = -1;
-
- s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT);
- s_out.pTileInfo = &s_tile_hw_info_out;
-
- if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK)
- return -EINVAL;
-
- if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1)
- /* 2D_TILED_THIN1 */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);
- else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1)
- /* 1D_TILED_THIN1 */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2);
- else
- /* LINEAR_ALIGNED */
- *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1);
-
- *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, drv_log_base2(addr_tile_info_out.bankWidth));
- *tiling_flags |=
- AMDGPU_TILING_SET(BANK_HEIGHT, drv_log_base2(addr_tile_info_out.bankHeight));
- *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, s_tile_hw_info_out.tileSplitBytes);
- *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT,
- drv_log_base2(addr_tile_info_out.macroAspectRatio));
- *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, s_tile_hw_info_out.pipeConfig);
- *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks);
-
- return 0;
-}
-
-static void *amdgpu_addrlib_init(int fd)
-{
- int ret;
- ADDR_CREATE_INPUT addr_create_input = { 0 };
- ADDR_CREATE_OUTPUT addr_create_output = { 0 };
- ADDR_REGISTER_VALUE reg_value = { 0 };
- ADDR_CREATE_FLAGS create_flags = { { 0 } };
- ADDR_E_RETURNCODE addr_ret;
-
- addr_create_input.size = sizeof(ADDR_CREATE_INPUT);
- addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT);
-
- struct amdgpu_gpu_info gpu_info = { 0 };
-
- ret = amdgpu_query_gpu(fd, &gpu_info);
+ priv->drm_version = drm_version->version_minor;
+ drmFreeVersion(drm_version);
- if (ret) {
- fprintf(stderr, "[%s]failed with error =%d\n", __func__, ret);
- return NULL;
- }
+ drv->priv = priv;
- reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3;
- reg_value.gbAddrConfig = gpu_info.gb_addr_cfg;
- reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2;
-
- reg_value.backendDisables = gpu_info.backend_disable[0];
- reg_value.pTileConfig = gpu_info.gb_tile_mode;
- reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode) / sizeof(gpu_info.gb_tile_mode[0]);
- reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode;
- reg_value.noOfMacroEntries =
- sizeof(gpu_info.gb_macro_tile_mode) / sizeof(gpu_info.gb_macro_tile_mode[0]);
- create_flags.value = 0;
- create_flags.useTileIndex = 1;
-
- addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
-
- addr_create_input.chipFamily = FAMILY_CZ;
- addr_create_input.createFlags = create_flags;
- addr_create_input.callbacks.allocSysMem = alloc_sys_mem;
- addr_create_input.callbacks.freeSysMem = free_sys_mem;
- addr_create_input.callbacks.debugPrint = 0;
- addr_create_input.regValue = reg_value;
-
- addr_ret = AddrCreate(&addr_create_input, &addr_create_output);
-
- if (addr_ret != ADDR_OK) {
- fprintf(stderr, "[%s]failed error =%d\n", __func__, addr_ret);
- return NULL;
+ if (dri_init(drv, DRI_PATH, "radeonsi")) {
+ free(priv);
+ drv->priv = NULL;
+ return -ENODEV;
}
- return addr_create_output.hLib;
-}
-
-static int amdgpu_init(struct driver *drv)
-{
- int ret;
- void *addrlib;
- struct format_metadata metadata;
- uint32_t flags = BO_USE_RENDER_MASK;
+ metadata.tiling = TILE_TYPE_LINEAR;
+ metadata.priority = 1;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
- if (!addrlib)
- return -1;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
- drv->priv = addrlib;
-
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
-
- drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_SCANOUT);
-
- metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
- metadata.priority = 2;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
-
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &metadata, BO_USE_TEXTURE_MASK);
+ /* Linear formats supported by display. */
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
- metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
- metadata.priority = 3;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
-
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
-
- flags &= ~BO_USE_SW_WRITE_OFTEN;
- flags &= ~BO_USE_SW_READ_OFTEN;
- flags &= ~BO_USE_LINEAR;
-
- metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
- metadata.priority = 4;
+ /* YUV formats for camera and display. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT);
+
+ drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
+
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
+ * from camera.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+
+ /*
+ * The following formats will be allocated by the DRI backend and may be potentially tiled.
+ * Since format modifier support hasn't been implemented fully yet, it's not
+ * possible to enumerate the different types of buffers (like i915 can).
+ */
+ use_flags &= ~BO_USE_RENDERSCRIPT;
+ use_flags &= ~BO_USE_SW_WRITE_OFTEN;
+ use_flags &= ~BO_USE_SW_READ_OFTEN;
+ use_flags &= ~BO_USE_LINEAR;
+
+ metadata.tiling = TILE_TYPE_DRI;
+ metadata.priority = 2;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
- drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
- drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
+ /* Potentially tiled formats supported by display. */
+ drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
+ drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
-
- metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
- metadata.priority = 5;
-
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
-
- return ret;
+ return 0;
}
static void amdgpu_close(struct driver *drv)
{
- AddrDestroy(drv->priv);
+ dri_close(drv);
+ free(drv->priv);
drv->priv = NULL;
}
-static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t usage)
+static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
- void *addrlib = bo->drv->priv;
- union drm_amdgpu_gem_create gem_create;
- struct amdgpu_bo_metadata metadata = { 0 };
- ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = { 0 };
- uint32_t tiling_flags = 0;
- uint32_t gem_create_flags = 0;
- size_t plane;
int ret;
+ uint32_t plane, stride;
+ struct combination *combo;
+ union drm_amdgpu_gem_create gem_create;
+ struct amdgpu_priv *priv = bo->drv->priv;
- if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21) {
- drv_bo_from_format(bo, ALIGN(width, 64), height, format);
- } else {
- if (amdgpu_addrlib_compute(addrlib, width, height, format, usage, &tiling_flags,
- &addr_out) < 0)
- return -EINVAL;
-
- bo->tiling = tiling_flags;
- /* RGB has 1 plane only */
- bo->offsets[0] = 0;
- bo->total_size = bo->sizes[0] = addr_out.surfSize;
- bo->strides[0] = addr_out.pixelPitch * DIV_ROUND_UP(addr_out.pixelBits, 8);
- }
+ combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ if (combo->metadata.tiling == TILE_TYPE_DRI)
+ return dri_bo_create(bo, width, height, format, use_flags);
- if (usage & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN |
- BO_USE_SW_WRITE_RARELY | BO_USE_SW_READ_RARELY))
- gem_create_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ stride = drv_stride_from_format(format, width, 0);
+ if (format == DRM_FORMAT_YVU420_ANDROID)
+ stride = ALIGN(stride, 128);
else
- gem_create_flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ stride = ALIGN(stride, 64);
- memset(&gem_create, 0, sizeof(gem_create));
+ drv_bo_from_format(bo, stride, height, format);
+ memset(&gem_create, 0, sizeof(gem_create));
gem_create.in.bo_size = bo->total_size;
- gem_create.in.alignment = addr_out.baseAlign;
- /* Set the placement. */
- gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
- gem_create.in.domain_flags = gem_create_flags;
+ gem_create.in.alignment = 256;
+ gem_create.in.domain_flags = 0;
+
+ if (use_flags & (BO_USE_LINEAR | BO_USE_SW))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
+ if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT)))
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
+	/*
+	 * If drm_version >= 21, everything exposes explicit synchronization
+	 * primitives and chromeos/arc++ will use them. Disable implicit
+	 * synchronization.
+	 */
+ if (priv->drm_version >= 21) {
+ gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+ }
+
/* Allocate the buffer with the preferred heap. */
ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
sizeof(gem_create));
-
if (ret < 0)
return ret;
- metadata.tiling_info = tiling_flags;
-
for (plane = 0; plane < bo->num_planes; plane++)
bo->handles[plane].u32 = gem_create.out.handle;
- ret = amdgpu_set_metadata(drv_get_fd(bo->drv), bo->handles[0].u32, &metadata);
+ return 0;
+}
- return ret;
+static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
+{
+ struct combination *combo;
+ combo = drv_get_combination(bo->drv, data->format, data->use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ if (combo->metadata.tiling == TILE_TYPE_DRI)
+ return dri_bo_import(bo, data);
+ else
+ return drv_prime_bo_import(bo, data);
+}
+
+static int amdgpu_destroy_bo(struct bo *bo)
+{
+ if (bo->priv)
+ return dri_bo_destroy(bo);
+ else
+ return drv_gem_bo_destroy(bo);
}
-static void *amdgpu_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
union drm_amdgpu_gem_mmap gem_map;
+ if (bo->priv)
+ return dri_bo_map(bo, vma, plane, map_flags);
+
memset(&gem_map, 0, sizeof(gem_map));
gem_map.in.handle = bo->handles[plane].u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
+ drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
return MAP_FAILED;
}
- data->length = bo->total_size;
- return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+ vma->length = bo->total_size;
+
+ return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.out.addr_ptr);
}
-static uint32_t amdgpu_resolve_format(uint32_t format)
+static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
+{
+ if (bo->priv)
+ return dri_bo_unmap(bo, vma);
+ else
+ return munmap(vma->addr, vma->length);
+}
+
+static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags)
{
switch (format) {
+ case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ /* Camera subsystem requires NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ return DRM_FORMAT_NV12;
+	/* HACK: See b/28671744 */
+ return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
return DRM_FORMAT_NV12;
default:
}
}
-struct backend backend_amdgpu = {
+const struct backend backend_amdgpu = {
.name = "amdgpu",
.init = amdgpu_init,
.close = amdgpu_close,
- .bo_create = amdgpu_bo_create,
- .bo_destroy = drv_gem_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = amdgpu_bo_map,
+ .bo_create = amdgpu_create_bo,
+ .bo_destroy = amdgpu_destroy_bo,
+ .bo_import = amdgpu_import_bo,
+ .bo_map = amdgpu_map_bo,
+ .bo_unmap = amdgpu_unmap_bo,
.resolve_format = amdgpu_resolve_format,
};
+++ /dev/null
-/*
- * Copyright 2014 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-const static uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888 };
-
-static int cirrus_init(struct driver *drv)
-{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
-
- return drv_modify_linear_combinations(drv);
-}
-
-struct backend backend_cirrus = {
- .name = "cirrus",
- .init = cirrus_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
-};
SRCS = $(wildcard *.cc)
SRCS += $(wildcard ../*.c)
+
+SRCS += $(wildcard gralloc0/*.cc)
+
SOURCES = $(filter-out ../gbm%, $(SRCS))
PKG_CONFIG ?= pkg-config
LIBDRM_LIBS := $(shell $(PKG_CONFIG) --libs libdrm)
CPPFLAGS += -Wall -fPIC -Werror -flto $(LIBDRM_CFLAGS)
-CXXFLAGS += -std=c++11
+CXXFLAGS += -std=c++14
CFLAGS += -std=c99
-LIBS += -shared -lcutils -lhardware $(LIBDRM_LIBS)
+LIBS += -shared -lcutils -lhardware -lsync $(LIBDRM_LIBS)
OBJS = $(foreach source, $(SOURCES), $(addsuffix .o, $(basename $(source))))
+++ /dev/null
-/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "cros_gralloc.h"
-
-static struct cros_gralloc_bo *cros_gralloc_bo_create(struct driver *drv, int width, int height,
- int format, int usage)
-{
- uint64_t drv_usage;
- uint32_t drv_format;
- struct combination *combo;
- struct cros_gralloc_bo *bo;
-
- drv_format = cros_gralloc_convert_format(format);
- drv_format = drv_resolve_format(drv, drv_format);
- drv_usage = cros_gralloc_convert_flags(usage);
-
- combo = drv_get_combination(drv, drv_format, drv_usage);
-
- if (!combo && (usage & GRALLOC_USAGE_HW_COMPOSER)) {
- drv_usage &= ~BO_USE_SCANOUT;
- combo = drv_get_combination(drv, drv_format, drv_usage);
- }
-
- if (!combo) {
- cros_gralloc_error("Unsupported combination -- HAL format: %u, HAL flags: %u, "
- "drv_format: %4.4s, drv_flags: %llu",
- format, usage, reinterpret_cast<char *>(&drv_format),
- static_cast<unsigned long long>(drv_usage));
- return NULL;
- }
-
- bo = new cros_gralloc_bo();
-
- bo->bo = drv_bo_create(drv, width, height, drv_format, drv_usage);
- if (!bo->bo) {
- delete bo;
- cros_gralloc_error("Failed to create bo.");
- return NULL;
- }
-
- /*
- * If there is a desire for more than one kernel buffer, this can be
- * removed once the ArcCodec and Wayland service have the ability to
- * send more than one fd. GL/Vulkan drivers may also have to modified.
- */
- if (drv_num_buffers_per_bo(bo->bo) != 1) {
- drv_bo_destroy(bo->bo);
- delete bo;
- cros_gralloc_error("Can only support one buffer per bo.");
- return NULL;
- }
-
- bo->refcount = 1;
-
- return bo;
-}
-
-static struct cros_gralloc_handle *cros_gralloc_handle_from_bo(struct bo *bo)
-{
- uint64_t mod;
- size_t num_planes;
- struct cros_gralloc_handle *hnd;
-
- hnd = new cros_gralloc_handle();
-
- num_planes = drv_bo_get_num_planes(bo);
-
- hnd->base.version = sizeof(hnd->base);
- hnd->base.numFds = num_planes;
- hnd->base.numInts = handle_data_size - num_planes;
-
- for (size_t p = 0; p < num_planes; p++) {
- hnd->fds[p] = drv_bo_get_plane_fd(bo, p);
- hnd->strides[p] = drv_bo_get_plane_stride(bo, p);
- hnd->offsets[p] = drv_bo_get_plane_offset(bo, p);
- hnd->sizes[p] = drv_bo_get_plane_size(bo, p);
-
- mod = drv_bo_get_plane_format_modifier(bo, p);
- hnd->format_modifiers[2 * p] = static_cast<uint32_t>(mod >> 32);
- hnd->format_modifiers[2 * p + 1] = static_cast<uint32_t>(mod);
- }
-
- hnd->width = drv_bo_get_width(bo);
- hnd->height = drv_bo_get_height(bo);
- hnd->format = drv_bo_get_format(bo);
- hnd->pixel_stride = drv_bo_get_stride_in_pixels(bo);
-
- hnd->magic = cros_gralloc_magic;
-
- return hnd;
-}
-
-static int cros_gralloc_alloc(alloc_device_t *dev, int w, int h, int format, int usage,
- buffer_handle_t *handle, int *stride)
-{
- auto mod = (struct cros_gralloc_module *)dev->common.module;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- auto bo = cros_gralloc_bo_create(mod->drv, w, h, format, usage);
- if (!bo)
- return CROS_GRALLOC_ERROR_NO_RESOURCES;
-
- auto hnd = cros_gralloc_handle_from_bo(bo->bo);
- hnd->droid_format = static_cast<int32_t>(format);
- hnd->usage = static_cast<int32_t>(usage);
-
- mod->handles[hnd].registrations = 0;
- mod->handles[hnd].bo = bo;
- bo->hnd = hnd;
-
- mod->buffers[drv_bo_get_plane_handle(bo->bo, 0).u32] = bo;
-
- *stride = static_cast<int>(hnd->pixel_stride);
- *handle = &hnd->base;
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-static int cros_gralloc_free(alloc_device_t *dev, buffer_handle_t handle)
-{
- struct cros_gralloc_bo *bo;
- auto hnd = (struct cros_gralloc_handle *)handle;
- auto mod = (struct cros_gralloc_module *)dev->common.module;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- if (cros_gralloc_validate_handle(hnd)) {
- cros_gralloc_error("Invalid handle.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
- cros_gralloc_error("Invalid Reference.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (mod->handles[hnd].registrations > 0) {
- cros_gralloc_error("Deallocating before unregistering.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- return cros_gralloc_decrement_reference_count(mod, bo);
-}
-
-static int cros_gralloc_close(struct hw_device_t *dev)
-{
- auto mod = (struct cros_gralloc_module *)dev->module;
- auto alloc = (struct alloc_device_t *)dev;
-
- if (mod->drv) {
- drv_destroy(mod->drv);
- mod->drv = NULL;
- }
-
- mod->buffers.clear();
- mod->handles.clear();
-
- delete alloc;
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-int cros_gralloc_open(const struct hw_module_t *mod, const char *name, struct hw_device_t **dev)
-{
- auto module = (struct cros_gralloc_module *)mod;
- std::lock_guard<std::mutex> lock(module->mutex);
-
- if (module->drv)
- return CROS_GRALLOC_ERROR_NONE;
-
- if (strcmp(name, GRALLOC_HARDWARE_GPU0)) {
- cros_gralloc_error("Incorrect device name - %s.", name);
- return CROS_GRALLOC_ERROR_UNSUPPORTED;
- }
-
- if (cros_gralloc_rendernode_open(&module->drv)) {
- cros_gralloc_error("Failed to open render node.");
- return CROS_GRALLOC_ERROR_NO_RESOURCES;
- }
-
- auto alloc = new alloc_device_t();
-
- alloc->alloc = cros_gralloc_alloc;
- alloc->free = cros_gralloc_free;
- alloc->common.tag = HARDWARE_DEVICE_TAG;
- alloc->common.version = 0;
- alloc->common.module = (hw_module_t *)mod;
- alloc->common.close = cros_gralloc_close;
-
- *dev = &alloc->common;
-
- return CROS_GRALLOC_ERROR_NONE;
-}
+++ /dev/null
-/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GBM_GRALLOC_H
-#define GBM_GRALLOC_H
-
-#include "cros_gralloc_helpers.h"
-
-#include <mutex>
-#include <unordered_map>
-#include <unordered_set>
-
-struct cros_gralloc_bo {
- struct bo *bo;
- int32_t refcount;
- struct cros_gralloc_handle *hnd;
- struct map_info *map_data;
- int32_t lockcount;
-};
-
-struct handle_info {
- cros_gralloc_bo *bo;
- int32_t registrations;
-};
-
-struct cros_gralloc_module {
- gralloc_module_t base;
- struct driver *drv;
- std::mutex mutex;
- std::unordered_map<cros_gralloc_handle *, handle_info> handles;
- std::unordered_map<uint32_t, cros_gralloc_bo *> buffers;
-};
-
-int cros_gralloc_open(const struct hw_module_t *mod, const char *name, struct hw_device_t **dev);
-
-int cros_gralloc_validate_reference(struct cros_gralloc_module *mod,
- struct cros_gralloc_handle *hnd, struct cros_gralloc_bo **obj);
-
-int cros_gralloc_decrement_reference_count(struct cros_gralloc_module *mod,
- struct cros_gralloc_bo *obj);
-
-#endif
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc_buffer.h"
+
+#include <assert.h>
+#include <sys/mman.h>
+
+cros_gralloc_buffer::cros_gralloc_buffer(uint32_t id, struct bo *acquire_bo,
+ struct cros_gralloc_handle *acquire_handle)
+ : id_(id), bo_(acquire_bo), hnd_(acquire_handle), refcount_(1), lockcount_(0)
+{
+ assert(bo_);
+ num_planes_ = drv_bo_get_num_planes(bo_);
+ for (uint32_t plane = 0; plane < num_planes_; plane++)
+ lock_data_[plane] = nullptr;
+}
+
+cros_gralloc_buffer::~cros_gralloc_buffer()
+{
+ drv_bo_destroy(bo_);
+ if (hnd_) {
+ native_handle_close(&hnd_->base);
+ delete hnd_;
+ }
+}
+
+uint32_t cros_gralloc_buffer::get_id() const
+{
+ return id_;
+}
+
+int32_t cros_gralloc_buffer::increase_refcount()
+{
+ return ++refcount_;
+}
+
+int32_t cros_gralloc_buffer::decrease_refcount()
+{
+ assert(refcount_ > 0);
+ return --refcount_;
+}
+
+int32_t cros_gralloc_buffer::lock(const struct rectangle *rect, uint32_t map_flags,
+ uint8_t *addr[DRV_MAX_PLANES])
+{
+ void *vaddr = nullptr;
+
+ memset(addr, 0, DRV_MAX_PLANES * sizeof(*addr));
+
+ /*
+ * Gralloc consumers don't support more than one kernel buffer per buffer object yet, so
+ * just use the first kernel buffer.
+ */
+ if (drv_num_buffers_per_bo(bo_) != 1) {
+ drv_log("Can only support one buffer per bo.\n");
+ return -EINVAL;
+ }
+
+ if (map_flags) {
+ if (lock_data_[0]) {
+ drv_bo_invalidate(bo_, lock_data_[0]);
+ vaddr = lock_data_[0]->vma->addr;
+ } else {
+ vaddr = drv_bo_map(bo_, rect, map_flags, &lock_data_[0], 0);
+ }
+
+ if (vaddr == MAP_FAILED) {
+ drv_log("Mapping failed.\n");
+ return -EFAULT;
+ }
+ }
+
+ for (uint32_t plane = 0; plane < num_planes_; plane++)
+ addr[plane] = static_cast<uint8_t *>(vaddr) + drv_bo_get_plane_offset(bo_, plane);
+
+ lockcount_++;
+ return 0;
+}
+
+int32_t cros_gralloc_buffer::unlock()
+{
+ if (lockcount_ <= 0) {
+ drv_log("Buffer was not locked.\n");
+ return -EINVAL;
+ }
+
+ if (!--lockcount_) {
+ if (lock_data_[0]) {
+ drv_bo_flush_or_unmap(bo_, lock_data_[0]);
+ lock_data_[0] = nullptr;
+ }
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef CROS_GRALLOC_BUFFER_H
+#define CROS_GRALLOC_BUFFER_H
+
+#include "../drv.h"
+#include "cros_gralloc_helpers.h"
+
+class cros_gralloc_buffer
+{
+ public:
+ cros_gralloc_buffer(uint32_t id, struct bo *acquire_bo,
+ struct cros_gralloc_handle *acquire_handle);
+ ~cros_gralloc_buffer();
+
+ uint32_t get_id() const;
+
+ /* The new reference count is returned by both these functions. */
+ int32_t increase_refcount();
+ int32_t decrease_refcount();
+
+ int32_t lock(const struct rectangle *rect, uint32_t map_flags,
+ uint8_t *addr[DRV_MAX_PLANES]);
+ int32_t unlock();
+
+ private:
+ cros_gralloc_buffer(cros_gralloc_buffer const &);
+ cros_gralloc_buffer operator=(cros_gralloc_buffer const &);
+
+ uint32_t id_;
+ struct bo *bo_;
+ struct cros_gralloc_handle *hnd_;
+
+ int32_t refcount_;
+ int32_t lockcount_;
+ uint32_t num_planes_;
+
+ struct mapping *lock_data_[DRV_MAX_PLANES];
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "cros_gralloc_driver.h"
+#include "../util.h"
+
+#include <cstdlib>
+#include <fcntl.h>
+#include <xf86drm.h>
+
+cros_gralloc_driver::cros_gralloc_driver() : drv_(nullptr)
+{
+}
+
+cros_gralloc_driver::~cros_gralloc_driver()
+{
+ buffers_.clear();
+ handles_.clear();
+
+ if (drv_) {
+ drv_destroy(drv_);
+ drv_ = nullptr;
+ }
+}
+
+int32_t cros_gralloc_driver::init()
+{
+ /*
+ * Create a driver from rendernode while filtering out
+ * the specified undesired driver.
+ *
+ * TODO(gsingh): Enable render nodes on udl/evdi.
+ */
+
+ int fd;
+ drmVersionPtr version;
+ char const *str = "%s/renderD%d";
+ const char *undesired[2] = { "vgem", nullptr };
+ uint32_t num_nodes = 63;
+ uint32_t min_node = 128;
+ uint32_t max_node = (min_node + num_nodes);
+
+ for (uint32_t i = 0; i < ARRAY_SIZE(undesired); i++) {
+ for (uint32_t j = min_node; j < max_node; j++) {
+ char *node;
+ if (asprintf(&node, str, DRM_DIR_NAME, j) < 0)
+ continue;
+
+ fd = open(node, O_RDWR, 0);
+ free(node);
+
+ if (fd < 0)
+ continue;
+
+ version = drmGetVersion(fd);
+ if (!version)
+ continue;
+
+ if (undesired[i] && !strcmp(version->name, undesired[i])) {
+ drmFreeVersion(version);
+ continue;
+ }
+
+ drmFreeVersion(version);
+ drv_ = drv_create(fd);
+ if (drv_)
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+bool cros_gralloc_driver::is_supported(const struct cros_gralloc_buffer_descriptor *descriptor)
+{
+ struct combination *combo;
+ uint32_t resolved_format;
+ resolved_format = drv_resolve_format(drv_, descriptor->drm_format, descriptor->use_flags);
+ combo = drv_get_combination(drv_, resolved_format, descriptor->use_flags);
+ return (combo != nullptr);
+}
+
+int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descriptor *descriptor,
+ buffer_handle_t *out_handle)
+{
+ uint32_t id;
+ uint64_t mod;
+ size_t num_planes;
+ uint32_t resolved_format;
+ uint32_t bytes_per_pixel;
+ uint64_t use_flags;
+
+ struct bo *bo;
+ struct cros_gralloc_handle *hnd;
+
+ resolved_format = drv_resolve_format(drv_, descriptor->drm_format, descriptor->use_flags);
+ use_flags = descriptor->use_flags;
+ /*
+ * TODO(b/79682290): ARC++ assumes NV12 is always linear and doesn't
+ * send modifiers across Wayland protocol, so we or in the
+ * BO_USE_LINEAR flag here. We need to fix ARC++ to allocate and work
+ * with tiled buffers.
+ */
+ if (resolved_format == DRM_FORMAT_NV12)
+ use_flags |= BO_USE_LINEAR;
+
+ bo = drv_bo_create(drv_, descriptor->width, descriptor->height, resolved_format, use_flags);
+ if (!bo) {
+ drv_log("Failed to create bo.\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * If there is a desire for more than one kernel buffer, this can be
+ * removed once the ArcCodec and Wayland service have the ability to
+ * send more than one fd. GL/Vulkan drivers may also have to be modified.
+ */
+ if (drv_num_buffers_per_bo(bo) != 1) {
+ drv_bo_destroy(bo);
+ drv_log("Can only support one buffer per bo.\n");
+ return -EINVAL;
+ }
+
+ hnd = new cros_gralloc_handle();
+ num_planes = drv_bo_get_num_planes(bo);
+
+ hnd->base.version = sizeof(hnd->base);
+ hnd->base.numFds = num_planes;
+ hnd->base.numInts = handle_data_size - num_planes;
+
+ for (size_t plane = 0; plane < num_planes; plane++) {
+ hnd->fds[plane] = drv_bo_get_plane_fd(bo, plane);
+ hnd->strides[plane] = drv_bo_get_plane_stride(bo, plane);
+ hnd->offsets[plane] = drv_bo_get_plane_offset(bo, plane);
+
+ mod = drv_bo_get_plane_format_modifier(bo, plane);
+ hnd->format_modifiers[2 * plane] = static_cast<uint32_t>(mod >> 32);
+ hnd->format_modifiers[2 * plane + 1] = static_cast<uint32_t>(mod);
+ }
+
+ hnd->width = drv_bo_get_width(bo);
+ hnd->height = drv_bo_get_height(bo);
+ hnd->format = drv_bo_get_format(bo);
+ hnd->use_flags[0] = static_cast<uint32_t>(descriptor->use_flags >> 32);
+ hnd->use_flags[1] = static_cast<uint32_t>(descriptor->use_flags);
+ bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0);
+ hnd->pixel_stride = DIV_ROUND_UP(hnd->strides[0], bytes_per_pixel);
+ hnd->magic = cros_gralloc_magic;
+ hnd->droid_format = descriptor->droid_format;
+ hnd->usage = descriptor->producer_usage;
+
+ id = drv_bo_get_plane_handle(bo, 0).u32;
+ auto buffer = new cros_gralloc_buffer(id, bo, hnd);
+
+ std::lock_guard<std::mutex> lock(mutex_);
+ buffers_.emplace(id, buffer);
+ handles_.emplace(hnd, std::make_pair(buffer, 1));
+ *out_handle = &hnd->base;
+ return 0;
+}
+
+int32_t cros_gralloc_driver::retain(buffer_handle_t handle)
+{
+ uint32_t id;
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (buffer) {
+ handles_[hnd].second++;
+ buffer->increase_refcount();
+ return 0;
+ }
+
+ if (drmPrimeFDToHandle(drv_get_fd(drv_), hnd->fds[0], &id)) {
+ drv_log("drmPrimeFDToHandle failed.\n");
+ return -errno;
+ }
+
+ if (buffers_.count(id)) {
+ buffer = buffers_[id];
+ buffer->increase_refcount();
+ } else {
+ struct bo *bo;
+ struct drv_import_fd_data data;
+ data.format = hnd->format;
+ data.width = hnd->width;
+ data.height = hnd->height;
+ data.use_flags = static_cast<uint64_t>(hnd->use_flags[0]) << 32;
+ data.use_flags |= hnd->use_flags[1];
+
+ memcpy(data.fds, hnd->fds, sizeof(data.fds));
+ memcpy(data.strides, hnd->strides, sizeof(data.strides));
+ memcpy(data.offsets, hnd->offsets, sizeof(data.offsets));
+ for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++) {
+ data.format_modifiers[plane] =
+ static_cast<uint64_t>(hnd->format_modifiers[2 * plane]) << 32;
+ data.format_modifiers[plane] |= hnd->format_modifiers[2 * plane + 1];
+ }
+
+ bo = drv_bo_import(drv_, &data);
+ if (!bo)
+ return -EFAULT;
+
+ id = drv_bo_get_plane_handle(bo, 0).u32;
+
+ buffer = new cros_gralloc_buffer(id, bo, nullptr);
+ buffers_.emplace(id, buffer);
+ }
+
+ handles_.emplace(hnd, std::make_pair(buffer, 1));
+ return 0;
+}
+
+int32_t cros_gralloc_driver::release(buffer_handle_t handle)
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ if (!--handles_[hnd].second)
+ handles_.erase(hnd);
+
+ if (buffer->decrease_refcount() == 0) {
+ buffers_.erase(buffer->get_id());
+ delete buffer;
+ }
+
+ return 0;
+}
+
+int32_t cros_gralloc_driver::lock(buffer_handle_t handle, int32_t acquire_fence,
+ const struct rectangle *rect, uint32_t map_flags,
+ uint8_t *addr[DRV_MAX_PLANES])
+{
+ int32_t ret = cros_gralloc_sync_wait(acquire_fence);
+ if (ret)
+ return ret;
+
+ std::lock_guard<std::mutex> lock(mutex_);
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ return buffer->lock(rect, map_flags, addr);
+}
+
+int32_t cros_gralloc_driver::unlock(buffer_handle_t handle, int32_t *release_fence)
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * From the ANativeWindow::dequeueBuffer documentation:
+ *
+ * "A value of -1 indicates that the caller may access the buffer immediately without
+ * waiting on a fence."
+ */
+ *release_fence = -1;
+ return buffer->unlock();
+}
+
+int32_t cros_gralloc_driver::get_backing_store(buffer_handle_t handle, uint64_t *out_store)
+{
+ std::lock_guard<std::mutex> lock(mutex_);
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ auto buffer = get_buffer(hnd);
+ if (!buffer) {
+ drv_log("Invalid Reference.\n");
+ return -EINVAL;
+ }
+
+ *out_store = static_cast<uint64_t>(buffer->get_id());
+ return 0;
+}
+
+cros_gralloc_buffer *cros_gralloc_driver::get_buffer(cros_gralloc_handle_t hnd)
+{
+ /* Assumes driver mutex is held. */
+ if (handles_.count(hnd))
+ return handles_[hnd].first;
+
+ return nullptr;
+}
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef CROS_GRALLOC_DRIVER_H
+#define CROS_GRALLOC_DRIVER_H
+
+#include "cros_gralloc_buffer.h"
+
+#include <mutex>
+#include <unordered_map>
+
+class cros_gralloc_driver
+{
+ public:
+ cros_gralloc_driver();
+ ~cros_gralloc_driver();
+
+ int32_t init();
+ bool is_supported(const struct cros_gralloc_buffer_descriptor *descriptor);
+ int32_t allocate(const struct cros_gralloc_buffer_descriptor *descriptor,
+ buffer_handle_t *out_handle);
+
+ int32_t retain(buffer_handle_t handle);
+ int32_t release(buffer_handle_t handle);
+
+ int32_t lock(buffer_handle_t handle, int32_t acquire_fence, const struct rectangle *rect,
+ uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES]);
+ int32_t unlock(buffer_handle_t handle, int32_t *release_fence);
+
+ int32_t get_backing_store(buffer_handle_t handle, uint64_t *out_store);
+
+ private:
+ cros_gralloc_driver(cros_gralloc_driver const &);
+ cros_gralloc_driver operator=(cros_gralloc_driver const &);
+ cros_gralloc_buffer *get_buffer(cros_gralloc_handle_t hnd);
+
+ struct driver *drv_;
+ std::mutex mutex_;
+ std::unordered_map<uint32_t, cros_gralloc_buffer *> buffers_;
+ std::unordered_map<cros_gralloc_handle_t, std::pair<cros_gralloc_buffer *, int32_t>>
+ handles_;
+};
+
+#endif
int32_t fds[DRV_MAX_PLANES];
uint32_t strides[DRV_MAX_PLANES];
uint32_t offsets[DRV_MAX_PLANES];
- uint32_t sizes[DRV_MAX_PLANES];
uint32_t format_modifiers[2 * DRV_MAX_PLANES];
uint32_t width;
uint32_t height;
- uint32_t format; /* DRM format */
+ uint32_t format; /* DRM format */
+ uint32_t use_flags[2]; /* Buffer creation flags */
uint32_t magic;
uint32_t pixel_stride;
int32_t droid_format;
int32_t usage; /* Android usage. */
};
+typedef const struct cros_gralloc_handle *cros_gralloc_handle_t;
+
#endif
#include "cros_gralloc_helpers.h"
-#include <cstdlib>
-#include <cutils/log.h>
-#include <fcntl.h>
-#include <xf86drm.h>
-
-uint64_t cros_gralloc_convert_flags(int flags)
-{
- uint64_t usage = BO_USE_NONE;
-
- if (flags & GRALLOC_USAGE_CURSOR)
- usage |= BO_USE_NONE;
- if ((flags & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_RARELY)
- usage |= BO_USE_SW_READ_RARELY;
- if ((flags & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
- usage |= BO_USE_SW_READ_OFTEN;
- if ((flags & GRALLOC_USAGE_SW_WRITE_MASK) == GRALLOC_USAGE_SW_WRITE_RARELY)
- usage |= BO_USE_SW_WRITE_RARELY;
- if ((flags & GRALLOC_USAGE_SW_WRITE_MASK) == GRALLOC_USAGE_SW_WRITE_OFTEN)
- usage |= BO_USE_SW_WRITE_OFTEN;
- if (flags & GRALLOC_USAGE_HW_TEXTURE)
- usage |= BO_USE_TEXTURE;
- if (flags & GRALLOC_USAGE_HW_RENDER)
- usage |= BO_USE_RENDERING;
- if (flags & GRALLOC_USAGE_HW_2D)
- usage |= BO_USE_RENDERING;
- if (flags & GRALLOC_USAGE_HW_COMPOSER)
- /* HWC wants to use display hardware, but can defer to OpenGL. */
- usage |= BO_USE_SCANOUT | BO_USE_TEXTURE;
- if (flags & GRALLOC_USAGE_HW_FB)
- usage |= BO_USE_NONE;
- if (flags & GRALLOC_USAGE_EXTERNAL_DISP)
- /* We're ignoring this flag until we decide what to with display link */
- usage |= BO_USE_NONE;
- if (flags & GRALLOC_USAGE_PROTECTED)
- usage |= BO_USE_PROTECTED;
- if (flags & GRALLOC_USAGE_HW_VIDEO_ENCODER)
- /*HACK: See b/30054495 */
- usage |= BO_USE_SW_READ_OFTEN;
- if (flags & GRALLOC_USAGE_HW_CAMERA_WRITE)
- usage |= BO_USE_HW_CAMERA_WRITE;
- if (flags & GRALLOC_USAGE_HW_CAMERA_READ)
- usage |= BO_USE_HW_CAMERA_READ;
- if (flags & GRALLOC_USAGE_HW_CAMERA_ZSL)
- usage |= BO_USE_HW_CAMERA_ZSL;
- if (flags & GRALLOC_USAGE_RENDERSCRIPT)
- /* We use CPU for compute. */
- usage |= BO_USE_LINEAR;
-
- return usage;
-}
+#include <sync/sync.h>
uint32_t cros_gralloc_convert_format(int format)
{
case HAL_PIXEL_FORMAT_RGB_565:
return DRM_FORMAT_RGB565;
case HAL_PIXEL_FORMAT_RGB_888:
- return DRM_FORMAT_RGB888;
+ return DRM_FORMAT_BGR888;
case HAL_PIXEL_FORMAT_RGBA_8888:
return DRM_FORMAT_ABGR8888;
case HAL_PIXEL_FORMAT_RGBX_8888:
return DRM_FORMAT_NONE;
}
-static int32_t cros_gralloc_query_rendernode(struct driver **drv, const char *undesired)
-{
- /*
- * Create a driver from rendernode while filtering out
- * the specified undesired driver.
- *
- * TODO(gsingh): Enable render nodes on udl/evdi.
- */
-
- int fd;
- drmVersionPtr version;
- char const *str = "%s/renderD%d";
- int32_t num_nodes = 63;
- int32_t min_node = 128;
- int32_t max_node = (min_node + num_nodes);
-
- for (int i = min_node; i < max_node; i++) {
- char *node;
-
- if (asprintf(&node, str, DRM_DIR_NAME, i) < 0)
- continue;
-
- fd = open(node, O_RDWR, 0);
- free(node);
-
- if (fd < 0)
- continue;
-
- version = drmGetVersion(fd);
- if (!version)
- continue;
-
- if (undesired && !strcmp(version->name, undesired)) {
- drmFreeVersion(version);
- continue;
- }
-
- drmFreeVersion(version);
- *drv = drv_create(fd);
-
- if (*drv)
- return CROS_GRALLOC_ERROR_NONE;
- }
-
- return CROS_GRALLOC_ERROR_NO_RESOURCES;
-}
-
-int32_t cros_gralloc_rendernode_open(struct driver **drv)
+cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle)
{
- int32_t ret;
- ret = cros_gralloc_query_rendernode(drv, "vgem");
-
- /* Allow vgem driver if no hardware is found. */
- if (ret)
- ret = cros_gralloc_query_rendernode(drv, NULL);
+ auto hnd = reinterpret_cast<cros_gralloc_handle_t>(handle);
+ if (!hnd || hnd->magic != cros_gralloc_magic)
+ return nullptr;
- return ret;
+ return hnd;
}
-int32_t cros_gralloc_validate_handle(struct cros_gralloc_handle *hnd)
+int32_t cros_gralloc_sync_wait(int32_t acquire_fence)
{
- if (!hnd || hnd->magic != cros_gralloc_magic)
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
+ if (acquire_fence < 0)
+ return 0;
- return CROS_GRALLOC_ERROR_NONE;
-}
+ /*
+ * Wait initially for 1000 ms, and then wait indefinitely. The SYNC_IOC_WAIT
+ * documentation states the caller waits indefinitely on the fence if timeout < 0.
+ */
+ int err = sync_wait(acquire_fence, 1000);
+ if (err < 0) {
+ drv_log("Timed out on sync wait, err = %s\n", strerror(errno));
+ err = sync_wait(acquire_fence, -1);
+ if (err < 0) {
+ drv_log("sync wait error = %s\n", strerror(errno));
+ return -errno;
+ }
+ }
-void cros_gralloc_log(const char *prefix, const char *file, int line, const char *format, ...)
-{
- char buf[50];
- snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);
+ err = close(acquire_fence);
+ if (err) {
+ drv_log("Unable to close fence fd, err = %s\n", strerror(errno));
+ return -errno;
+ }
- va_list args;
- va_start(args, format);
- __android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
- va_end(args);
+ return 0;
}
#include "../drv.h"
#include "cros_gralloc_handle.h"
+#include "cros_gralloc_types.h"
-#include <hardware/gralloc.h>
#include <system/graphics.h>
-
-/* Use these error codes derived from gralloc1 to make transition easier when
- * it happens
- */
-typedef enum {
- CROS_GRALLOC_ERROR_NONE = 0,
- CROS_GRALLOC_ERROR_BAD_DESCRIPTOR = 1,
- CROS_GRALLOC_ERROR_BAD_HANDLE = 2,
- CROS_GRALLOC_ERROR_BAD_VALUE = 3,
- CROS_GRALLOC_ERROR_NOT_SHARED = 4,
- CROS_GRALLOC_ERROR_NO_RESOURCES = 5,
- CROS_GRALLOC_ERROR_UNDEFINED = 6,
- CROS_GRALLOC_ERROR_UNSUPPORTED = 7,
-} cros_gralloc_error_t;
-
-/* This enumeration must match the one in <gralloc_drm.h>.
- * The functions supported by this gralloc's temporary private API are listed
- * below. Use of these functions is highly discouraged and should only be
- * reserved for cases where no alternative to get same information (such as
- * querying ANativeWindow) exists.
- */
-// clang-format off
-enum {
- GRALLOC_DRM_GET_STRIDE,
- GRALLOC_DRM_GET_FORMAT,
- GRALLOC_DRM_GET_DIMENSIONS,
- GRALLOC_DRM_GET_BACKING_STORE,
-};
-// clang-format on
+#include <system/window.h>
constexpr uint32_t cros_gralloc_magic = 0xABCDDCBA;
-
constexpr uint32_t handle_data_size =
((sizeof(struct cros_gralloc_handle) - offsetof(cros_gralloc_handle, fds[0])) / sizeof(int));
-constexpr uint32_t sw_access = GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK;
-
-uint64_t cros_gralloc_convert_flags(int flags);
-
-uint32_t cros_gralloc_convert_format(int format);
-
-int32_t cros_gralloc_rendernode_open(struct driver **drv);
-
-int32_t cros_gralloc_validate_handle(struct cros_gralloc_handle *hnd);
+uint32_t cros_gralloc_convert_format(int32_t format);
-/* Logging code adapted from bsdrm */
-__attribute__((format(printf, 4, 5))) void cros_gralloc_log(const char *prefix, const char *file,
- int line, const char *format, ...);
+cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle);
-#define cros_gralloc_error(...) \
- do { \
- cros_gralloc_log("CROS_GRALLOC_ERROR", __FILE__, __LINE__, __VA_ARGS__); \
- } while (0)
+int32_t cros_gralloc_sync_wait(int32_t acquire_fence);
#endif
+++ /dev/null
-/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "cros_gralloc.h"
-
-#include <sys/mman.h>
-#include <xf86drm.h>
-
-int cros_gralloc_validate_reference(struct cros_gralloc_module *mod,
- struct cros_gralloc_handle *hnd, struct cros_gralloc_bo **bo)
-{
- if (!mod->handles.count(hnd))
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
-
- *bo = mod->handles[hnd].bo;
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-int cros_gralloc_decrement_reference_count(struct cros_gralloc_module *mod,
- struct cros_gralloc_bo *bo)
-{
- if (bo->refcount <= 0) {
- cros_gralloc_error("The reference count is <= 0.");
- assert(0);
- }
-
- if (!--bo->refcount) {
- mod->buffers.erase(drv_bo_get_plane_handle(bo->bo, 0).u32);
- drv_bo_destroy(bo->bo);
-
- if (bo->hnd) {
- mod->handles.erase(bo->hnd);
- native_handle_close(&bo->hnd->base);
- delete bo->hnd;
- }
-
- delete bo;
- }
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-static int cros_gralloc_register_buffer(struct gralloc_module_t const *module,
- buffer_handle_t handle)
-{
- uint32_t id;
- struct cros_gralloc_bo *bo;
- auto hnd = (struct cros_gralloc_handle *)handle;
- auto mod = (struct cros_gralloc_module *)module;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- if (cros_gralloc_validate_handle(hnd)) {
- cros_gralloc_error("Invalid handle.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (!mod->drv) {
- if (cros_gralloc_rendernode_open(&mod->drv)) {
- cros_gralloc_error("Failed to open render node.");
- return CROS_GRALLOC_ERROR_NO_RESOURCES;
- }
- }
-
- if (!cros_gralloc_validate_reference(mod, hnd, &bo)) {
- bo->refcount++;
- mod->handles[hnd].registrations++;
- return CROS_GRALLOC_ERROR_NONE;
- }
-
- if (drmPrimeFDToHandle(drv_get_fd(mod->drv), hnd->fds[0], &id)) {
- cros_gralloc_error("drmPrimeFDToHandle failed.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (mod->buffers.count(id)) {
- bo = mod->buffers[id];
- bo->refcount++;
- } else {
- struct drv_import_fd_data data;
- data.format = hnd->format;
- data.width = hnd->width;
- data.height = hnd->height;
-
- memcpy(data.fds, hnd->fds, sizeof(data.fds));
- memcpy(data.strides, hnd->strides, sizeof(data.strides));
- memcpy(data.offsets, hnd->offsets, sizeof(data.offsets));
- memcpy(data.sizes, hnd->sizes, sizeof(data.sizes));
- for (uint32_t p = 0; p < DRV_MAX_PLANES; p++) {
- data.format_modifiers[p] =
- static_cast<uint64_t>(hnd->format_modifiers[2 * p]) << 32;
- data.format_modifiers[p] |= hnd->format_modifiers[2 * p + 1];
- }
-
- bo = new cros_gralloc_bo();
- bo->bo = drv_bo_import(mod->drv, &data);
- if (!bo->bo) {
- delete bo;
- return CROS_GRALLOC_ERROR_NO_RESOURCES;
- }
-
- id = drv_bo_get_plane_handle(bo->bo, 0).u32;
- mod->buffers[id] = bo;
-
- bo->refcount = 1;
- }
-
- mod->handles[hnd].bo = bo;
- mod->handles[hnd].registrations = 1;
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-static int cros_gralloc_unregister_buffer(struct gralloc_module_t const *module,
- buffer_handle_t handle)
-{
- struct cros_gralloc_bo *bo;
- auto hnd = (struct cros_gralloc_handle *)handle;
- auto mod = (struct cros_gralloc_module *)module;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- if (cros_gralloc_validate_handle(hnd)) {
- cros_gralloc_error("Invalid handle.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
- cros_gralloc_error("Invalid Reference.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (mod->handles[hnd].registrations <= 0) {
- cros_gralloc_error("Handle not registered.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- mod->handles[hnd].registrations--;
-
- if (!mod->handles[hnd].registrations)
- mod->handles.erase(hnd);
-
- return cros_gralloc_decrement_reference_count(mod, bo);
-}
-
-static int cros_gralloc_lock(struct gralloc_module_t const *module, buffer_handle_t handle,
- int usage, int l, int t, int w, int h, void **vaddr)
-{
- struct cros_gralloc_bo *bo;
- auto mod = (struct cros_gralloc_module *)module;
- auto hnd = (struct cros_gralloc_handle *)handle;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- if (cros_gralloc_validate_handle(hnd)) {
- cros_gralloc_error("Invalid handle.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
- cros_gralloc_error("Invalid Reference.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if ((hnd->droid_format == HAL_PIXEL_FORMAT_YCbCr_420_888)) {
- cros_gralloc_error("HAL_PIXEL_FORMAT_YCbCr_*_888 format not compatible.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (sw_access & usage) {
- if (bo->map_data) {
- *vaddr = bo->map_data->addr;
- } else {
- *vaddr = drv_bo_map(bo->bo, 0, 0, drv_bo_get_width(bo->bo),
- drv_bo_get_height(bo->bo), 0, &bo->map_data, 0);
- }
-
- if (*vaddr == MAP_FAILED) {
- cros_gralloc_error("Mapping failed.");
- return CROS_GRALLOC_ERROR_UNSUPPORTED;
- }
- }
-
- bo->lockcount++;
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-static int cros_gralloc_unlock(struct gralloc_module_t const *module, buffer_handle_t handle)
-{
- struct cros_gralloc_bo *bo;
- auto hnd = (struct cros_gralloc_handle *)handle;
- auto mod = (struct cros_gralloc_module *)module;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- if (cros_gralloc_validate_handle(hnd)) {
- cros_gralloc_error("Invalid handle.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
- cros_gralloc_error("Invalid Reference.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (!--bo->lockcount && bo->map_data) {
- drv_bo_unmap(bo->bo, bo->map_data);
- bo->map_data = NULL;
- }
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-static int cros_gralloc_perform(struct gralloc_module_t const *module, int op, ...)
-{
- va_list args;
- struct cros_gralloc_bo *bo;
- int32_t *out_format;
- uint64_t *out_store;
- buffer_handle_t handle;
- uint32_t *out_width, *out_height, *out_stride;
- auto mod = (struct cros_gralloc_module *)module;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- switch (op) {
- case GRALLOC_DRM_GET_STRIDE:
- case GRALLOC_DRM_GET_FORMAT:
- case GRALLOC_DRM_GET_DIMENSIONS:
- case GRALLOC_DRM_GET_BACKING_STORE:
- break;
- default:
- return CROS_GRALLOC_ERROR_UNSUPPORTED;
- }
-
- va_start(args, op);
- handle = va_arg(args, buffer_handle_t);
- auto hnd = (struct cros_gralloc_handle *)handle;
-
- if (cros_gralloc_validate_handle(hnd)) {
- cros_gralloc_error("Invalid handle.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
- cros_gralloc_error("Invalid Reference.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- switch (op) {
- case GRALLOC_DRM_GET_STRIDE:
- out_stride = va_arg(args, uint32_t *);
- *out_stride = hnd->pixel_stride;
- break;
- case GRALLOC_DRM_GET_FORMAT:
- out_format = va_arg(args, int32_t *);
- *out_format = hnd->droid_format;
- break;
- case GRALLOC_DRM_GET_DIMENSIONS:
- out_width = va_arg(args, uint32_t *);
- out_height = va_arg(args, uint32_t *);
- *out_width = hnd->width;
- *out_height = hnd->height;
- break;
- case GRALLOC_DRM_GET_BACKING_STORE:
- out_store = va_arg(args, uint64_t *);
- *out_store = drv_bo_get_plane_handle(bo->bo, 0).u64;
- break;
- default:
- return CROS_GRALLOC_ERROR_UNSUPPORTED;
- }
-
- va_end(args);
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-static int cros_gralloc_lock_ycbcr(struct gralloc_module_t const *module, buffer_handle_t handle,
- int usage, int l, int t, int w, int h,
- struct android_ycbcr *ycbcr)
-{
- uint8_t *addr = NULL;
- size_t offsets[DRV_MAX_PLANES];
- struct cros_gralloc_bo *bo;
- auto hnd = (struct cros_gralloc_handle *)handle;
- auto mod = (struct cros_gralloc_module *)module;
- std::lock_guard<std::mutex> lock(mod->mutex);
-
- if (cros_gralloc_validate_handle(hnd)) {
- cros_gralloc_error("Invalid handle.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (cros_gralloc_validate_reference(mod, hnd, &bo)) {
- cros_gralloc_error("Invalid Reference.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if ((hnd->droid_format != HAL_PIXEL_FORMAT_YCbCr_420_888) &&
- (hnd->droid_format != HAL_PIXEL_FORMAT_YV12)) {
- cros_gralloc_error("Non-YUV format not compatible.");
- return CROS_GRALLOC_ERROR_BAD_HANDLE;
- }
-
- if (sw_access & usage) {
- void *vaddr;
- if (bo->map_data) {
- vaddr = bo->map_data->addr;
- } else {
- vaddr = drv_bo_map(bo->bo, 0, 0, drv_bo_get_width(bo->bo),
- drv_bo_get_height(bo->bo), 0, &bo->map_data, 0);
- }
-
- if (vaddr == MAP_FAILED) {
- cros_gralloc_error("Mapping failed.");
- return CROS_GRALLOC_ERROR_UNSUPPORTED;
- }
-
- addr = static_cast<uint8_t *>(vaddr);
- }
-
- for (size_t p = 0; p < drv_bo_get_num_planes(bo->bo); p++)
- offsets[p] = drv_bo_get_plane_offset(bo->bo, p);
-
- switch (hnd->format) {
- case DRM_FORMAT_NV12:
- ycbcr->y = addr;
- ycbcr->cb = addr + offsets[1];
- ycbcr->cr = addr + offsets[1] + 1;
- ycbcr->ystride = drv_bo_get_plane_stride(bo->bo, 0);
- ycbcr->cstride = drv_bo_get_plane_stride(bo->bo, 1);
- ycbcr->chroma_step = 2;
- break;
- case DRM_FORMAT_YVU420_ANDROID:
- ycbcr->y = addr;
- ycbcr->cb = addr + offsets[2];
- ycbcr->cr = addr + offsets[1];
- ycbcr->ystride = drv_bo_get_plane_stride(bo->bo, 0);
- ycbcr->cstride = drv_bo_get_plane_stride(bo->bo, 1);
- ycbcr->chroma_step = 1;
- break;
- case DRM_FORMAT_UYVY:
- ycbcr->y = addr + 1;
- ycbcr->cb = addr;
- ycbcr->cr = addr + 2;
- ycbcr->ystride = drv_bo_get_plane_stride(bo->bo, 0);
- ycbcr->cstride = drv_bo_get_plane_stride(bo->bo, 0);
- ycbcr->chroma_step = 2;
- break;
- default:
- return CROS_GRALLOC_ERROR_UNSUPPORTED;
- }
-
- bo->lockcount++;
-
- return CROS_GRALLOC_ERROR_NONE;
-}
-
-static struct hw_module_methods_t cros_gralloc_module_methods = {.open = cros_gralloc_open };
-
-struct cros_gralloc_module HAL_MODULE_INFO_SYM = {
- .base =
- {
- .common =
- {
- .tag = HARDWARE_MODULE_TAG,
- .module_api_version = GRALLOC_MODULE_API_VERSION_0_2,
- .hal_api_version = 0,
- .id = GRALLOC_HARDWARE_MODULE_ID,
- .name = "CrOS Gralloc",
- .author = "Chrome OS",
- .methods = &cros_gralloc_module_methods,
- },
- .registerBuffer = cros_gralloc_register_buffer,
- .unregisterBuffer = cros_gralloc_unregister_buffer,
- .lock = cros_gralloc_lock,
- .unlock = cros_gralloc_unlock,
- .perform = cros_gralloc_perform,
- .lock_ycbcr = cros_gralloc_lock_ycbcr,
- },
-
- .drv = NULL,
-};
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef CROS_GRALLOC_TYPES_H
+#define CROS_GRALLOC_TYPES_H
+
+struct cros_gralloc_buffer_descriptor {
+ uint32_t width;
+ uint32_t height;
+ uint32_t consumer_usage;
+ uint32_t producer_usage;
+ uint32_t droid_format;
+ uint32_t drm_format;
+ uint64_t use_flags;
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "../cros_gralloc_driver.h"
+
+#include <cassert>
+#include <hardware/gralloc.h>
+#include <memory.h>
+
+struct gralloc0_module {
+ gralloc_module_t base;
+ std::unique_ptr<alloc_device_t> alloc;
+ std::unique_ptr<cros_gralloc_driver> driver;
+ bool initialized;
+ std::mutex initialization_mutex;
+};
+
+/* This enumeration must match the one in <gralloc_drm.h>.
+ * The functions supported by this gralloc's temporary private API are listed
+ * below. Use of these functions is highly discouraged and should only be
+ * reserved for cases where no alternative to get same information (such as
+ * querying ANativeWindow) exists.
+ */
+// clang-format off
+enum {
+ GRALLOC_DRM_GET_STRIDE,
+ GRALLOC_DRM_GET_FORMAT,
+ GRALLOC_DRM_GET_DIMENSIONS,
+ GRALLOC_DRM_GET_BACKING_STORE,
+};
+// clang-format on
+
+/*
+ * Translates gralloc0 GRALLOC_USAGE_* bits into minigbm BO_USE_* flags.
+ * Flags with no buffer-allocation consequence map to BO_USE_NONE.
+ */
+static uint64_t gralloc0_convert_usage(int usage)
+{
+ uint64_t use_flags = BO_USE_NONE;
+
+ if (usage & GRALLOC_USAGE_CURSOR)
+ use_flags |= BO_USE_NONE;
+ /* SW read/write are masked fields, not single bits, so compare exactly. */
+ if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_RARELY)
+ use_flags |= BO_USE_SW_READ_RARELY;
+ if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
+ use_flags |= BO_USE_SW_READ_OFTEN;
+ if ((usage & GRALLOC_USAGE_SW_WRITE_MASK) == GRALLOC_USAGE_SW_WRITE_RARELY)
+ use_flags |= BO_USE_SW_WRITE_RARELY;
+ if ((usage & GRALLOC_USAGE_SW_WRITE_MASK) == GRALLOC_USAGE_SW_WRITE_OFTEN)
+ use_flags |= BO_USE_SW_WRITE_OFTEN;
+ if (usage & GRALLOC_USAGE_HW_TEXTURE)
+ use_flags |= BO_USE_TEXTURE;
+ if (usage & GRALLOC_USAGE_HW_RENDER)
+ use_flags |= BO_USE_RENDERING;
+ if (usage & GRALLOC_USAGE_HW_2D)
+ use_flags |= BO_USE_RENDERING;
+ if (usage & GRALLOC_USAGE_HW_COMPOSER)
+ /* HWC wants to use display hardware, but can defer to OpenGL. */
+ use_flags |= BO_USE_SCANOUT | BO_USE_TEXTURE;
+ if (usage & GRALLOC_USAGE_HW_FB)
+ use_flags |= BO_USE_NONE;
+ if (usage & GRALLOC_USAGE_EXTERNAL_DISP)
+ /*
+ * This flag potentially covers external display for the normal drivers (i915,
+ * rockchip) and usb monitors (evdi/udl). It's complicated so ignore it.
+ * */
+ use_flags |= BO_USE_NONE;
+ if (usage & GRALLOC_USAGE_PROTECTED)
+ use_flags |= BO_USE_PROTECTED;
+ if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
+ /*HACK: See b/30054495 */
+ use_flags |= BO_USE_SW_READ_OFTEN;
+ if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE)
+ use_flags |= BO_USE_CAMERA_WRITE;
+ if (usage & GRALLOC_USAGE_HW_CAMERA_READ)
+ use_flags |= BO_USE_CAMERA_READ;
+ if (usage & GRALLOC_USAGE_RENDERSCRIPT)
+ use_flags |= BO_USE_RENDERSCRIPT;
+
+ return use_flags;
+}
+
+/* Translates gralloc0 SW usage bits into minigbm BO_MAP_* flags for lock(). */
+static uint32_t gralloc0_convert_map_usage(int map_usage)
+{
+ uint32_t map_flags = BO_MAP_NONE;
+
+ if (map_usage & GRALLOC_USAGE_SW_READ_MASK)
+ map_flags |= BO_MAP_READ;
+ if (map_usage & GRALLOC_USAGE_SW_WRITE_MASK)
+ map_flags |= BO_MAP_WRITE;
+
+ return map_flags;
+}
+
+/*
+ * alloc_device_t::alloc implementation. Builds a buffer descriptor from the
+ * gralloc0 parameters, checks driver support (retrying once without SCANOUT
+ * for HWC-only buffers, which may fall back to GL composition), then
+ * allocates. On success *handle is the new buffer and *stride is the
+ * pixel stride reported by the driver handle.
+ */
+static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usage,
+ buffer_handle_t *handle, int *stride)
+{
+ int32_t ret;
+ bool supported;
+ struct cros_gralloc_buffer_descriptor descriptor;
+ auto mod = (struct gralloc0_module const *)dev->common.module;
+
+ descriptor.width = w;
+ descriptor.height = h;
+ descriptor.droid_format = format;
+ /* gralloc0 has a single usage value; mirror it into both fields. */
+ descriptor.producer_usage = descriptor.consumer_usage = usage;
+ descriptor.drm_format = cros_gralloc_convert_format(format);
+ descriptor.use_flags = gralloc0_convert_usage(usage);
+
+ supported = mod->driver->is_supported(&descriptor);
+ if (!supported && (usage & GRALLOC_USAGE_HW_COMPOSER)) {
+ descriptor.use_flags &= ~BO_USE_SCANOUT;
+ supported = mod->driver->is_supported(&descriptor);
+ }
+
+ if (!supported) {
+ /* %4.4s prints the DRM fourcc as four ASCII chars. */
+ drv_log("Unsupported combination -- HAL format: %u, HAL usage: %u, "
+ "drv_format: %4.4s, use_flags: %llu\n",
+ format, usage, reinterpret_cast<char *>(&descriptor.drm_format),
+ static_cast<unsigned long long>(descriptor.use_flags));
+ return -EINVAL;
+ }
+
+ ret = mod->driver->allocate(&descriptor, handle);
+ if (ret)
+ return ret;
+
+ auto hnd = cros_gralloc_convert_handle(*handle);
+ *stride = hnd->pixel_stride;
+
+ return 0;
+}
+
+/* alloc_device_t::free implementation -- drops the driver's reference. */
+static int gralloc0_free(alloc_device_t *dev, buffer_handle_t handle)
+{
+ auto mod = (struct gralloc0_module const *)dev->common.module;
+ return mod->driver->release(handle);
+}
+
+/* hw_device_t::close for the alloc device. */
+static int gralloc0_close(struct hw_device_t *dev)
+{
+ /* Memory is freed by managed pointers on process close. */
+ return 0;
+}
+
+/*
+ * One-time module initialization, serialized by initialization_mutex.
+ * Always creates the driver; only builds the alloc_device_t when
+ * |initialize_alloc| is set (i.e. when called via gralloc0_open rather
+ * than via registerBuffer in a consumer-only process).
+ * Returns 0 on success or if already initialized, -ENODEV on failure.
+ */
+static int gralloc0_init(struct gralloc0_module *mod, bool initialize_alloc)
+{
+ std::lock_guard<std::mutex> lock(mod->initialization_mutex);
+
+ if (mod->initialized)
+ return 0;
+
+ mod->driver = std::make_unique<cros_gralloc_driver>();
+ if (mod->driver->init()) {
+ drv_log("Failed to initialize driver.\n");
+ return -ENODEV;
+ }
+
+ if (initialize_alloc) {
+ mod->alloc = std::make_unique<alloc_device_t>();
+ mod->alloc->alloc = gralloc0_alloc;
+ mod->alloc->free = gralloc0_free;
+ mod->alloc->common.tag = HARDWARE_DEVICE_TAG;
+ mod->alloc->common.version = 0;
+ mod->alloc->common.module = (hw_module_t *)mod;
+ mod->alloc->common.close = gralloc0_close;
+ }
+
+ mod->initialized = true;
+ return 0;
+}
+
+/*
+ * hw_module_methods_t::open implementation. Only GRALLOC_HARDWARE_GPU0 is a
+ * valid device name. The module is declared const by the HAL, hence the
+ * const_cast before mutating it.
+ * NOTE(review): the fast-path read of module->initialized is done without
+ * holding initialization_mutex -- assumes open() does not race with itself
+ * or with registerBuffer; confirm that is acceptable here.
+ */
+static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct hw_device_t **dev)
+{
+ auto const_module = reinterpret_cast<const struct gralloc0_module *>(mod);
+ auto module = const_cast<struct gralloc0_module *>(const_module);
+
+ if (module->initialized) {
+ *dev = &module->alloc->common;
+ return 0;
+ }
+
+ if (strcmp(name, GRALLOC_HARDWARE_GPU0)) {
+ drv_log("Incorrect device name - %s.\n", name);
+ return -EINVAL;
+ }
+
+ if (gralloc0_init(module, true))
+ return -ENODEV;
+
+ *dev = &module->alloc->common;
+ return 0;
+}
+
+/*
+ * gralloc_module_t::registerBuffer. May be the first gralloc call in a
+ * consumer-only process, so lazily initialize the module (without an alloc
+ * device) before taking a reference on the imported handle.
+ */
+static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffer_handle_t handle)
+{
+ auto const_module = reinterpret_cast<const struct gralloc0_module *>(module);
+ auto mod = const_cast<struct gralloc0_module *>(const_module);
+
+ if (!mod->initialized)
+ if (gralloc0_init(mod, false))
+ return -ENODEV;
+
+ return mod->driver->retain(handle);
+}
+
+/* gralloc_module_t::unregisterBuffer -- drops the reference taken above. */
+static int gralloc0_unregister_buffer(struct gralloc_module_t const *module, buffer_handle_t handle)
+{
+ auto mod = (struct gralloc0_module const *)module;
+ return mod->driver->release(handle);
+}
+
+/* gralloc_module_t::lock -- synchronous lock via lockAsync with no fence. */
+static int gralloc0_lock(struct gralloc_module_t const *module, buffer_handle_t handle, int usage,
+ int l, int t, int w, int h, void **vaddr)
+{
+ return module->lockAsync(module, handle, usage, l, t, w, h, vaddr, -1);
+}
+
+/*
+ * gralloc_module_t::unlock -- synchronous unlock: waits on the release fence
+ * returned by the driver before reporting completion.
+ */
+static int gralloc0_unlock(struct gralloc_module_t const *module, buffer_handle_t handle)
+{
+ int32_t fence_fd, ret;
+ auto mod = (struct gralloc0_module const *)module;
+ ret = mod->driver->unlock(handle, &fence_fd);
+ if (ret)
+ return ret;
+
+ ret = cros_gralloc_sync_wait(fence_fd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * gralloc_module_t::perform -- the temporary private API (see the enum at the
+ * top of this file). All ops take a buffer_handle_t first, followed by
+ * op-specific out-pointers pulled from the va_list.
+ * NOTE(review): the "Invalid handle" early return exits without calling
+ * va_end(args) -- technically required by the C standard after va_start;
+ * harmless on most ABIs but worth fixing.
+ */
+static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...)
+{
+ va_list args;
+ int32_t *out_format, ret;
+ uint64_t *out_store;
+ buffer_handle_t handle;
+ uint32_t *out_width, *out_height, *out_stride;
+ auto mod = (struct gralloc0_module const *)module;
+
+ /* Reject unknown ops before touching the va_list. */
+ switch (op) {
+ case GRALLOC_DRM_GET_STRIDE:
+ case GRALLOC_DRM_GET_FORMAT:
+ case GRALLOC_DRM_GET_DIMENSIONS:
+ case GRALLOC_DRM_GET_BACKING_STORE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ va_start(args, op);
+
+ ret = 0;
+ handle = va_arg(args, buffer_handle_t);
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ switch (op) {
+ case GRALLOC_DRM_GET_STRIDE:
+ out_stride = va_arg(args, uint32_t *);
+ *out_stride = hnd->pixel_stride;
+ break;
+ case GRALLOC_DRM_GET_FORMAT:
+ out_format = va_arg(args, int32_t *);
+ *out_format = hnd->droid_format;
+ break;
+ case GRALLOC_DRM_GET_DIMENSIONS:
+ out_width = va_arg(args, uint32_t *);
+ out_height = va_arg(args, uint32_t *);
+ *out_width = hnd->width;
+ *out_height = hnd->height;
+ break;
+ case GRALLOC_DRM_GET_BACKING_STORE:
+ out_store = va_arg(args, uint64_t *);
+ ret = mod->driver->get_backing_store(handle, out_store);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ va_end(args);
+
+ return ret;
+}
+
+/* gralloc_module_t::lock_ycbcr -- synchronous YUV lock via the async path. */
+static int gralloc0_lock_ycbcr(struct gralloc_module_t const *module, buffer_handle_t handle,
+ int usage, int l, int t, int w, int h, struct android_ycbcr *ycbcr)
+{
+ return module->lockAsync_ycbcr(module, handle, usage, l, t, w, h, ycbcr, -1);
+}
+
+/*
+ * gralloc_module_t::lockAsync. The flexible YCbCr_420_888 format has no
+ * single flat mapping, so it must go through lockAsync_ycbcr instead.
+ * On success *vaddr points at plane 0.
+ * NOTE(review): *vaddr is assigned from addr[0] even when driver->lock()
+ * fails, in which case addr[] is uninitialized -- callers must not use
+ * *vaddr on error; confirm this is intended.
+ */
+static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_handle_t handle,
+ int usage, int l, int t, int w, int h, void **vaddr, int fence_fd)
+{
+ int32_t ret;
+ uint32_t map_flags;
+ uint8_t *addr[DRV_MAX_PLANES];
+ auto mod = (struct gralloc0_module const *)module;
+ struct rectangle rect = { .x = static_cast<uint32_t>(l),
+ .y = static_cast<uint32_t>(t),
+ .width = static_cast<uint32_t>(w),
+ .height = static_cast<uint32_t>(h) };
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ if (hnd->droid_format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+ drv_log("HAL_PIXEL_FORMAT_YCbCr_*_888 format not compatible.\n");
+ return -EINVAL;
+ }
+
+ /* The rectangle casts above assume non-negative coordinates. */
+ assert(l >= 0);
+ assert(t >= 0);
+ assert(w >= 0);
+ assert(h >= 0);
+
+ map_flags = gralloc0_convert_map_usage(usage);
+ ret = mod->driver->lock(handle, fence_fd, &rect, map_flags, addr);
+ *vaddr = addr[0];
+ return ret;
+}
+
+/* gralloc_module_t::unlockAsync -- returns the release fence to the caller. */
+static int gralloc0_unlock_async(struct gralloc_module_t const *module, buffer_handle_t handle,
+ int *fence_fd)
+{
+ auto mod = (struct gralloc0_module const *)module;
+ return mod->driver->unlock(handle, fence_fd);
+}
+
+/*
+ * gralloc_module_t::lockAsync_ycbcr. Only YUV-capable HAL formats are
+ * accepted; the plane pointers/strides are filled from the handle's DRM
+ * format. Unknown DRM formats unlock the buffer and fail.
+ */
+static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buffer_handle_t handle,
+ int usage, int l, int t, int w, int h,
+ struct android_ycbcr *ycbcr, int fence_fd)
+{
+ int32_t ret;
+ uint32_t map_flags;
+ uint8_t *addr[DRV_MAX_PLANES] = { nullptr, nullptr, nullptr, nullptr };
+ auto mod = (struct gralloc0_module const *)module;
+ struct rectangle rect = { .x = static_cast<uint32_t>(l),
+ .y = static_cast<uint32_t>(t),
+ .width = static_cast<uint32_t>(w),
+ .height = static_cast<uint32_t>(h) };
+
+ auto hnd = cros_gralloc_convert_handle(handle);
+ if (!hnd) {
+ drv_log("Invalid handle.\n");
+ return -EINVAL;
+ }
+
+ if ((hnd->droid_format != HAL_PIXEL_FORMAT_YCbCr_420_888) &&
+ (hnd->droid_format != HAL_PIXEL_FORMAT_YV12) &&
+ (hnd->droid_format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) {
+ drv_log("Non-YUV format not compatible.\n");
+ return -EINVAL;
+ }
+
+ /* The rectangle casts above assume non-negative coordinates. */
+ assert(l >= 0);
+ assert(t >= 0);
+ assert(w >= 0);
+ assert(h >= 0);
+
+ map_flags = gralloc0_convert_map_usage(usage);
+ ret = mod->driver->lock(handle, fence_fd, &rect, map_flags, addr);
+ if (ret)
+ return ret;
+
+ switch (hnd->format) {
+ case DRM_FORMAT_NV12:
+ /* Semi-planar: Cb/Cr interleaved in plane 1, 2-byte chroma step. */
+ ycbcr->y = addr[0];
+ ycbcr->cb = addr[1];
+ ycbcr->cr = addr[1] + 1;
+ ycbcr->ystride = hnd->strides[0];
+ ycbcr->cstride = hnd->strides[1];
+ ycbcr->chroma_step = 2;
+ break;
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU420_ANDROID:
+ /* Fully planar, V before U: plane 1 is Cr, plane 2 is Cb. */
+ ycbcr->y = addr[0];
+ ycbcr->cb = addr[2];
+ ycbcr->cr = addr[1];
+ ycbcr->ystride = hnd->strides[0];
+ ycbcr->cstride = hnd->strides[1];
+ ycbcr->chroma_step = 1;
+ break;
+ default:
+ /* Undo the lock before failing so refcounts stay balanced. */
+ module->unlock(module, handle);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+// clang-format off
+static struct hw_module_methods_t gralloc0_module_methods = { .open = gralloc0_open };
+// clang-format on
+
+/*
+ * HAL entry symbol (HMI). Advertises gralloc module API 0.3, which requires
+ * the lockAsync/unlockAsync/lockAsync_ycbcr entry points wired up below.
+ * alloc/driver start null and are created lazily in gralloc0_init().
+ */
+struct gralloc0_module HAL_MODULE_INFO_SYM = {
+ .base =
+ {
+ .common =
+ {
+ .tag = HARDWARE_MODULE_TAG,
+ .module_api_version = GRALLOC_MODULE_API_VERSION_0_3,
+ .hal_api_version = 0,
+ .id = GRALLOC_HARDWARE_MODULE_ID,
+ .name = "CrOS Gralloc",
+ .author = "Chrome OS",
+ .methods = &gralloc0_module_methods,
+ },
+
+ .registerBuffer = gralloc0_register_buffer,
+ .unregisterBuffer = gralloc0_unregister_buffer,
+ .lock = gralloc0_lock,
+ .unlock = gralloc0_unlock,
+ .perform = gralloc0_perform,
+ .lock_ycbcr = gralloc0_lock_ycbcr,
+ .lockAsync = gralloc0_lock_async,
+ .unlockAsync = gralloc0_unlock_async,
+ .lockAsync_ycbcr = gralloc0_lock_async_ycbcr,
+ },
+
+ .alloc = nullptr,
+ .driver = nullptr,
+ .initialized = false,
+};
--- /dev/null
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Standalone Makefile for the gralloc test binary. CC, CFLAGS, LDFLAGS and
+# TARGET_DIR are expected to come from the calling build environment.
+GRALLOCTEST = gralloctest
+SOURCES += gralloctest.c
+
+# NOTE(review): the recipes below compile with $(CFLAGS), not $(CCFLAGS).
+# These flags only take effect if the surrounding build folds CCFLAGS into
+# CFLAGS -- confirm against the CrOS common-mk conventions.
+CCFLAGS += -g -O2 -Wall -fPIE
+LIBS += -lhardware -lsync -lcutils -pie
+
+# Map each source file to its .o name, then prefix with the output directory.
+OBJS = $(foreach source, $(SOURCES), $(addsuffix .o, $(basename $(source))))
+
+OBJECTS = $(addprefix $(TARGET_DIR), $(notdir $(OBJS)))
+BINARY = $(addprefix $(TARGET_DIR), $(GRALLOCTEST))
+
+.PHONY: all clean
+
+all: $(BINARY)
+
+# Prerequisite-only rule; make merges it with the recipe rule further down.
+$(BINARY): $(OBJECTS)
+
+clean:
+	$(RM) $(BINARY)
+	$(RM) $(OBJECTS)
+
+$(BINARY):
+	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@ $(LIBS)
+
+# -MMD emits a .d dependency file alongside each object (not re-included here).
+$(TARGET_DIR)%.o: %.c
+	$(CC) $(CFLAGS) -c $^ -o $@ -MMD
--- /dev/null
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+ * Please run clang-format on this file after making changes:
+ *
+ * clang-format -style=file -i gralloctest.c
+ *
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <cutils/native_handle.h>
+#include <hardware/gralloc.h>
+#include <sync/sync.h>
+#include <system/graphics.h>
+
+/* Round A up to the nearest multiple of B (B must be non-zero). */
+#define ALIGN(A, B) (((A) + (B)-1) / (B) * (B))
+#define ARRAY_SIZE(A) (sizeof(A) / sizeof(*(A)))
+
+/* Test helpers: each test returns 1 on success, 0 on failure.
+ * CHECK logs the failing location; CHECK_NO_MSG fails silently (used where
+ * the caller already reports the failure). */
+#define CHECK(cond) \
+ do { \
+ if (!(cond)) { \
+ fprintf(stderr, "[ FAILED ] check in %s() %s:%d\n", __func__, __FILE__, \
+ __LINE__); \
+ return 0; \
+ } \
+ } while (0)
+
+#define CHECK_NO_MSG(cond) \
+ do { \
+ if (!(cond)) { \
+ return 0; \
+ } \
+ } while (0)
+
+/* Private API enumeration -- see <gralloc_drm.h> */
+enum { GRALLOC_DRM_GET_STRIDE,
+ GRALLOC_DRM_GET_FORMAT,
+ GRALLOC_DRM_GET_DIMENSIONS,
+ GRALLOC_DRM_GET_BACKING_STORE,
+};
+
+/* Shared state handed to every test: the gralloc module, its alloc device,
+ * and the module API level detected at init (1, 2 or 3). */
+struct gralloctest_context {
+ struct gralloc_module_t *module;
+ struct alloc_device_t *device;
+ int api;
+};
+
+/* A named test and the minimum module API level it needs to run. */
+struct gralloc_testcase {
+ const char *name;
+ int (*run_test)(struct gralloctest_context *ctx);
+ int required_api;
+};
+
+/* A format/usage pair that allocation is expected to support. */
+struct combinations {
+ int32_t format;
+ int32_t usage;
+};
+
+// clang-format off
+static struct combinations combos[] = {
+ { HAL_PIXEL_FORMAT_RGBA_8888,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+ GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_COMPOSER |
+ GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_CURSOR },
+ { HAL_PIXEL_FORMAT_RGBA_8888,
+ GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_RENDER |
+ GRALLOC_USAGE_HW_COMPOSER },
+ { HAL_PIXEL_FORMAT_RGBX_8888,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN },
+ { HAL_PIXEL_FORMAT_YCbCr_420_888,
+ GRALLOC_USAGE_EXTERNAL_DISP | GRALLOC_USAGE_HW_COMPOSER |
+ GRALLOC_USAGE_HW_TEXTURE },
+ { HAL_PIXEL_FORMAT_YCbCr_420_888,
+ GRALLOC_USAGE_RENDERSCRIPT | GRALLOC_USAGE_SW_READ_OFTEN |
+ GRALLOC_USAGE_SW_WRITE_OFTEN },
+ { HAL_PIXEL_FORMAT_YV12,
+ GRALLOC_USAGE_SW_WRITE_OFTEN | GRALLOC_USAGE_HW_COMPOSER |
+ GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP },
+ { HAL_PIXEL_FORMAT_RGB_565,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN },
+ { HAL_PIXEL_FORMAT_BGRA_8888,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN },
+ { HAL_PIXEL_FORMAT_BLOB,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN },
+};
+// clang-format on
+
+/* Per-buffer bookkeeping used by the wrappers and tests below. */
+struct grallocinfo {
+ buffer_handle_t handle; /* handle to the buffer */
+ int w; /* width of buffer */
+ int h; /* height of buffer */
+ int format; /* format of the buffer */
+ int usage; /* bitfield indicating usage */
+ int fence_fd; /* fence file descriptor */
+ void *vaddr; /* buffer virtual memory address */
+ int stride; /* stride in pixels */
+ struct android_ycbcr ycbcr; /* sw access for yuv buffers */
+};
+
+/* This function is meant to initialize the test to commonly used defaults. */
+void grallocinfo_init(struct grallocinfo *info, int w, int h, int format, int usage)
+{
+ info->w = w;
+ info->h = h;
+ info->format = format;
+ info->usage = usage;
+ info->fence_fd = -1;
+ info->vaddr = NULL;
+ info->ycbcr.y = NULL;
+ info->ycbcr.cb = NULL;
+ info->ycbcr.cr = NULL;
+ info->stride = 0;
+}
+
+/*
+ * Deep-copies a buffer handle: dup()s every fd and memcpy()s the int
+ * payload, simulating a second in-process reference. The caller owns the
+ * result and must native_handle_close()/native_handle_delete() it.
+ */
+static native_handle_t *duplicate_buffer_handle(buffer_handle_t handle)
+{
+ native_handle_t *hnd = native_handle_create(handle->numFds, handle->numInts);
+
+ if (hnd == NULL)
+ return NULL;
+
+ const int *old_data = handle->data;
+ int *new_data = hnd->data;
+
+ int i;
+ for (i = 0; i < handle->numFds; i++) {
+ *new_data = dup(*old_data);
+ old_data++;
+ new_data++;
+ }
+
+ /* The ints follow the fds in the data[] array. */
+ memcpy(new_data, old_data, sizeof(int) * handle->numInts);
+
+ return hnd;
+}
+
+/****************************************************************
+ * Wrappers around gralloc_module_t and alloc_device_t functions.
+ * GraphicBufferMapper/GraphicBufferAllocator could replace this
+ * in theory. Each wrapper returns 1 on success, 0 on failure, so
+ * they compose directly with the CHECK() macros.
+ ***************************************************************/
+
+/* Allocates info->handle/info->stride and sanity-checks the result. */
+static int allocate(struct alloc_device_t *device, struct grallocinfo *info)
+{
+ int ret;
+
+ ret = device->alloc(device, info->w, info->h, info->format, info->usage, &info->handle,
+ &info->stride);
+
+ CHECK_NO_MSG(ret == 0);
+ CHECK_NO_MSG(info->handle->version > 0);
+ CHECK_NO_MSG(info->handle->numInts >= 0);
+ CHECK_NO_MSG(info->handle->numFds >= 0);
+ CHECK_NO_MSG(info->stride >= 0);
+
+ return 1;
+}
+
+static int deallocate(struct alloc_device_t *device, struct grallocinfo *info)
+{
+ int ret;
+ ret = device->free(device, info->handle);
+ CHECK(ret == 0);
+ return 1;
+}
+
+static int register_buffer(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+ ret = module->registerBuffer(module, info->handle);
+ return (ret == 0);
+}
+
+static int unregister_buffer(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+ ret = module->unregisterBuffer(module, info->handle);
+ return (ret == 0);
+}
+
+/* Locks a quarter-size region (w/2 x h/2) at the origin into info->vaddr. */
+static int lock(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+
+ ret = module->lock(module, info->handle, info->usage, 0, 0, (info->w) / 2, (info->h) / 2,
+ &info->vaddr);
+
+ return (ret == 0);
+}
+
+static int unlock(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+ ret = module->unlock(module, info->handle);
+ return (ret == 0);
+}
+
+static int lock_ycbcr(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+
+ ret = module->lock_ycbcr(module, info->handle, info->usage, 0, 0, (info->w) / 2,
+ (info->h) / 2, &info->ycbcr);
+
+ return (ret == 0);
+}
+
+static int lock_async(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+
+ ret = module->lockAsync(module, info->handle, info->usage, 0, 0, (info->w) / 2,
+ (info->h) / 2, &info->vaddr, info->fence_fd);
+
+ return (ret == 0);
+}
+
+/* On success info->fence_fd holds the release fence (or -1). */
+static int unlock_async(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+
+ ret = module->unlockAsync(module, info->handle, &info->fence_fd);
+
+ return (ret == 0);
+}
+
+static int lock_async_ycbcr(struct gralloc_module_t *module, struct grallocinfo *info)
+{
+ int ret;
+
+ ret = module->lockAsync_ycbcr(module, info->handle, info->usage, 0, 0, (info->w) / 2,
+ (info->h) / 2, &info->ycbcr, info->fence_fd);
+
+ return (ret == 0);
+}
+
+/**************************************************************
+ * END WRAPPERS *
+ **************************************************************/
+
+/* This function tests initialization of gralloc module and allocator.
+ * Returns a heap-allocated context (never freed by the tests) or NULL.
+ * NOTE(review): ctx is leaked on the early NULL returns -- acceptable for a
+ * short-lived test binary, but worth noting. */
+static struct gralloctest_context *test_init_gralloc()
+{
+ int err;
+ hw_module_t const *hw_module;
+ struct gralloctest_context *ctx = calloc(1, sizeof(*ctx));
+
+ err = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &hw_module);
+ if (err)
+ return NULL;
+
+ gralloc_open(hw_module, &ctx->device);
+ ctx->module = (gralloc_module_t *)hw_module;
+ if (!ctx->module || !ctx->device)
+ return NULL;
+
+ /* Map the module version to the test API level; anything unknown is
+ * treated as the oldest (1). */
+ switch (ctx->module->common.module_api_version) {
+ case GRALLOC_MODULE_API_VERSION_0_3:
+ ctx->api = 3;
+ break;
+ case GRALLOC_MODULE_API_VERSION_0_2:
+ ctx->api = 2;
+ break;
+ default:
+ ctx->api = 1;
+ }
+
+ return ctx;
+}
+
+static int test_close_gralloc(struct gralloctest_context *ctx)
+{
+ CHECK(gralloc_close(ctx->device) == 0);
+ return 1;
+}
+
+/* This function tests allocation with varying buffer dimensions:
+ * square buffers, then 1xN columns, then Nx1 rows, each up to 1919. */
+static int test_alloc_varying_sizes(struct gralloctest_context *ctx)
+{
+ struct grallocinfo info;
+ int i;
+
+ grallocinfo_init(&info, 0, 0, HAL_PIXEL_FORMAT_BGRA_8888, GRALLOC_USAGE_SW_READ_OFTEN);
+
+ for (i = 1; i < 1920; i++) {
+ info.w = i;
+ info.h = i;
+ CHECK(allocate(ctx->device, &info));
+ CHECK(deallocate(ctx->device, &info));
+ }
+
+ info.w = 1;
+ for (i = 1; i < 1920; i++) {
+ info.h = i;
+ CHECK(allocate(ctx->device, &info));
+ CHECK(deallocate(ctx->device, &info));
+ }
+
+ info.h = 1;
+ for (i = 1; i < 1920; i++) {
+ info.w = i;
+ CHECK(allocate(ctx->device, &info));
+ CHECK(deallocate(ctx->device, &info));
+ }
+
+ return 1;
+}
+
+/*
+ * This function tests that we find at least one working format for each
+ * combos which we consider important.
+ */
+static int test_alloc_combinations(struct gralloctest_context *ctx)
+{
+ int i;
+
+ struct grallocinfo info;
+ grallocinfo_init(&info, 512, 512, 0, 0);
+
+ for (i = 0; i < ARRAY_SIZE(combos); i++) {
+ info.format = combos[i].format;
+ info.usage = combos[i].usage;
+ CHECK(allocate(ctx->device, &info));
+ CHECK(deallocate(ctx->device, &info));
+ }
+
+ return 1;
+}
+
+/*
+ * This function tests the advertised API version.
+ * Version_0_2 added (*lock_ycbcr)() method.
+ * Version_0_3 added fence passing to/from lock/unlock.
+ * Each version must expose exactly its methods and no newer ones.
+ */
+static int test_api(struct gralloctest_context *ctx)
+{
+
+ CHECK(ctx->module->registerBuffer);
+ CHECK(ctx->module->unregisterBuffer);
+ CHECK(ctx->module->lock);
+ CHECK(ctx->module->unlock);
+
+ switch (ctx->module->common.module_api_version) {
+ case GRALLOC_MODULE_API_VERSION_0_3:
+ CHECK(ctx->module->lock_ycbcr);
+ CHECK(ctx->module->lockAsync);
+ CHECK(ctx->module->unlockAsync);
+ CHECK(ctx->module->lockAsync_ycbcr);
+ break;
+ case GRALLOC_MODULE_API_VERSION_0_2:
+ CHECK(ctx->module->lock_ycbcr);
+ CHECK(ctx->module->lockAsync == NULL);
+ CHECK(ctx->module->unlockAsync == NULL);
+ CHECK(ctx->module->lockAsync_ycbcr == NULL);
+ break;
+ case GRALLOC_MODULE_API_VERSION_0_1:
+ CHECK(ctx->module->lockAsync == NULL);
+ CHECK(ctx->module->unlockAsync == NULL);
+ CHECK(ctx->module->lockAsync_ycbcr == NULL);
+ CHECK(ctx->module->lock_ycbcr == NULL);
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * This function registers, unregisters, locks and unlocks the buffer in
+ * various orders. The `== 0` CHECKs assert that the *wrapper* failed, i.e.
+ * the operation is expected to be rejected in that state.
+ */
+static int test_gralloc_order(struct gralloctest_context *ctx)
+{
+ struct grallocinfo info, duplicate;
+
+ grallocinfo_init(&info, 512, 512, HAL_PIXEL_FORMAT_BGRA_8888, GRALLOC_USAGE_SW_READ_OFTEN);
+
+ grallocinfo_init(&duplicate, 512, 512, HAL_PIXEL_FORMAT_BGRA_8888,
+ GRALLOC_USAGE_SW_READ_OFTEN);
+
+ CHECK(allocate(ctx->device, &info));
+
+ /*
+ * Duplicate the buffer handle to simulate an additional reference
+ * in same process.
+ */
+ native_handle_t *native_handle = duplicate_buffer_handle(info.handle);
+ duplicate.handle = native_handle;
+
+ /* Unregistering/unlocking a never-registered handle must fail. */
+ CHECK(unregister_buffer(ctx->module, &duplicate) == 0);
+ CHECK(register_buffer(ctx->module, &duplicate));
+
+ CHECK(unlock(ctx->module, &duplicate) == 0);
+
+ CHECK(lock(ctx->module, &duplicate));
+ CHECK(duplicate.vaddr);
+ CHECK(unlock(ctx->module, &duplicate));
+
+ CHECK(unregister_buffer(ctx->module, &duplicate));
+
+ CHECK(register_buffer(ctx->module, &duplicate));
+ CHECK(unregister_buffer(ctx->module, &duplicate));
+ CHECK(unregister_buffer(ctx->module, &duplicate) == 0);
+
+ /* The duplicate must keep the buffer alive after the original is freed. */
+ CHECK(register_buffer(ctx->module, &duplicate));
+ CHECK(deallocate(ctx->device, &info));
+
+ /* Locks nest: two locks need two unlocks; a third unlock must fail. */
+ CHECK(lock(ctx->module, &duplicate));
+ CHECK(lock(ctx->module, &duplicate));
+ CHECK(unlock(ctx->module, &duplicate));
+ CHECK(unlock(ctx->module, &duplicate));
+ CHECK(unlock(ctx->module, &duplicate) == 0);
+ CHECK(unregister_buffer(ctx->module, &duplicate));
+
+ CHECK(native_handle_close(duplicate.handle) == 0);
+ CHECK(native_handle_delete(native_handle) == 0);
+
+ return 1;
+}
+
+/* This function tests CPU reads and writes: writes a magic value through one
+ * mapping and verifies it survives an unlock/relock cycle. */
+static int test_mapping(struct gralloctest_context *ctx)
+{
+ struct grallocinfo info;
+ uint32_t *ptr = NULL;
+ uint32_t magic_number = 0x000ABBA;
+
+ grallocinfo_init(&info, 512, 512, HAL_PIXEL_FORMAT_BGRA_8888,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN);
+
+ CHECK(allocate(ctx->device, &info));
+ CHECK(lock(ctx->module, &info));
+
+ ptr = (uint32_t *)info.vaddr;
+ CHECK(ptr);
+ ptr[(info.w) / 2] = magic_number;
+
+ CHECK(unlock(ctx->module, &info));
+ info.vaddr = NULL;
+ ptr = NULL;
+
+ CHECK(lock(ctx->module, &info));
+ ptr = (uint32_t *)info.vaddr;
+ CHECK(ptr);
+ CHECK(ptr[info.w / 2] == magic_number);
+
+ CHECK(unlock(ctx->module, &info));
+ CHECK(deallocate(ctx->device, &info));
+
+ return 1;
+}
+
+/* This function tests the private API we use in ARC++ -- not part of official
+ * gralloc. */
+static int test_perform(struct gralloctest_context *ctx)
+{
+ int32_t format;
+ uint64_t id1, id2;
+ uint32_t stride, width, height;
+ struct grallocinfo info, duplicate;
+ struct gralloc_module_t *mod = ctx->module;
+
+ grallocinfo_init(&info, 650, 408, HAL_PIXEL_FORMAT_BGRA_8888, GRALLOC_USAGE_SW_READ_OFTEN);
+
+ CHECK(allocate(ctx->device, &info));
+
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_STRIDE, info.handle, &stride) == 0);
+ CHECK(stride == info.stride);
+
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_FORMAT, info.handle, &format) == 0);
+ CHECK(format == info.format);
+
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_DIMENSIONS, info.handle, &width, &height) == 0);
+ CHECK(width == info.w);
+ CHECK(height == info.h);
+
+ native_handle_t *native_handle = duplicate_buffer_handle(info.handle);
+ duplicate.handle = native_handle;
+
+ /* Before registration the backing-store query must fail (non-zero). */
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_BACKING_STORE, duplicate.handle, &id2));
+ CHECK(register_buffer(mod, &duplicate));
+
+ /* Both references must report the same backing store id. */
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_BACKING_STORE, info.handle, &id1) == 0);
+ CHECK(mod->perform(mod, GRALLOC_DRM_GET_BACKING_STORE, duplicate.handle, &id2) == 0);
+ CHECK(id1 == id2);
+
+ CHECK(unregister_buffer(mod, &duplicate));
+ CHECK(deallocate(ctx->device, &info));
+
+ return 1;
+}
+
+/* This function tests that only YUV buffers work with *lock_ycbcr:
+ * plain lock() must fail on YCbCr_420_888, and lock_ycbcr() must fail
+ * on an RGB buffer. */
+static int test_ycbcr(struct gralloctest_context *ctx)
+
+{
+ struct grallocinfo info;
+ grallocinfo_init(&info, 512, 512, HAL_PIXEL_FORMAT_YCbCr_420_888,
+ GRALLOC_USAGE_SW_READ_OFTEN);
+
+ CHECK(allocate(ctx->device, &info));
+
+ CHECK(lock(ctx->module, &info) == 0);
+ CHECK(lock_ycbcr(ctx->module, &info));
+ CHECK(info.ycbcr.y);
+ CHECK(info.ycbcr.cb);
+ CHECK(info.ycbcr.cr);
+ CHECK(unlock(ctx->module, &info));
+
+ CHECK(deallocate(ctx->device, &info));
+
+ info.format = HAL_PIXEL_FORMAT_BGRA_8888;
+ CHECK(allocate(ctx->device, &info));
+
+ CHECK(lock_ycbcr(ctx->module, &info) == 0);
+ CHECK(lock(ctx->module, &info));
+ CHECK(unlock(ctx->module, &info));
+
+ CHECK(deallocate(ctx->device, &info));
+
+ return 1;
+}
+
+/*
+ * This function tests a method ARC++ uses to query YUV buffer
+ * info -- not part of official gralloc API. This is used in
+ * Mali, Mesa, the ArcCodec and wayland_service.
+ */
+static int test_yuv_info(struct gralloctest_context *ctx)
+{
+ struct grallocinfo info;
+ uintptr_t y_size, c_stride, c_size, cr_offset, cb_offset;
+ uint32_t width, height;
+ width = height = 512;
+
+ /* <system/graphics.h> defines YV12 as having:
+ * - an even width
+ * - an even height
+ * - a horizontal stride multiple of 16 pixels
+ * - a vertical stride equal to the height
+ *
+ * y_size = stride * height.
+ * c_stride = ALIGN(stride/2, 16).
+ * c_size = c_stride * height/2.
+ * size = y_size + c_size * 2.
+ * cr_offset = y_size.
+ * cb_offset = y_size + c_size.
+ */
+
+ grallocinfo_init(&info, width, height, HAL_PIXEL_FORMAT_YV12, GRALLOC_USAGE_SW_READ_OFTEN);
+
+ CHECK(allocate(ctx->device, &info));
+
+ /* Compute the expected layout from the stride the allocator returned. */
+ y_size = info.stride * height;
+ c_stride = ALIGN(info.stride / 2, 16);
+ c_size = c_stride * height / 2;
+ cr_offset = y_size;
+ cb_offset = y_size + c_size;
+
+ info.usage = 0;
+
+ /*
+ * Check if the (*lock_ycbcr) with usage of zero returns the
+ * offsets and strides of the YV12 buffer. This is unofficial
+ * behavior we are testing here. With usage 0 the returned
+ * "pointers" are plane offsets, not mapped addresses.
+ */
+ CHECK(lock_ycbcr(ctx->module, &info));
+
+ CHECK(info.stride == info.ycbcr.ystride);
+ CHECK(c_stride == info.ycbcr.cstride);
+ CHECK(cr_offset == (uintptr_t)info.ycbcr.cr);
+ CHECK(cb_offset == (uintptr_t)info.ycbcr.cb);
+
+ CHECK(unlock(ctx->module, &info));
+
+ CHECK(deallocate(ctx->device, &info));
+
+ return 1;
+}
+
+/* This function tests asynchronous locking and unlocking of buffers:
+ * locks with no acquire fence (-1), then waits on and closes any release
+ * fence returned by unlockAsync. */
+static int test_async(struct gralloctest_context *ctx)
+
+{
+ struct grallocinfo rgba_info, ycbcr_info;
+ grallocinfo_init(&rgba_info, 512, 512, HAL_PIXEL_FORMAT_BGRA_8888,
+ GRALLOC_USAGE_SW_READ_OFTEN);
+ grallocinfo_init(&ycbcr_info, 512, 512, HAL_PIXEL_FORMAT_YCbCr_420_888,
+ GRALLOC_USAGE_SW_READ_OFTEN);
+
+ CHECK(allocate(ctx->device, &rgba_info));
+ CHECK(allocate(ctx->device, &ycbcr_info));
+
+ CHECK(lock_async(ctx->module, &rgba_info));
+ CHECK(lock_async_ycbcr(ctx->module, &ycbcr_info));
+
+ CHECK(rgba_info.vaddr);
+ CHECK(ycbcr_info.ycbcr.y);
+ CHECK(ycbcr_info.ycbcr.cb);
+ CHECK(ycbcr_info.ycbcr.cr);
+
+ /*
+ * Wait on the fence returned from unlock_async and check it doesn't
+ * return an error.
+ */
+ CHECK(unlock_async(ctx->module, &rgba_info));
+ CHECK(unlock_async(ctx->module, &ycbcr_info));
+
+ if (rgba_info.fence_fd >= 0) {
+ CHECK(sync_wait(rgba_info.fence_fd, 10000) >= 0);
+ CHECK(close(rgba_info.fence_fd) == 0);
+ }
+
+ if (ycbcr_info.fence_fd >= 0) {
+ CHECK(sync_wait(ycbcr_info.fence_fd, 10000) >= 0);
+ CHECK(close(ycbcr_info.fence_fd) == 0);
+ }
+
+ CHECK(deallocate(ctx->device, &rgba_info));
+ CHECK(deallocate(ctx->device, &ycbcr_info));
+
+ return 1;
+}
+
+/* Test registry: name, entry point, and minimum module API level (see
+ * gralloc_testcase). Tests above a context's API level are skipped. */
+static const struct gralloc_testcase tests[] = {
+ { "alloc_varying_sizes", test_alloc_varying_sizes, 1 },
+ { "alloc_combinations", test_alloc_combinations, 1 },
+ { "api", test_api, 1 },
+ { "gralloc_order", test_gralloc_order, 1 },
+ { "mapping", test_mapping, 1 },
+ { "perform", test_perform, 1 },
+ { "ycbcr", test_ycbcr, 2 },
+ { "yuv_info", test_yuv_info, 2 },
+ { "async", test_async, 3 },
+};
+
+/* Prints usage plus the list of runnable test names. */
+static void print_help(const char *argv0)
+{
+ uint32_t i;
+ printf("usage: %s <test_name>\n\n", argv0);
+ printf("A valid name test is one the following:\n");
+ for (i = 0; i < ARRAY_SIZE(tests); i++)
+ printf("%s\n", tests[i].name);
+}
+
+/*
+ * Entry point. Runs the test named in argv[1] (or every test for "all"),
+ * skipping tests that need a newer module API than the one detected.
+ * Returns 0 on success/usage, non-zero if any test or setup step failed.
+ * Note: when argc != 2 control falls through to the print_usage label.
+ */
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+ uint32_t num_run = 0;
+
+ /* Unbuffered stdout so RUN/PASSED lines interleave correctly with stderr. */
+ setbuf(stdout, NULL);
+ if (argc == 2) {
+ uint32_t i;
+ char *name = argv[1];
+
+ struct gralloctest_context *ctx = test_init_gralloc();
+ if (!ctx) {
+ fprintf(stderr, "[ FAILED ] to initialize gralloc.\n");
+ return 1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (strcmp(tests[i].name, name) && strcmp("all", name))
+ continue;
+
+ /* Tests above the detected API level count as passed. */
+ int success = 1;
+ if (ctx->api >= tests[i].required_api)
+ success = tests[i].run_test(ctx);
+
+ printf("[ RUN ] gralloctest.%s\n", tests[i].name);
+ if (!success) {
+ fprintf(stderr, "[ FAILED ] gralloctest.%s\n", tests[i].name);
+ ret |= 1;
+ } else {
+ printf("[ PASSED ] gralloctest.%s\n", tests[i].name);
+ }
+
+ num_run++;
+ }
+
+ if (!test_close_gralloc(ctx)) {
+ fprintf(stderr, "[ FAILED ] to close gralloc.\n");
+ return 1;
+ }
+
+ /* An unrecognized test name runs nothing -- show usage instead. */
+ if (!num_run)
+ goto print_usage;
+
+ return ret;
+ }
+
+print_usage:
+ print_help(argv[0]);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2017 Advanced Micro Devices. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_AMDGPU
+
+#include <assert.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <xf86drm.h>
+
+#include "dri.h"
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+/* Mapping between DRM fourcc formats and DRI image format enums. */
+static const struct {
+ uint32_t drm_format;
+ int dri_image_format;
+} drm_to_dri_image_formats[] = {
+ { DRM_FORMAT_R8, __DRI_IMAGE_FORMAT_R8 },
+ { DRM_FORMAT_GR88, __DRI_IMAGE_FORMAT_GR88 },
+ { DRM_FORMAT_RGB565, __DRI_IMAGE_FORMAT_RGB565 },
+ { DRM_FORMAT_XRGB8888, __DRI_IMAGE_FORMAT_XRGB8888 },
+ { DRM_FORMAT_ARGB8888, __DRI_IMAGE_FORMAT_ARGB8888 },
+ { DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888 },
+ { DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888 },
+ { DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010 },
+ { DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010 },
+};
+
+/* Returns the __DRI_IMAGE_FORMAT_* for a DRM fourcc, or 0 if unmapped. */
+static int drm_format_to_dri_format(uint32_t drm_format)
+{
+ uint32_t i;
+ for (i = 0; i < ARRAY_SIZE(drm_to_dri_image_formats); i++) {
+ if (drm_to_dri_image_formats[i].drm_format == drm_format)
+ return drm_to_dri_image_formats[i].dri_image_format;
+ }
+
+ return 0;
+}
+
+/*
+ * Scans a NULL-terminated __DRIextension list for |name| with version
+ * >= |min_version|; stores the match in *dst and returns true if found.
+ */
+static bool lookup_extension(const __DRIextension *const *extensions, const char *name,
+ int min_version, const __DRIextension **dst)
+{
+ while (*extensions) {
+ if ((*extensions)->name && !strcmp((*extensions)->name, name) &&
+ (*extensions)->version >= min_version) {
+ *dst = *extensions;
+ return true;
+ }
+
+ extensions++;
+ }
+
+ return false;
+}
+
+/*
+ * The DRI GEM namespace may be different from the minigbm's driver GEM namespace. We have
+ * to import into minigbm: export the DRI image as a prime fd, then convert
+ * that fd into a GEM handle on minigbm's own DRM fd.
+ * NOTE(review): on the drmPrimeFDToHandle error path prime_fd is returned
+ * without being closed -- fd leak; also the -errno after queryImage assumes
+ * the extension sets errno, which is not guaranteed. Confirm both.
+ */
+static int import_into_minigbm(struct dri_driver *dri, struct bo *bo)
+{
+ uint32_t handle;
+ int prime_fd, ret;
+
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_FD, &prime_fd))
+ return -errno;
+
+ ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle);
+ if (ret) {
+ drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ bo->handles[0].u32 = handle;
+ close(prime_fd);
+ return 0;
+}
+
+/*
+ * The caller is responsible for setting drv->priv to a structure that derives from dri_driver.
+ */
+int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix)
+{
+ char fname[128];
+ const __DRIextension **(*get_extensions)();
+ const __DRIextension *loader_extensions[] = { NULL };
+
+ struct dri_driver *dri = drv->priv;
+ dri->driver_handle = dlopen(dri_so_path, RTLD_NOW | RTLD_GLOBAL);
+ if (!dri->driver_handle)
+ return -ENODEV;
+
+ snprintf(fname, sizeof(fname), __DRI_DRIVER_GET_EXTENSIONS "_%s", driver_suffix);
+ get_extensions = dlsym(dri->driver_handle, fname);
+ if (!get_extensions)
+ goto free_handle;
+
+ dri->extensions = get_extensions();
+ if (!dri->extensions)
+ goto free_handle;
+
+ if (!lookup_extension(dri->extensions, __DRI_CORE, 2,
+ (const __DRIextension **)&dri->core_extension))
+ goto free_handle;
+
+ /* Version 4 for createNewScreen2 */
+ if (!lookup_extension(dri->extensions, __DRI_DRI2, 4,
+ (const __DRIextension **)&dri->dri2_extension))
+ goto free_handle;
+
+ dri->device = dri->dri2_extension->createNewScreen2(0, drv_get_fd(drv), loader_extensions,
+ dri->extensions, &dri->configs, NULL);
+ if (!dri->device)
+ goto free_handle;
+
+ dri->context =
+ dri->dri2_extension->createNewContext(dri->device, *dri->configs, NULL, NULL);
+
+ if (!dri->context)
+ goto free_screen;
+
+ if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI_IMAGE, 12,
+ (const __DRIextension **)&dri->image_extension))
+ goto free_context;
+
+ if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI2_FLUSH, 4,
+ (const __DRIextension **)&dri->flush_extension))
+ goto free_context;
+
+ return 0;
+
+free_context:
+ dri->core_extension->destroyContext(dri->context);
+free_screen:
+ dri->core_extension->destroyScreen(dri->device);
+free_handle:
+ dlclose(dri->driver_handle);
+ dri->driver_handle = NULL;
+ return -ENODEV;
+}
+
+/*
+ * The caller is responsible for freeing drv->priv.
+ */
+void dri_close(struct driver *drv)
+{
+ struct dri_driver *dri = drv->priv;
+
+ dri->core_extension->destroyContext(dri->context);
+ dri->core_extension->destroyScreen(dri->device);
+ dlclose(dri->driver_handle);
+ dri->driver_handle = NULL;
+}
+
+int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ unsigned int dri_use;
+ int ret, dri_format, stride, offset;
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(bo->num_planes == 1);
+ dri_format = drm_format_to_dri_format(format);
+
+ /* Gallium drivers require shared to get the handle and stride. */
+ dri_use = __DRI_IMAGE_USE_SHARE;
+ if (use_flags & BO_USE_SCANOUT)
+ dri_use |= __DRI_IMAGE_USE_SCANOUT;
+ if (use_flags & BO_USE_CURSOR)
+ dri_use |= __DRI_IMAGE_USE_CURSOR;
+ if (use_flags & BO_USE_LINEAR)
+ dri_use |= __DRI_IMAGE_USE_LINEAR;
+
+ bo->priv = dri->image_extension->createImage(dri->device, width, height, dri_format,
+ dri_use, NULL);
+ if (!bo->priv) {
+ ret = -errno;
+ return ret;
+ }
+
+ ret = import_into_minigbm(dri, bo);
+ if (ret)
+ goto free_image;
+
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_STRIDE, &stride)) {
+ ret = -errno;
+ goto free_image;
+ }
+
+ if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) {
+ ret = -errno;
+ goto free_image;
+ }
+
+ bo->strides[0] = stride;
+ bo->sizes[0] = stride * height;
+ bo->offsets[0] = offset;
+ bo->total_size = offset + bo->sizes[0];
+ return 0;
+
+free_image:
+ dri->image_extension->destroyImage(bo->priv);
+ return ret;
+}
+
+int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+ int ret;
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(bo->num_planes == 1);
+
+ // clang-format off
+ bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height,
+ data->format, data->fds, bo->num_planes,
+ (int *)data->strides,
+ (int *)data->offsets, NULL);
+ // clang-format on
+ if (!bo->priv)
+ return -errno;
+
+ ret = import_into_minigbm(dri, bo);
+ if (ret) {
+ dri->image_extension->destroyImage(bo->priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dri_bo_destroy(struct bo *bo)
+{
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(bo->priv);
+ dri->image_extension->destroyImage(bo->priv);
+ bo->priv = NULL;
+ return 0;
+}
+
+/*
+ * Map an image plane.
+ *
+ * This relies on the underlying driver to do a decompressing and/or de-tiling
+ * blit if necessary,
+ *
+ * This function itself is not thread-safe; we rely on the fact that the caller
+ * locks a per-driver mutex.
+ */
+void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ struct dri_driver *dri = bo->drv->priv;
+
+ /* GBM flags and DRI flags are the same. */
+ vma->addr =
+ dri->image_extension->mapImage(dri->context, bo->priv, 0, 0, bo->width, bo->height,
+ map_flags, (int *)&vma->map_strides[plane], &vma->priv);
+ if (!vma->addr)
+ return MAP_FAILED;
+
+ return vma->addr;
+}
+
+int dri_bo_unmap(struct bo *bo, struct vma *vma)
+{
+ struct dri_driver *dri = bo->drv->priv;
+
+ assert(vma->priv);
+ dri->image_extension->unmapImage(dri->context, bo->priv, vma->priv);
+
+ /*
+ * From gbm_dri.c in Mesa:
+ *
+ * "Not all DRI drivers use direct maps. They may queue up DMA operations
+ * on the mapping context. Since there is no explicit gbm flush mechanism,
+ * we need to flush here."
+ */
+
+ dri->flush_extension->flush_with_flags(dri->context, NULL, __DRI2_FLUSH_CONTEXT, 0);
+ return 0;
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright 2017 Advanced Micro Devices. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_AMDGPU
+
+typedef int GLint;
+typedef unsigned int GLuint;
+typedef unsigned char GLboolean;
+
+#include "GL/internal/dri_interface.h"
+#include "drv.h"
+
+struct dri_driver {
+ void *driver_handle;
+ __DRIscreen *device;
+ __DRIcontext *context; /* Needed for map/unmap operations. */
+ const __DRIextension **extensions;
+ const __DRIcoreExtension *core_extension;
+ const __DRIdri2Extension *dri2_extension;
+ const __DRIimageExtension *image_extension;
+ const __DRI2flushExtension *flush_extension;
+ const __DRIconfig **configs;
+};
+
+int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix);
+void dri_close(struct driver *drv);
+int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags);
+int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data);
+int dri_bo_destroy(struct bo *bo);
+void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags);
+int dri_bo_unmap(struct bo *bo, struct vma *vma);
+
+#endif
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
#include <xf86drm.h>
+#ifdef __ANDROID__
+#include <cutils/log.h>
+#include <libgen.h>
+#endif
+
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#ifdef DRV_AMDGPU
-extern struct backend backend_amdgpu;
+extern const struct backend backend_amdgpu;
#endif
-extern struct backend backend_cirrus;
-extern struct backend backend_evdi;
+extern const struct backend backend_evdi;
#ifdef DRV_EXYNOS
-extern struct backend backend_exynos;
+extern const struct backend backend_exynos;
#endif
-extern struct backend backend_gma500;
#ifdef DRV_I915
-extern struct backend backend_i915;
+extern const struct backend backend_i915;
#endif
#ifdef DRV_MARVELL
-extern struct backend backend_marvell;
+extern const struct backend backend_marvell;
#endif
#ifdef DRV_MEDIATEK
-extern struct backend backend_mediatek;
+extern const struct backend backend_mediatek;
+#endif
+#ifdef DRV_MESON
+extern const struct backend backend_meson;
+#endif
+#ifdef DRV_MSM
+extern const struct backend backend_msm;
+#endif
+extern const struct backend backend_nouveau;
+#ifdef DRV_RADEON
+extern const struct backend backend_radeon;
#endif
-extern struct backend backend_nouveau;
#ifdef DRV_ROCKCHIP
-extern struct backend backend_rockchip;
+extern const struct backend backend_rockchip;
#endif
#ifdef DRV_TEGRA
-extern struct backend backend_tegra;
+extern const struct backend backend_tegra;
#endif
-extern struct backend backend_udl;
+extern const struct backend backend_udl;
#ifdef DRV_VC4
-extern struct backend backend_vc4;
+extern const struct backend backend_vc4;
#endif
-extern struct backend backend_vgem;
-extern struct backend backend_virtio_gpu;
+extern const struct backend backend_vgem;
+extern const struct backend backend_virtio_gpu;
-static struct backend *drv_get_backend(int fd)
+static const struct backend *drv_get_backend(int fd)
{
drmVersionPtr drm_version;
unsigned int i;
if (!drm_version)
return NULL;
- struct backend *backend_list[] = {
+ const struct backend *backend_list[] = {
#ifdef DRV_AMDGPU
&backend_amdgpu,
#endif
- &backend_cirrus, &backend_evdi,
+ &backend_evdi,
#ifdef DRV_EXYNOS
&backend_exynos,
#endif
- &backend_gma500,
#ifdef DRV_I915
&backend_i915,
#endif
#ifdef DRV_MEDIATEK
&backend_mediatek,
#endif
+#ifdef DRV_MESON
+ &backend_meson,
+#endif
+#ifdef DRV_MSM
+ &backend_msm,
+#endif
&backend_nouveau,
+#ifdef DRV_RADEON
+ &backend_radeon,
+#endif
#ifdef DRV_ROCKCHIP
&backend_rockchip,
#endif
if (!drv->buffer_table)
goto free_lock;
- drv->map_table = drmHashCreate();
- if (!drv->map_table)
+ drv->mappings = drv_array_init(sizeof(struct mapping));
+ if (!drv->mappings)
goto free_buffer_table;
- /* Start with a power of 2 number of allocations. */
- drv->backend->combos.allocations = 2;
- drv->backend->combos.size = 0;
- drv->backend->combos.data =
- calloc(drv->backend->combos.allocations, sizeof(struct combination));
- if (!drv->backend->combos.data)
- goto free_map_table;
+ drv->combos = drv_array_init(sizeof(struct combination));
+ if (!drv->combos)
+ goto free_mappings;
if (drv->backend->init) {
ret = drv->backend->init(drv);
if (ret) {
- free(drv->backend->combos.data);
- goto free_map_table;
+ drv_array_destroy(drv->combos);
+ goto free_mappings;
}
}
return drv;
-free_map_table:
- drmHashDestroy(drv->map_table);
+free_mappings:
+ drv_array_destroy(drv->mappings);
free_buffer_table:
drmHashDestroy(drv->buffer_table);
free_lock:
drv->backend->close(drv);
drmHashDestroy(drv->buffer_table);
- drmHashDestroy(drv->map_table);
-
- free(drv->backend->combos.data);
+ drv_array_destroy(drv->mappings);
+ drv_array_destroy(drv->combos);
pthread_mutex_unlock(&drv->driver_lock);
pthread_mutex_destroy(&drv->driver_lock);
return drv->backend->name;
}
-struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t usage)
+struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags)
{
struct combination *curr, *best;
- if (format == DRM_FORMAT_NONE || usage == BO_USE_NONE)
+ if (format == DRM_FORMAT_NONE || use_flags == BO_USE_NONE)
return 0;
best = NULL;
uint32_t i;
- for (i = 0; i < drv->backend->combos.size; i++) {
- curr = &drv->backend->combos.data[i];
- if ((format == curr->format) && usage == (curr->usage & usage))
+ for (i = 0; i < drv_array_size(drv->combos); i++) {
+ curr = drv_array_at_idx(drv->combos, i);
+ if ((format == curr->format) && use_flags == (curr->use_flags & use_flags))
if (!best || best->metadata.priority < curr->metadata.priority)
best = curr;
}
return best;
}
-struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format)
+struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
{
struct bo *bo;
bo->width = width;
bo->height = height;
bo->format = format;
+ bo->use_flags = use_flags;
bo->num_planes = drv_num_planes_from_format(format);
if (!bo->num_planes) {
}
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
- uint64_t flags)
+ uint64_t use_flags)
{
int ret;
size_t plane;
struct bo *bo;
- bo = drv_bo_new(drv, width, height, format);
+ bo = drv_bo_new(drv, width, height, format, use_flags);
if (!bo)
return NULL;
- ret = drv->backend->bo_create(bo, width, height, format, flags);
+ ret = drv->backend->bo_create(bo, width, height, format, use_flags);
if (ret) {
free(bo);
pthread_mutex_lock(&drv->driver_lock);
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->num_planes; plane++) {
+ if (plane > 0)
+ assert(bo->offsets[plane] >= bo->offsets[plane - 1]);
+
drv_increment_reference_count(drv, bo, plane);
+ }
pthread_mutex_unlock(&drv->driver_lock);
return NULL;
}
- bo = drv_bo_new(drv, width, height, format);
+ bo = drv_bo_new(drv, width, height, format, BO_USE_NONE);
if (!bo)
return NULL;
pthread_mutex_lock(&drv->driver_lock);
- for (plane = 0; plane < bo->num_planes; plane++)
+ for (plane = 0; plane < bo->num_planes; plane++) {
+ if (plane > 0)
+ assert(bo->offsets[plane] >= bo->offsets[plane - 1]);
+
drv_increment_reference_count(drv, bo, plane);
+ }
pthread_mutex_unlock(&drv->driver_lock);
pthread_mutex_unlock(&drv->driver_lock);
- if (total == 0)
+ if (total == 0) {
+ assert(drv_mapping_destroy(bo) == 0);
bo->drv->backend->bo_destroy(bo);
+ }
free(bo);
}
int ret;
size_t plane;
struct bo *bo;
+ off_t seek_end;
- bo = drv_bo_new(drv, data->width, data->height, data->format);
+ bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags);
if (!bo)
return NULL;
for (plane = 0; plane < bo->num_planes; plane++) {
bo->strides[plane] = data->strides[plane];
bo->offsets[plane] = data->offsets[plane];
- bo->sizes[plane] = data->sizes[plane];
bo->format_modifiers[plane] = data->format_modifiers[plane];
- bo->total_size += data->sizes[plane];
+
+ seek_end = lseek(data->fds[plane], 0, SEEK_END);
+ if (seek_end == (off_t)(-1)) {
+ drv_log("lseek() failed with %s\n", strerror(errno));
+ goto destroy_bo;
+ }
+
+ lseek(data->fds[plane], 0, SEEK_SET);
+ if (plane == bo->num_planes - 1 || data->offsets[plane + 1] == 0)
+ bo->sizes[plane] = seek_end - data->offsets[plane];
+ else
+ bo->sizes[plane] = data->offsets[plane + 1] - data->offsets[plane];
+
+ if ((int64_t)bo->offsets[plane] + bo->sizes[plane] > seek_end) {
+ drv_log("buffer size is too large.\n");
+ goto destroy_bo;
+ }
+
+ bo->total_size += bo->sizes[plane];
}
return bo;
+
+destroy_bo:
+ drv_bo_destroy(bo);
+ return NULL;
}
-void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t flags, struct map_info **map_data, size_t plane)
+void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
+ struct mapping **map_data, size_t plane)
{
- void *ptr;
+ uint32_t i;
uint8_t *addr;
- size_t offset;
- struct map_info *data;
+ struct mapping mapping;
+
+ assert(rect->width >= 0);
+ assert(rect->height >= 0);
+ assert(rect->x + rect->width <= drv_bo_get_width(bo));
+ assert(rect->y + rect->height <= drv_bo_get_height(bo));
+ assert(BO_MAP_READ_WRITE & map_flags);
+ /* No CPU access for protected buffers. */
+ assert(!(bo->use_flags & BO_USE_PROTECTED));
- assert(width > 0);
- assert(height > 0);
- assert(x + width <= drv_bo_get_width(bo));
- assert(y + height <= drv_bo_get_height(bo));
+ memset(&mapping, 0, sizeof(mapping));
+ mapping.rect = *rect;
+ mapping.refcount = 1;
pthread_mutex_lock(&bo->drv->driver_lock);
- if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) {
- data = (struct map_info *)ptr;
- data->refcount++;
+ for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
+ struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
+ if (prior->vma->handle != bo->handles[plane].u32 ||
+ prior->vma->map_flags != map_flags)
+ continue;
+
+ if (rect->x != prior->rect.x || rect->y != prior->rect.y ||
+ rect->width != prior->rect.width || rect->height != prior->rect.height)
+ continue;
+
+ prior->refcount++;
+ *map_data = prior;
+ goto exact_match;
+ }
+
+ for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
+ struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i);
+ if (prior->vma->handle != bo->handles[plane].u32 ||
+ prior->vma->map_flags != map_flags)
+ continue;
+
+ prior->vma->refcount++;
+ mapping.vma = prior->vma;
goto success;
}
- data = calloc(1, sizeof(*data));
- addr = bo->drv->backend->bo_map(bo, data, plane);
+ mapping.vma = calloc(1, sizeof(*mapping.vma));
+ memcpy(mapping.vma->map_strides, bo->strides, sizeof(mapping.vma->map_strides));
+ addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags);
if (addr == MAP_FAILED) {
*map_data = NULL;
- free(data);
+ free(mapping.vma);
pthread_mutex_unlock(&bo->drv->driver_lock);
return MAP_FAILED;
}
- data->refcount = 1;
- data->addr = addr;
- data->handle = bo->handles[plane].u32;
- drmHashInsert(bo->drv->map_table, bo->handles[plane].u32, (void *)data);
+ mapping.vma->refcount = 1;
+ mapping.vma->addr = addr;
+ mapping.vma->handle = bo->handles[plane].u32;
+ mapping.vma->map_flags = map_flags;
success:
- *map_data = data;
- offset = drv_bo_get_plane_stride(bo, plane) * y;
- offset += drv_stride_from_format(bo->format, x, plane);
- addr = (uint8_t *)data->addr;
- addr += drv_bo_get_plane_offset(bo, plane) + offset;
+ *map_data = drv_array_append(bo->drv->mappings, &mapping);
+exact_match:
+ drv_bo_invalidate(bo, *map_data);
+ addr = (uint8_t *)((*map_data)->vma->addr);
+ addr += drv_bo_get_plane_offset(bo, plane);
pthread_mutex_unlock(&bo->drv->driver_lock);
-
return (void *)addr;
}
-int drv_bo_unmap(struct bo *bo, struct map_info *data)
+int drv_bo_unmap(struct bo *bo, struct mapping *mapping)
{
+ uint32_t i;
int ret = 0;
- assert(data);
- assert(data->refcount >= 0);
-
pthread_mutex_lock(&bo->drv->driver_lock);
- if (!--data->refcount) {
- if (bo->drv->backend->bo_unmap)
- ret = bo->drv->backend->bo_unmap(bo, data);
- else
- ret = munmap(data->addr, data->length);
- drmHashDelete(bo->drv->map_table, data->handle);
- free(data);
+ if (--mapping->refcount)
+ goto out;
+
+ if (!--mapping->vma->refcount) {
+ ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
+ free(mapping->vma);
}
+ for (i = 0; i < drv_array_size(bo->drv->mappings); i++) {
+ if (mapping == (struct mapping *)drv_array_at_idx(bo->drv->mappings, i)) {
+ drv_array_remove(bo->drv->mappings, i);
+ break;
+ }
+ }
+
+out:
pthread_mutex_unlock(&bo->drv->driver_lock);
+ return ret;
+}
+
+int drv_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ int ret = 0;
+
+ assert(mapping);
+ assert(mapping->vma);
+ assert(mapping->refcount > 0);
+ assert(mapping->vma->refcount > 0);
+
+ if (bo->drv->backend->bo_invalidate)
+ ret = bo->drv->backend->bo_invalidate(bo, mapping);
+
+ return ret;
+}
+
+int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping)
+{
+ int ret = 0;
+
+ assert(mapping);
+ assert(mapping->vma);
+ assert(mapping->refcount > 0);
+ assert(mapping->vma->refcount > 0);
+ assert(!(bo->use_flags & BO_USE_PROTECTED));
+
+ if (bo->drv->backend->bo_flush)
+ ret = bo->drv->backend->bo_flush(bo, mapping);
+ else
+ ret = drv_bo_unmap(bo, mapping);
return ret;
}
ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd);
+ // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways
+ if (ret)
+ ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd);
+
return (ret) ? ret : fd;
}
return bo->format;
}
-uint32_t drv_resolve_format(struct driver *drv, uint32_t format)
+uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
if (drv->backend->resolve_format)
- return drv->backend->resolve_format(format);
+ return drv->backend->resolve_format(format, use_flags);
return format;
}
-size_t drv_num_planes_from_format(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_ABGR4444:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_AYUV:
- case DRM_FORMAT_BGR233:
- case DRM_FORMAT_BGR565:
- case DRM_FORMAT_BGR888:
- case DRM_FORMAT_BGRA1010102:
- case DRM_FORMAT_BGRA4444:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRX4444:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRX8888:
- case DRM_FORMAT_C8:
- case DRM_FORMAT_GR88:
- case DRM_FORMAT_R8:
- case DRM_FORMAT_RG88:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_RGBA4444:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_RGBX4444:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_XBGR4444:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- return 1;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YVU420_ANDROID:
- return 3;
- }
-
- fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format);
- return 0;
-}
-
-uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
-{
- assert(plane < drv_num_planes_from_format(format));
- uint32_t vertical_subsampling;
-
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YVU420_ANDROID:
- vertical_subsampling = (plane == 0) ? 1 : 2;
- break;
- default:
- vertical_subsampling = 1;
- }
-
- return stride * DIV_ROUND_UP(height, vertical_subsampling);
-}
-
uint32_t drv_num_buffers_per_bo(struct bo *bo)
{
uint32_t count = 0;
return count;
}
+
+void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...)
+{
+ char buf[50];
+ snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line);
+
+ va_list args;
+ va_start(args, format);
+#ifdef __ANDROID__
+ __android_log_vprint(ANDROID_LOG_ERROR, buf, format, args);
+#else
+ fprintf(stderr, "%s ", buf);
+ vfprintf(stderr, format, args);
+#endif
+ va_end(args);
+}
#define BO_USE_EXTERNAL_DISP (1ull << 10)
#define BO_USE_PROTECTED (1ull << 11)
#define BO_USE_HW_VIDEO_ENCODER (1ull << 12)
-#define BO_USE_HW_CAMERA_WRITE (1ull << 13)
-#define BO_USE_HW_CAMERA_READ (1ull << 14)
-#define BO_USE_HW_CAMERA_ZSL (1ull << 15)
+#define BO_USE_CAMERA_WRITE (1ull << 13)
+#define BO_USE_CAMERA_READ (1ull << 14)
#define BO_USE_RENDERSCRIPT (1ull << 16)
#define BO_USE_TEXTURE (1ull << 17)
+#define BO_USE_HW_VIDEO_DECODER (1ull << 18)
+
+
+/* Map flags */
+#define BO_MAP_NONE 0
+#define BO_MAP_READ (1 << 0)
+#define BO_MAP_WRITE (1 << 1)
+#define BO_MAP_READ_WRITE (BO_MAP_READ | BO_MAP_WRITE)
/* This is our extension to <drm_fourcc.h>. We need to make sure we don't step
* on the namespace of already defined formats, which can be done by using invalid
int fds[DRV_MAX_PLANES];
uint32_t strides[DRV_MAX_PLANES];
uint32_t offsets[DRV_MAX_PLANES];
- uint32_t sizes[DRV_MAX_PLANES];
uint64_t format_modifiers[DRV_MAX_PLANES];
uint32_t width;
uint32_t height;
uint32_t format;
+ uint64_t use_flags;
};
-struct map_info {
+struct vma {
void *addr;
size_t length;
uint32_t handle;
+ uint32_t map_flags;
int32_t refcount;
+ uint32_t map_strides[DRV_MAX_PLANES];
void *priv;
};
+struct rectangle {
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+};
+
+struct mapping {
+ struct vma *vma;
+ struct rectangle rect;
+ uint32_t refcount;
+};
+
struct driver *drv_create(int fd);
void drv_destroy(struct driver *drv);
const char *drv_get_name(struct driver *drv);
-struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t usage);
+struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags);
-struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format);
+struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags);
struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format,
- uint64_t flags);
+ uint64_t use_flags);
struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint32_t height,
uint32_t format, const uint64_t *modifiers, uint32_t count);
struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data);
-void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t flags, struct map_info **map_data, size_t plane);
+void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags,
+ struct mapping **map_data, size_t plane);
+
+int drv_bo_unmap(struct bo *bo, struct mapping *mapping);
-int drv_bo_unmap(struct bo *bo, struct map_info *map_data);
+int drv_bo_invalidate(struct bo *bo, struct mapping *mapping);
+
+int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping);
uint32_t drv_bo_get_width(struct bo *bo);
uint32_t drv_bo_get_format(struct bo *bo);
-uint32_t drv_bo_get_stride_in_pixels(struct bo *bo);
+uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane);
-uint32_t drv_resolve_format(struct driver *drv, uint32_t format);
+uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane);
-size_t drv_num_planes_from_format(uint32_t format);
+uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags);
-uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane);
+size_t drv_num_planes_from_format(uint32_t format);
uint32_t drv_num_buffers_per_bo(struct bo *bo);
+#define drv_log(format, ...) \
+ do { \
+ drv_log_prefix("minigbm", __FILE__, __LINE__, format, ##__VA_ARGS__); \
+ } while (0)
+
+__attribute__((format(printf, 4, 5))) void drv_log_prefix(const char *prefix, const char *file,
+ int line, const char *format, ...);
+
#ifdef __cplusplus
}
#endif
uint32_t sizes[DRV_MAX_PLANES];
uint32_t strides[DRV_MAX_PLANES];
uint64_t format_modifiers[DRV_MAX_PLANES];
+ uint64_t use_flags;
size_t total_size;
void *priv;
};
-struct driver {
- int fd;
- struct backend *backend;
- void *priv;
- void *buffer_table;
- void *map_table;
- pthread_mutex_t driver_lock;
-};
-
struct kms_item {
uint32_t format;
uint64_t modifier;
- uint64_t usage;
+ uint64_t use_flags;
};
struct format_metadata {
struct combination {
uint32_t format;
struct format_metadata metadata;
- uint64_t usage;
+ uint64_t use_flags;
};
-struct combinations {
- struct combination *data;
- uint32_t size;
- uint32_t allocations;
+struct driver {
+ int fd;
+ const struct backend *backend;
+ void *priv;
+ void *buffer_table;
+ struct drv_array *mappings;
+ struct drv_array *combos;
+ pthread_mutex_t driver_lock;
};
struct backend {
int (*init)(struct driver *drv);
void (*close)(struct driver *drv);
int (*bo_create)(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags);
+ uint64_t use_flags);
int (*bo_create_with_modifiers)(struct bo *bo, uint32_t width, uint32_t height,
uint32_t format, const uint64_t *modifiers, uint32_t count);
int (*bo_destroy)(struct bo *bo);
int (*bo_import)(struct bo *bo, struct drv_import_fd_data *data);
- void *(*bo_map)(struct bo *bo, struct map_info *data, size_t plane);
- int (*bo_unmap)(struct bo *bo, struct map_info *data);
- uint32_t (*resolve_format)(uint32_t format);
- struct combinations combos;
+ void *(*bo_map)(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags);
+ int (*bo_unmap)(struct bo *bo, struct vma *vma);
+ int (*bo_invalidate)(struct bo *bo, struct mapping *mapping);
+ int (*bo_flush)(struct bo *bo, struct mapping *mapping);
+ uint32_t (*resolve_format)(uint32_t format, uint64_t use_flags);
};
// clang-format off
-#define BO_USE_RENDER_MASK BO_USE_LINEAR | BO_USE_RENDERING | BO_USE_SW_READ_OFTEN | \
- BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_RARELY | \
- BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
+#define BO_USE_RENDER_MASK BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERING | \
+ BO_USE_RENDERSCRIPT | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
+
+#define BO_USE_TEXTURE_MASK BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERSCRIPT | \
+ BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
-#define BO_USE_TEXTURE_MASK BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
- BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE
+#define BO_USE_SW BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \
+ BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY
+
+#define BO_USE_SW_OFTEN BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN
+
+#define BO_USE_SW_RARELY BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY
+
+#ifndef DRM_FORMAT_MOD_LINEAR
+#define DRM_FORMAT_MOD_LINEAR DRM_FORMAT_MOD_NONE
+#endif
-#define LINEAR_METADATA (struct format_metadata) { 0, 1, DRM_FORMAT_MOD_NONE }
+#define LINEAR_METADATA (struct format_metadata) { 1, 0, DRM_FORMAT_MOD_LINEAR }
// clang-format on
#endif
static int evdi_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
return drv_modify_linear_combinations(drv);
}
-struct backend backend_evdi = {
+const struct backend backend_evdi = {
.name = "evdi",
.init = evdi_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
};
static int exynos_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
return drv_modify_linear_combinations(drv);
}
static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
size_t plane;
bo->total_size = bo->sizes[0] = height * bo->strides[0];
bo->offsets[0] = 0;
} else {
- fprintf(stderr, "drv: unsupported format %X\n", format);
+ drv_log("unsupported format %X\n", format);
assert(0);
return -EINVAL;
}
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_EXYNOS_GEM_CREATE failed (size=%zu)\n",
- size);
+ drv_log("DRM_IOCTL_EXYNOS_GEM_CREATE failed (size=%zu)\n", size);
goto cleanup_planes;
}
gem_close.handle = bo->handles[plane - 1].u32;
int gem_close_ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
if (gem_close_ret) {
- fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed: %d\n", gem_close_ret);
+ drv_log("DRM_IOCTL_GEM_CLOSE failed: %d\n", gem_close_ret);
}
}
* Use dumb mapping with exynos even though a GEM buffer is created.
* libdrm does the same thing in exynos_drm.c
*/
-struct backend backend_exynos = {
+const struct backend backend_exynos = {
.name = "exynos",
.init = exynos_init,
.bo_create = exynos_bo_create,
.bo_destroy = drv_gem_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
};
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/mman.h>
#include <xf86drm.h>
#include "drv.h"
PUBLIC int gbm_device_is_format_supported(struct gbm_device *gbm, uint32_t format, uint32_t usage)
{
- uint64_t drv_usage;
+ uint64_t use_flags;
if (usage & GBM_BO_USE_CURSOR && usage & GBM_BO_USE_RENDERING)
return 0;
- drv_usage = gbm_convert_flags(usage);
+ use_flags = gbm_convert_usage(usage);
- return (drv_get_combination(gbm->drv, format, drv_usage) != NULL);
+ return (drv_get_combination(gbm->drv, format, use_flags) != NULL);
}
PUBLIC struct gbm_device *gbm_create_device(int fd)
}
PUBLIC struct gbm_surface *gbm_surface_create(struct gbm_device *gbm, uint32_t width,
- uint32_t height, uint32_t format, uint32_t flags)
+ uint32_t height, uint32_t format, uint32_t usage)
{
struct gbm_surface *surface = (struct gbm_surface *)malloc(sizeof(*surface));
}
PUBLIC struct gbm_bo *gbm_bo_create(struct gbm_device *gbm, uint32_t width, uint32_t height,
- uint32_t format, uint32_t flags)
+ uint32_t format, uint32_t usage)
{
struct gbm_bo *bo;
- if (!gbm_device_is_format_supported(gbm, format, flags))
+ if (!gbm_device_is_format_supported(gbm, format, usage))
return NULL;
bo = gbm_bo_new(gbm, format);
if (!bo)
return NULL;
- bo->bo = drv_bo_create(gbm->drv, width, height, format, gbm_convert_flags(flags));
+ bo->bo = drv_bo_create(gbm->drv, width, height, format, gbm_convert_usage(usage));
if (!bo->bo) {
free(bo);
size_t num_planes, i;
memset(&drv_data, 0, sizeof(drv_data));
-
+ drv_data.use_flags = gbm_convert_usage(usage);
switch (type) {
case GBM_BO_IMPORT_FD:
gbm_format = fd_data->format;
drv_data.format = fd_data->format;
drv_data.fds[0] = fd_data->fd;
drv_data.strides[0] = fd_data->stride;
- drv_data.sizes[0] = fd_data->height * fd_data->stride;
break;
case GBM_BO_IMPORT_FD_PLANAR:
gbm_format = fd_planar_data->format;
drv_data.offsets[i] = fd_planar_data->offsets[i];
drv_data.strides[i] = fd_planar_data->strides[i];
drv_data.format_modifiers[i] = fd_planar_data->format_modifiers[i];
-
- drv_data.sizes[i] = drv_size_from_format(
- drv_data.format, drv_data.strides[i], drv_data.height, i);
}
for (i = num_planes; i < GBM_MAX_PLANES; i++)
}
PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height,
- uint32_t flags, uint32_t *stride, void **map_data, size_t plane)
+ uint32_t transfer_flags, uint32_t *stride, void **map_data, size_t plane)
{
+ void *addr;
+ off_t offset;
+ uint32_t map_flags;
+ struct rectangle rect = { .x = x, .y = y, .width = width, .height = height };
if (!bo || width == 0 || height == 0 || !stride || !map_data)
return NULL;
- *stride = gbm_bo_get_plane_stride(bo, plane);
- return drv_bo_map(bo->bo, x, y, width, height, 0, (struct map_info **)map_data, plane);
+ map_flags = (transfer_flags & GBM_BO_TRANSFER_READ) ? BO_MAP_READ : BO_MAP_NONE;
+ map_flags |= (transfer_flags & GBM_BO_TRANSFER_WRITE) ? BO_MAP_WRITE : BO_MAP_NONE;
+
+ addr = drv_bo_map(bo->bo, &rect, map_flags, (struct mapping **)map_data, plane);
+ if (addr == MAP_FAILED)
+ return MAP_FAILED;
+
+ *stride = ((struct mapping *)*map_data)->vma->map_strides[plane];
+
+ offset = *stride * rect.y;
+ offset += drv_stride_from_format(bo->gbm_format, rect.x, plane);
+ return (void *)((uint8_t *)addr + offset);
}
PUBLIC void gbm_bo_unmap(struct gbm_bo *bo, void *map_data)
{
assert(bo);
- drv_bo_unmap(bo->bo, map_data);
+ drv_bo_flush_or_unmap(bo->bo, map_data);
}
PUBLIC uint32_t gbm_bo_get_width(struct gbm_bo *bo)
* Buffer is guaranteed to be laid out linearly in memory. That is, the
* buffer is laid out as an array with 'height' blocks, each block with
* length 'stride'. Each stride is in the same order as the rows of the
- * buffer.
+ * buffer. This is intended to be used with buffers that will be accessed
+ * via dma-buf mmap().
*/
GBM_BO_USE_LINEAR = (1 << 4),
+ /**
+ * The buffer will be used as a texture that will be sampled from.
+ */
+ GBM_BO_USE_TEXTURING = (1 << 5),
+ /**
+ * The buffer will be written to by a camera subsystem.
+ */
+ GBM_BO_USE_CAMERA_WRITE = (1 << 6),
+ /**
+ * The buffer will be read from by a camera subsystem.
+ */
+ GBM_BO_USE_CAMERA_READ = (1 << 7),
+ /**
+ * Buffer inaccessible to unprivileged users.
+ */
+ GBM_BO_USE_PROTECTED = (1 << 8),
+ /**
+ * These flags specify the frequency of software access. These flags do not
+ * guarantee the buffer is linear, but do guarantee gbm_bo_map(..) will
+ * present a linear view.
+ */
+ GBM_BO_USE_SW_READ_OFTEN = (1 << 9),
+ GBM_BO_USE_SW_READ_RARELY = (1 << 10),
+ GBM_BO_USE_SW_WRITE_OFTEN = (1 << 11),
+ GBM_BO_USE_SW_WRITE_RARELY = (1 << 12),
+ /**
+ * The buffer will be written by a video decode accelerator.
+ */
+ GBM_BO_USE_HW_VIDEO_DECODER = (1 << 13),
};
int
#include "drv.h"
#include "gbm.h"
-uint64_t gbm_convert_flags(uint32_t flags)
+uint64_t gbm_convert_usage(uint32_t usage)
{
- uint64_t usage = BO_USE_NONE;
+ uint64_t use_flags = BO_USE_NONE;
- if (flags & GBM_BO_USE_SCANOUT)
- usage |= BO_USE_SCANOUT;
- if (flags & GBM_BO_USE_CURSOR)
- usage |= BO_USE_CURSOR;
- if (flags & GBM_BO_USE_CURSOR_64X64)
- usage |= BO_USE_CURSOR_64X64;
- if (flags & GBM_BO_USE_RENDERING)
- usage |= BO_USE_RENDERING;
- if (flags & GBM_BO_USE_LINEAR)
- usage |= BO_USE_LINEAR;
+ if (usage & GBM_BO_USE_SCANOUT)
+ use_flags |= BO_USE_SCANOUT;
+ if (usage & GBM_BO_USE_CURSOR)
+ use_flags |= BO_USE_CURSOR;
+ if (usage & GBM_BO_USE_CURSOR_64X64)
+ use_flags |= BO_USE_CURSOR_64X64;
+ if (usage & GBM_BO_USE_RENDERING)
+ use_flags |= BO_USE_RENDERING;
+ if (usage & GBM_BO_USE_TEXTURING)
+ use_flags |= BO_USE_TEXTURE;
+ if (usage & GBM_BO_USE_LINEAR)
+ use_flags |= BO_USE_LINEAR;
+ if (usage & GBM_BO_USE_CAMERA_WRITE)
+ use_flags |= BO_USE_CAMERA_WRITE;
+ if (usage & GBM_BO_USE_CAMERA_READ)
+ use_flags |= BO_USE_CAMERA_READ;
+ if (usage & GBM_BO_USE_PROTECTED)
+ use_flags |= BO_USE_PROTECTED;
+ if (usage & GBM_BO_USE_SW_READ_OFTEN)
+ use_flags |= BO_USE_SW_READ_OFTEN;
+ if (usage & GBM_BO_USE_SW_READ_RARELY)
+ use_flags |= BO_USE_SW_READ_RARELY;
+ if (usage & GBM_BO_USE_SW_WRITE_OFTEN)
+ use_flags |= BO_USE_SW_WRITE_OFTEN;
+ if (usage & GBM_BO_USE_SW_WRITE_RARELY)
+ use_flags |= BO_USE_SW_WRITE_RARELY;
+ if (usage & GBM_BO_USE_HW_VIDEO_DECODER)
+ use_flags |= BO_USE_HW_VIDEO_DECODER;
- return usage;
+ return use_flags;
}
#ifndef GBM_HELPERS_H
#define GBM_HELPERS_H
-uint64_t gbm_convert_flags(uint32_t flags);
+uint64_t gbm_convert_usage(uint32_t usage);
#endif
+++ /dev/null
-/*
- * Copyright 2014 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "drv_priv.h"
-#include "helpers.h"
-#include "util.h"
-
-static const uint32_t render_target_formats[] = { DRM_FORMAT_RGBX8888 };
-
-static int gma500_init(struct driver *drv)
-{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
-
- return drv_modify_linear_combinations(drv);
-}
-
-struct backend backend_gma500 = {
- .name = "gma500",
- .init = gma500_init,
- .bo_create = drv_dumb_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
- .bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
-};
#include "helpers.h"
#include "util.h"
-static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
-{
-
- if (plane != 0) {
- switch (format) {
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YVU420_ANDROID:
- stride = DIV_ROUND_UP(stride, 2);
- break;
- }
- }
-
- return stride;
-}
-
-static uint32_t bpp_from_format(uint32_t format, size_t plane)
+/*
+ * Describes how a DRM format is laid out in memory: the number of planes
+ * and, per plane, the horizontal/vertical subsampling divisors and the
+ * bytes per (subsampled) pixel.
+ */
+struct planar_layout {
+	size_t num_planes;
+	int horizontal_subsampling[DRV_MAX_PLANES];
+	int vertical_subsampling[DRV_MAX_PLANES];
+	int bytes_per_pixel[DRV_MAX_PLANES];
+};
+
+// clang-format off
+
+/* Single-plane layouts with no subsampling, at 1/2/3/4 bytes per pixel. */
+static const struct planar_layout packed_1bpp_layout = {
+	.num_planes = 1,
+	.horizontal_subsampling = { 1 },
+	.vertical_subsampling = { 1 },
+	.bytes_per_pixel = { 1 }
+};
+
+static const struct planar_layout packed_2bpp_layout = {
+	.num_planes = 1,
+	.horizontal_subsampling = { 1 },
+	.vertical_subsampling = { 1 },
+	.bytes_per_pixel = { 2 }
+};
+
+static const struct planar_layout packed_3bpp_layout = {
+	.num_planes = 1,
+	.horizontal_subsampling = { 1 },
+	.vertical_subsampling = { 1 },
+	.bytes_per_pixel = { 3 }
+};
+
+static const struct planar_layout packed_4bpp_layout = {
+	.num_planes = 1,
+	.horizontal_subsampling = { 1 },
+	.vertical_subsampling = { 1 },
+	.bytes_per_pixel = { 4 }
+};
+
+/* Y plane plus one 2x2-subsampled chroma plane at 2 bytes per sample (NV12/NV21). */
+static const struct planar_layout biplanar_yuv_420_layout = {
+	.num_planes = 2,
+	.horizontal_subsampling = { 1, 2 },
+	.vertical_subsampling = { 1, 2 },
+	.bytes_per_pixel = { 1, 2 }
+};
+
+/* Y plane plus two 2x2-subsampled 1-byte chroma planes (YVU420 variants). */
+static const struct planar_layout triplanar_yuv_420_layout = {
+	.num_planes = 3,
+	.horizontal_subsampling = { 1, 2, 2 },
+	.vertical_subsampling = { 1, 2, 2 },
+	.bytes_per_pixel = { 1, 1, 1 }
+};
+
+// clang-format on
+
+static const struct planar_layout *layout_from_format(uint32_t format)
{
- assert(plane < drv_num_planes_from_format(format));
-
switch (format) {
case DRM_FORMAT_BGR233:
case DRM_FORMAT_C8:
case DRM_FORMAT_R8:
case DRM_FORMAT_RGB332:
+ return &packed_1bpp_layout;
+
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU420_ANDROID:
- return 8;
+ return &triplanar_yuv_420_layout;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
- return (plane == 0) ? 8 : 4;
+ return &biplanar_yuv_420_layout;
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
- return 16;
+ return &packed_2bpp_layout;
case DRM_FORMAT_BGR888:
case DRM_FORMAT_RGB888:
- return 24;
+ return &packed_3bpp_layout;
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XRGB8888:
- return 32;
+ return &packed_4bpp_layout;
+
+ default:
+ drv_log("UNKNOWN FORMAT %d\n", format);
+ return NULL;
}
+}
- fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format);
- return 0;
+/* Returns the number of planes used by |format|, or 0 for unknown formats. */
+size_t drv_num_planes_from_format(uint32_t format)
+{
+	const struct planar_layout *layout = layout_from_format(format);
+
+	/*
+	 * drv_bo_new calls this function early to query number of planes and
+	 * considers 0 planes to mean unknown format, so we have to support
+	 * that. All other layout_from_format() queries can assume that the
+	 * format is supported and that the return value is non-NULL.
+	 */
+
+	return layout ? layout->num_planes : 0;
+}
-uint32_t drv_bo_get_stride_in_pixels(struct bo *bo)
+uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane)
{
- uint32_t bytes_per_pixel = DIV_ROUND_UP(bpp_from_format(bo->format, 0), 8);
- return DIV_ROUND_UP(bo->strides[0], bytes_per_pixel);
+ const struct planar_layout *layout = layout_from_format(format);
+
+ assert(plane < layout->num_planes);
+
+ return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]);
+}
+
+/* Bytes per pixel of |plane| of |format|; asserts that the plane index is valid. */
+uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane)
+{
+	const struct planar_layout *layout = layout_from_format(format);
+
+	assert(plane < layout->num_planes);
+
+	return layout->bytes_per_pixel[plane];
+}
/*
*/
uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane)
{
- uint32_t stride = DIV_ROUND_UP(width * bpp_from_format(format, plane), 8);
+ const struct planar_layout *layout = layout_from_format(format);
+ assert(plane < layout->num_planes);
+
+ uint32_t plane_width = DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]);
+ uint32_t stride = plane_width * layout->bytes_per_pixel[plane];
/*
* The stride of Android YV12 buffers is required to be aligned to 16 bytes
return stride;
}
+/* Size in bytes of |plane|: the given stride times the plane's subsampled height. */
+uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane)
+{
+	return stride * drv_height_from_format(format, height, plane);
+}
+
+/*
+ * Derives a chroma-plane stride from the plane-0 (luma) stride: halved for
+ * the triplanar YV12 variants, unchanged for every other format/plane.
+ */
+static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane)
+{
+	if (plane != 0) {
+		switch (format) {
+		case DRM_FORMAT_YVU420:
+		case DRM_FORMAT_YVU420_ANDROID:
+			stride = DIV_ROUND_UP(stride, 2);
+			break;
+		}
+	}
+
+	return stride;
+}
+
/*
* This function fills in the buffer object given the driver aligned stride of
* the first plane, height and a format. This function assumes there is just
num_planes = drv_num_planes_from_format(format);
assert(num_planes);
- bo->total_size = 0;
+
+ /*
+ * HAL_PIXEL_FORMAT_YV12 requires that (see <system/graphics.h>):
+ * - the aligned height is same as the buffer's height.
+	 * - the chroma stride is 16 bytes aligned, i.e., the luma's stride
+	 * is 32 bytes aligned.
+ */
+ if (format == DRM_FORMAT_YVU420_ANDROID) {
+ assert(aligned_height == bo->height);
+ assert(stride == ALIGN(stride, 32));
+ }
for (p = 0; p < num_planes; p++) {
bo->strides[p] = subsample_stride(stride, format, p);
- bo->sizes[p] = drv_size_from_format(format, bo->strides[p], bo->height, p);
+ bo->sizes[p] = drv_size_from_format(format, bo->strides[p], aligned_height, p);
bo->offsets[p] = offset;
offset += bo->sizes[p];
- bo->total_size += drv_size_from_format(format, bo->strides[p], aligned_height, p);
}
+ bo->total_size = offset;
return 0;
}
int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
int ret;
size_t plane;
* Android requires.
*/
aligned_width = ALIGN(width, 32);
+ }
+
+ if (format == DRM_FORMAT_YVU420_ANDROID || format == DRM_FORMAT_YVU420) {
aligned_height = 3 * DIV_ROUND_UP(height, 2);
}
memset(&create_dumb, 0, sizeof(create_dumb));
create_dumb.height = aligned_height;
create_dumb.width = aligned_width;
- create_dumb.bpp = bpp_from_format(format, 0);
+ create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8;
create_dumb.flags = 0;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MODE_CREATE_DUMB failed\n");
+ drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno);
return ret;
}
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n",
- bo->handles[0].u32);
+ drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32);
return ret;
}
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
+ drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
bo->handles[plane].u32, ret);
error = ret;
}
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n",
- prime_handle.fd);
+ drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd);
/*
* Need to call GEM close on planes that were opened,
return 0;
}
-void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
size_t i;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MODE_MAP_DUMB failed \n");
+ drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n");
return MAP_FAILED;
}
for (i = 0; i < bo->num_planes; i++)
if (bo->handles[i].u32 == bo->handles[plane].u32)
- data->length += bo->sizes[i];
+ vma->length += bo->sizes[i];
- return mmap(0, data->length, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+ return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
map_dumb.offset);
}
+/* Default bo_unmap backend hook: unmaps the entire VMA. Returns munmap(2)'s result. */
+int drv_bo_munmap(struct bo *bo, struct vma *vma)
+{
+	return munmap(vma->addr, vma->length);
+}
+
+/* Frees every cached mapping (and its VMA, once unreferenced) belonging to |bo|. */
+int drv_mapping_destroy(struct bo *bo)
+{
+	int ret;
+	size_t plane;
+	struct mapping *mapping;
+	uint32_t idx;
+
+	/*
+	 * This function is called right before the buffer is destroyed. It will free any mappings
+	 * associated with the buffer.
+	 */
+
+	/*
+	 * NOTE(review): |idx| is not reset at the top of the per-plane loop, so
+	 * each plane's scan resumes where the previous plane's scan stopped --
+	 * confirm this is intentional and cannot skip mappings of later planes.
+	 */
+	idx = 0;
+	for (plane = 0; plane < bo->num_planes; plane++) {
+		while (idx < drv_array_size(bo->drv->mappings)) {
+			mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx);
+			if (mapping->vma->handle != bo->handles[plane].u32) {
+				idx++;
+				continue;
+			}
+
+			/* Unmap and free the VMA once its last mapping goes away. */
+			if (!--mapping->vma->refcount) {
+				ret = bo->drv->backend->bo_unmap(bo, mapping->vma);
+				if (ret) {
+					drv_log("munmap failed\n");
+					return ret;
+				}
+
+				free(mapping->vma);
+			}
+
+			/* This shrinks and shifts the array, so don't increment idx. */
+			drv_array_remove(bo->drv->mappings, idx);
+		}
+	}
+
+	return 0;
+}
+
+/* Maps BO_MAP_* request flags to mmap PROT_* bits; read access is always granted. */
+int drv_get_prot(uint32_t map_flags)
+{
+	return (BO_MAP_WRITE & map_flags) ? PROT_WRITE | PROT_READ : PROT_READ;
+}
+
uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane)
{
void *count;
return ret;
}
-int drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
- uint64_t usage)
+void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
+ struct format_metadata *metadata, uint64_t use_flags)
{
- struct combinations *combos = &drv->backend->combos;
- if (combos->size >= combos->allocations) {
- struct combination *new_data;
- combos->allocations *= 2;
- new_data = realloc(combos->data, combos->allocations * sizeof(*combos->data));
- if (!new_data)
- return -ENOMEM;
-
- combos->data = new_data;
- }
-
- combos->data[combos->size].format = format;
- combos->data[combos->size].metadata.priority = metadata->priority;
- combos->data[combos->size].metadata.tiling = metadata->tiling;
- combos->data[combos->size].metadata.modifier = metadata->modifier;
- combos->data[combos->size].usage = usage;
- combos->size++;
- return 0;
-}
-
-int drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
- struct format_metadata *metadata, uint64_t usage)
-{
- int ret;
uint32_t i;
+
for (i = 0; i < num_formats; i++) {
- ret = drv_add_combination(drv, formats[i], metadata, usage);
- if (ret)
- return ret;
- }
+ struct combination combo = { .format = formats[i],
+ .metadata = *metadata,
+ .use_flags = use_flags };
- return 0;
+ drv_array_append(drv->combos, &combo);
+ }
}
void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
- uint64_t usage)
+ uint64_t use_flags)
{
uint32_t i;
struct combination *combo;
- /* Attempts to add the specified usage to an existing combination. */
- for (i = 0; i < drv->backend->combos.size; i++) {
- combo = &drv->backend->combos.data[i];
+ /* Attempts to add the specified flags to an existing combination. */
+ for (i = 0; i < drv_array_size(drv->combos); i++) {
+ combo = (struct combination *)drv_array_at_idx(drv->combos, i);
if (combo->format == format && combo->metadata.tiling == metadata->tiling &&
combo->metadata.modifier == metadata->modifier)
- combo->usage |= usage;
+ combo->use_flags |= use_flags;
}
}
-struct kms_item *drv_query_kms(struct driver *drv, uint32_t *num_items)
+struct drv_array *drv_query_kms(struct driver *drv)
{
- uint64_t flag, usage;
- struct kms_item *items;
- uint32_t i, j, k, allocations, item_size;
+ struct drv_array *kms_items;
+ uint64_t plane_type, use_flag;
+ uint32_t i, j, k;
drmModePlanePtr plane;
drmModePropertyPtr prop;
drmModePlaneResPtr resources;
drmModeObjectPropertiesPtr props;
- /* Start with a power of 2 number of allocations. */
- allocations = 2;
- item_size = 0;
- items = calloc(allocations, sizeof(*items));
- if (!items)
+ kms_items = drv_array_init(sizeof(struct kms_item));
+ if (!kms_items)
goto out;
/*
prop = drmModeGetProperty(drv->fd, props->props[j]);
if (prop) {
if (strcmp(prop->name, "type") == 0) {
- flag = props->prop_values[j];
+ plane_type = props->prop_values[j];
}
drmModeFreeProperty(prop);
}
}
- switch (flag) {
+ switch (plane_type) {
case DRM_PLANE_TYPE_OVERLAY:
case DRM_PLANE_TYPE_PRIMARY:
- usage = BO_USE_SCANOUT;
+ use_flag = BO_USE_SCANOUT;
break;
case DRM_PLANE_TYPE_CURSOR:
- usage = BO_USE_CURSOR;
+ use_flag = BO_USE_CURSOR;
break;
default:
assert(0);
for (j = 0; j < plane->count_formats; j++) {
bool found = false;
- for (k = 0; k < item_size; k++) {
- if (items[k].format == plane->formats[j] &&
- items[k].modifier == DRM_FORMAT_MOD_NONE) {
- items[k].usage |= usage;
+ for (k = 0; k < drv_array_size(kms_items); k++) {
+ struct kms_item *item = drv_array_at_idx(kms_items, k);
+ if (item->format == plane->formats[j] &&
+ item->modifier == DRM_FORMAT_MOD_LINEAR) {
+ item->use_flags |= use_flag;
found = true;
break;
}
}
- if (!found && item_size >= allocations) {
- struct kms_item *new_data = NULL;
- allocations *= 2;
- new_data = realloc(items, allocations * sizeof(*items));
- if (!new_data) {
- item_size = 0;
- goto out;
- }
-
- items = new_data;
- }
-
if (!found) {
- items[item_size].format = plane->formats[j];
- items[item_size].modifier = DRM_FORMAT_MOD_NONE;
- items[item_size].usage = usage;
- item_size++;
+ struct kms_item item = { .format = plane->formats[j],
+ .modifier = DRM_FORMAT_MOD_LINEAR,
+ .use_flags = use_flag };
+
+ drv_array_append(kms_items, &item);
}
}
drmModeFreePlaneResources(resources);
out:
- if (items && item_size == 0) {
- free(items);
- items = NULL;
+ if (kms_items && !drv_array_size(kms_items)) {
+ drv_array_destroy(kms_items);
+ return NULL;
}
- *num_items = item_size;
- return items;
+ return kms_items;
}
int drv_modify_linear_combinations(struct driver *drv)
{
- uint32_t i, j, num_items;
- struct kms_item *items;
+ uint32_t i, j;
+ struct kms_item *item;
struct combination *combo;
+ struct drv_array *kms_items;
/*
* All current drivers can scanout linear XRGB8888/ARGB8888 as a primary
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
BO_USE_CURSOR | BO_USE_SCANOUT);
- items = drv_query_kms(drv, &num_items);
- if (!items || !num_items)
+ kms_items = drv_query_kms(drv);
+ if (!kms_items)
return 0;
- for (i = 0; i < num_items; i++) {
- for (j = 0; j < drv->backend->combos.size; j++) {
- combo = &drv->backend->combos.data[j];
- if (items[i].format == combo->format)
- combo->usage |= BO_USE_SCANOUT;
+ for (i = 0; i < drv_array_size(kms_items); i++) {
+ item = (struct kms_item *)drv_array_at_idx(kms_items, i);
+ for (j = 0; j < drv_array_size(drv->combos); j++) {
+ combo = drv_array_at_idx(drv->combos, j);
+ if (item->format == combo->format)
+ combo->use_flags |= BO_USE_SCANOUT;
}
}
- free(items);
+ drv_array_destroy(kms_items);
return 0;
}
+
+/*
+ * Pick the best modifier from modifiers, according to the ordering
+ * given by modifier_order. Earlier entries of |modifier_order| are
+ * preferred; the first one also present in |modifiers| wins.
+ */
+uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
+			   const uint64_t *modifier_order, uint32_t order_count)
+{
+	uint32_t i, j;
+
+	for (i = 0; i < order_count; i++) {
+		for (j = 0; j < count; j++) {
+			if (modifiers[j] == modifier_order[i]) {
+				return modifiers[j];
+			}
+		}
+	}
+
+	/* None of the preferred modifiers was offered; fall back to linear. */
+	return DRM_FORMAT_MOD_LINEAR;
+}
#define HELPERS_H
#include "drv.h"
+#include "helpers_array.h"
-uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane);
+uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane);
+uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane);
int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format);
int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags);
+ uint64_t use_flags);
int drv_dumb_bo_destroy(struct bo *bo);
int drv_gem_bo_destroy(struct bo *bo);
int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data);
-void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane);
+void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags);
+int drv_bo_munmap(struct bo *bo, struct vma *vma);
+int drv_mapping_destroy(struct bo *bo);
+int drv_get_prot(uint32_t map_flags);
uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane);
void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane);
void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane);
uint32_t drv_log_base2(uint32_t value);
int drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
uint64_t usage);
-int drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
- struct format_metadata *metadata, uint64_t usage);
+void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats,
+ struct format_metadata *metadata, uint64_t usage);
void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata,
uint64_t usage);
-struct kms_item *drv_query_kms(struct driver *drv, uint32_t *num_items);
+struct drv_array *drv_query_kms(struct driver *drv);
int drv_modify_linear_combinations(struct driver *drv);
+uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count,
+ const uint64_t *modifier_order, uint32_t order_count);
#endif
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "util.h"
+
+/*
+ * Growable array storing heap-allocated copies of fixed-size items.
+ * NOTE(review): the calloc() results are not checked (only the realloc()s
+ * are assert()ed), so allocation failure will crash -- presumably OOM is
+ * treated as fatal by all callers; confirm.
+ */
+struct drv_array {
+	void **items;         /* Pointer table; one allocation per stored item. */
+	uint32_t size;        /* Number of items currently stored. */
+	uint32_t item_size;   /* Size in bytes of each copied item. */
+	uint32_t allocations; /* Current capacity of |items|. */
+};
+
+/* Creates an empty array whose elements are |item_size| bytes each. */
+struct drv_array *drv_array_init(uint32_t item_size)
+{
+	struct drv_array *array;
+
+	array = calloc(1, sizeof(*array));
+
+	/* Start with a power of 2 number of allocations. */
+	array->allocations = 2;
+	array->items = calloc(array->allocations, sizeof(*array->items));
+	array->item_size = item_size;
+	return array;
+}
+
+/*
+ * Copies |item_size| bytes from |data| into a fresh allocation, appends it
+ * and returns the stored copy. Capacity grows by doubling.
+ */
+void *drv_array_append(struct drv_array *array, void *data)
+{
+	void *item;
+
+	if (array->size >= array->allocations) {
+		void **new_items = NULL;
+		array->allocations *= 2;
+		new_items = realloc(array->items, array->allocations * sizeof(*array->items));
+		assert(new_items);
+		array->items = new_items;
+	}
+
+	item = calloc(1, array->item_size);
+	memcpy(item, data, array->item_size);
+	array->items[array->size] = item;
+	array->size++;
+	return item;
+}
+
+/*
+ * Frees the item at |idx| and shifts the tail left one slot; capacity is
+ * halved when the array drops below half full (never below the initial 2).
+ */
+void drv_array_remove(struct drv_array *array, uint32_t idx)
+{
+	uint32_t i;
+
+	assert(array);
+	assert(idx < array->size);
+
+	free(array->items[idx]);
+	array->items[idx] = NULL;
+
+	for (i = idx + 1; i < array->size; i++)
+		array->items[i - 1] = array->items[i];
+
+	array->size--;
+	if ((DIV_ROUND_UP(array->allocations, 2) > array->size) && array->allocations > 2) {
+		void **new_items = NULL;
+		array->allocations = DIV_ROUND_UP(array->allocations, 2);
+		new_items = realloc(array->items, array->allocations * sizeof(*array->items));
+		assert(new_items);
+		array->items = new_items;
+	}
+}
+
+/* Returns the stored item at |idx|; asserts the index is in range. */
+void *drv_array_at_idx(struct drv_array *array, uint32_t idx)
+{
+	assert(idx < array->size);
+	return array->items[idx];
+}
+
+/* Number of items currently stored. */
+uint32_t drv_array_size(struct drv_array *array)
+{
+	return array->size;
+}
+
+/* Frees every stored item, the pointer table and the array object itself. */
+void drv_array_destroy(struct drv_array *array)
+{
+	uint32_t i;
+
+	for (i = 0; i < array->size; i++)
+		free(array->items[i]);
+
+	free(array->items);
+	free(array);
+}
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+struct drv_array;
+
+struct drv_array *drv_array_init(uint32_t item_size);
+
+/* The data will be copied and appended to the array. */
+void *drv_array_append(struct drv_array *array, void *data);
+
+/* The data at the specified index will be freed -- the array will shrink. */
+void drv_array_remove(struct drv_array *array, uint32_t idx);
+
+void *drv_array_at_idx(struct drv_array *array, uint32_t idx);
+
+uint32_t drv_array_size(struct drv_array *array);
+
+/* The array and all associated data will be freed. */
+void drv_array_destroy(struct drv_array *array);
#ifdef DRV_I915
+#include <assert.h>
#include <errno.h>
#include <i915_drm.h>
+#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#define I915_CACHELINE_SIZE 64
#define I915_CACHELINE_MASK (I915_CACHELINE_SIZE - 1)
-static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
- DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_XRGB8888 };
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XRGB2101010, DRM_FORMAT_XRGB8888 };
static const uint32_t tileable_texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8,
DRM_FORMAT_UYVY, DRM_FORMAT_YUYV };
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID,
+ DRM_FORMAT_NV12 };
struct i915_device {
uint32_t gen;
return 4;
}
+/*
+ * We allow allocation of ARGB formats for SCANOUT if the corresponding XRGB
+ * format supports it. It's up to the caller (chrome ozone) to ultimately not
+ * scan out ARGB if the display controller only supports XRGB, but we'll allow
+ * the allocation of the bo here.
+ */
+static bool format_compatible(const struct combination *combo, uint32_t format)
+{
+	if (combo->format == format)
+		return true;
+
+	/* An opaque (X) request is satisfied by the matching alpha (A) combination. */
+	switch (format) {
+	case DRM_FORMAT_XRGB8888:
+		return combo->format == DRM_FORMAT_ARGB8888;
+	case DRM_FORMAT_XBGR8888:
+		return combo->format == DRM_FORMAT_ABGR8888;
+	case DRM_FORMAT_RGBX8888:
+		return combo->format == DRM_FORMAT_RGBA8888;
+	case DRM_FORMAT_BGRX8888:
+		return combo->format == DRM_FORMAT_BGRA8888;
+	default:
+		return false;
+	}
+}
+
static int i915_add_kms_item(struct driver *drv, const struct kms_item *item)
{
uint32_t i;
* Older hardware can't scanout Y-tiled formats. Newer devices can, and
* report this functionality via format modifiers.
*/
- for (i = 0; i < drv->backend->combos.size; i++) {
- combo = &drv->backend->combos.data[i];
- if (combo->format == item->format) {
- if ((combo->metadata.tiling == I915_TILING_Y &&
- item->modifier == I915_FORMAT_MOD_Y_TILED) ||
- (combo->metadata.tiling == I915_TILING_X &&
- item->modifier == I915_FORMAT_MOD_X_TILED)) {
- combo->metadata.modifier = item->modifier;
- combo->usage |= item->usage;
- } else if (combo->metadata.tiling != I915_TILING_Y) {
- combo->usage |= item->usage;
- }
+ for (i = 0; i < drv_array_size(drv->combos); i++) {
+ combo = (struct combination *)drv_array_at_idx(drv->combos, i);
+ if (!format_compatible(combo, item->format))
+ continue;
+
+ if (item->modifier == DRM_FORMAT_MOD_LINEAR &&
+ combo->metadata.tiling == I915_TILING_X) {
+ /*
+ * FIXME: drv_query_kms() does not report the available modifiers
+ * yet, but we know that all hardware can scanout from X-tiled
+ * buffers, so let's add this to our combinations, except for
+ * cursor, which must not be tiled.
+ */
+ combo->use_flags |= item->use_flags & ~BO_USE_CURSOR;
}
+
+ /* If we can scanout NV12, we support all tiling modes. */
+ if (item->format == DRM_FORMAT_NV12)
+ combo->use_flags |= item->use_flags;
+
+ if (combo->metadata.modifier == item->modifier)
+ combo->use_flags |= item->use_flags;
}
return 0;
static int i915_add_combinations(struct driver *drv)
{
int ret;
- uint32_t i, num_items;
- struct kms_item *items;
+ uint32_t i;
+ struct drv_array *kms_items;
struct format_metadata metadata;
- uint64_t render_flags, texture_flags;
+ uint64_t render_use_flags, texture_use_flags;
- render_flags = BO_USE_RENDER_MASK;
- texture_flags = BO_USE_TEXTURE_MASK;
+ render_use_flags = BO_USE_RENDER_MASK;
+ texture_use_flags = BO_USE_TEXTURE_MASK;
metadata.tiling = I915_TILING_NONE;
metadata.priority = 1;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, render_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, render_use_flags);
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &metadata, texture_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &metadata, texture_use_flags);
- ret = drv_add_combinations(drv, tileable_texture_source_formats,
- ARRAY_SIZE(texture_source_formats), &metadata, texture_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, tileable_texture_source_formats,
+ ARRAY_SIZE(tileable_texture_source_formats), &metadata,
+ texture_use_flags);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- render_flags &= ~BO_USE_SW_WRITE_OFTEN;
- render_flags &= ~BO_USE_SW_READ_OFTEN;
- render_flags &= ~BO_USE_LINEAR;
+ /* IPU3 camera ISP supports only NV12 output. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
+ * from camera.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+
+ /* Strip CPU-access flags before adding the tiled combinations below. */
+ render_use_flags &= ~BO_USE_RENDERSCRIPT;
+ render_use_flags &= ~BO_USE_SW_WRITE_OFTEN;
+ render_use_flags &= ~BO_USE_SW_READ_OFTEN;
+ render_use_flags &= ~BO_USE_LINEAR;
- texture_flags &= ~BO_USE_SW_WRITE_OFTEN;
- texture_flags &= ~BO_USE_SW_READ_OFTEN;
- texture_flags &= ~BO_USE_LINEAR;
+ texture_use_flags &= ~BO_USE_RENDERSCRIPT;
+ texture_use_flags &= ~BO_USE_SW_WRITE_OFTEN;
+ texture_use_flags &= ~BO_USE_SW_READ_OFTEN;
+ texture_use_flags &= ~BO_USE_LINEAR;
metadata.tiling = I915_TILING_X;
metadata.priority = 2;
+ metadata.modifier = I915_FORMAT_MOD_X_TILED;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, render_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, render_use_flags);
- ret = drv_add_combinations(drv, tileable_texture_source_formats,
- ARRAY_SIZE(tileable_texture_source_formats), &metadata,
- texture_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, tileable_texture_source_formats,
+ ARRAY_SIZE(tileable_texture_source_formats), &metadata,
+ texture_use_flags);
metadata.tiling = I915_TILING_Y;
metadata.priority = 3;
+ metadata.modifier = I915_FORMAT_MOD_Y_TILED;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, render_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, render_use_flags);
- ret = drv_add_combinations(drv, tileable_texture_source_formats,
- ARRAY_SIZE(tileable_texture_source_formats), &metadata,
- texture_flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, tileable_texture_source_formats,
+ ARRAY_SIZE(tileable_texture_source_formats), &metadata,
+ texture_use_flags);
+
+ /* Support y-tiled NV12 for libva */
+ const uint32_t nv12_format = DRM_FORMAT_NV12;
+ drv_add_combinations(drv, &nv12_format, 1, &metadata,
+ BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER);
- items = drv_query_kms(drv, &num_items);
- if (!items || !num_items)
+ /* No KMS items (e.g. no display resources): the combinations above are final. */
+ kms_items = drv_query_kms(drv);
+ if (!kms_items)
return 0;
- for (i = 0; i < num_items; i++) {
- ret = i915_add_kms_item(drv, &items[i]);
+ for (i = 0; i < drv_array_size(kms_items); i++) {
+ ret = i915_add_kms_item(drv, (struct kms_item *)drv_array_at_idx(kms_items, i));
if (ret) {
- free(items);
+ drv_array_destroy(kms_items);
return ret;
}
}
- free(items);
+ drv_array_destroy(kms_items);
return 0;
}
uint32_t *aligned_height)
{
struct i915_device *i915 = bo->drv->priv;
- uint32_t horizontal_alignment = 4;
- uint32_t vertical_alignment = 4;
+ uint32_t horizontal_alignment;
+ uint32_t vertical_alignment;
switch (tiling) {
default:
case I915_TILING_NONE:
+ /*
+ * The Intel GPU doesn't need any alignment in linear mode,
+ * but libva requires the allocation stride to be aligned to
+ * 16 bytes and height to 4 rows. Further, we round up the
+ * horizontal alignment so that row start on a cache line (64
+ * bytes).
+ */
horizontal_alignment = 64;
+ vertical_alignment = 4;
break;
case I915_TILING_X:
get_param.value = &device_id;
ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
if (ret) {
- fprintf(stderr, "drv: Failed to get I915_PARAM_CHIPSET_ID\n");
+ drv_log("Failed to get I915_PARAM_CHIPSET_ID\n");
free(i915);
return -EINVAL;
}
get_param.value = &i915->has_llc;
ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param);
if (ret) {
- fprintf(stderr, "drv: Failed to get I915_PARAM_HAS_LLC\n");
+ drv_log("Failed to get I915_PARAM_HAS_LLC\n");
free(i915);
return -EINVAL;
}
return i915_add_combinations(drv);
}
-static int i915_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+/*
+ * Computes per-plane stride, size and offset for @format at @width x @height,
+ * honouring the tiling already chosen in bo->tiling, and sets bo->total_size.
+ * NOTE(review): the assert() below needs <assert.h> — confirm this file
+ * includes it.
+ */
+static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format)
+{
+ uint32_t offset;
+ size_t plane;
+ int ret;
+
+ offset = 0;
+ for (plane = 0; plane < drv_num_planes_from_format(format); plane++) {
+ uint32_t stride = drv_stride_from_format(format, width, plane);
+ uint32_t plane_height = drv_height_from_format(format, height, plane);
+
+ /* Non-linear planes are expected to start on a 4096-byte boundary. */
+ if (bo->tiling != I915_TILING_NONE)
+ assert(IS_ALIGNED(offset, 4096));
+
+ ret = i915_align_dimensions(bo, bo->tiling, &stride, &plane_height);
+ if (ret)
+ return ret;
+
+ bo->strides[plane] = stride;
+ bo->sizes[plane] = stride * plane_height;
+ bo->offsets[plane] = offset;
+ offset += bo->sizes[plane];
+ }
+
+ bo->total_size = offset;
+
+ return 0;
+}
+
+static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, uint64_t modifier)
{
int ret;
size_t plane;
- uint32_t stride;
struct drm_i915_gem_create gem_create;
struct drm_i915_gem_set_tiling gem_set_tiling;
- if (flags & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
+ /*
+ * NOTE(review): an unrecognized modifier falls through this switch with
+ * bo->tiling unmodified; current callers only pass the three cases
+ * below, but a default case returning -EINVAL would be safer.
+ */
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
bo->tiling = I915_TILING_NONE;
- else if (flags & BO_USE_SCANOUT)
+ break;
+ case I915_FORMAT_MOD_X_TILED:
bo->tiling = I915_TILING_X;
- else
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
bo->tiling = I915_TILING_Y;
-
- stride = drv_stride_from_format(format, width, 0);
- /*
- * Align the Y plane to 128 bytes so the chroma planes would be aligned
- * to 64 byte boundaries. This is an Intel HW requirement.
- */
- if (format == DRM_FORMAT_YVU420 || format == DRM_FORMAT_YVU420_ANDROID) {
- stride = ALIGN(stride, 128);
- bo->tiling = I915_TILING_NONE;
+ break;
}
- ret = i915_align_dimensions(bo, bo->tiling, &stride, &height);
- if (ret)
- return ret;
-
- drv_bo_from_format(bo, stride, height, format);
+ bo->format_modifiers[0] = modifier;
+
+ if (format == DRM_FORMAT_YVU420_ANDROID) {
+ /*
+ * We only need to be able to use this as a linear texture,
+ * which doesn't put any HW restrictions on how we lay it
+ * out. The Android format does require the stride to be a
+ * multiple of 16 and expects the Cr and Cb stride to be
+ * ALIGN(Y_stride / 2, 16), which we can make happen by
+ * aligning to 32 bytes here.
+ */
+ uint32_t stride = ALIGN(width, 32);
+ drv_bo_from_format(bo, stride, height, format);
+ } else {
+ /* NOTE(review): return value ignored — an alignment failure in
+ i915_bo_from_format() would go unnoticed; consider propagating. */
+ i915_bo_from_format(bo, width, height, format);
+ }
memset(&gem_create, 0, sizeof(gem_create));
gem_create.size = bo->total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n",
- gem_create.size);
+ drv_log("DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n", gem_create.size);
return ret;
}
gem_close.handle = bo->handles[0].u32;
drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_TILING failed with %d", errno);
+ drv_log("DRM_IOCTL_I915_GEM_SET_TILING failed with %d\n", errno);
return -errno;
}
return 0;
}
+/*
+ * Default allocation path: picks the modifier from the driver's
+ * best-matching combination for (format, use_flags).
+ */
+static int i915_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ struct combination *combo;
+
+ combo = drv_get_combination(bo->drv, format, use_flags);
+ if (!combo)
+ return -EINVAL;
+
+ return i915_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier);
+}
+
+/*
+ * Explicit-modifier path: prefers Y-tiled over X-tiled over linear from the
+ * caller-supplied list.
+ */
+static int i915_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
+ uint32_t format, const uint64_t *modifiers, uint32_t count)
+{
+ static const uint64_t modifier_order[] = {
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ };
+ uint64_t modifier;
+
+ modifier = drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order));
+
+ return i915_bo_create_for_modifier(bo, width, height, format, modifier);
+}
+
+
static void i915_close(struct driver *drv)
{
free(drv->priv);
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_GET_TILING, &gem_get_tiling);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_GET_TILING failed.");
+ drv_gem_bo_destroy(bo);
+ drv_log("DRM_IOCTL_I915_GEM_GET_TILING failed.\n");
return ret;
}
return 0;
}
-static void *i915_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
void *addr;
- struct drm_i915_gem_set_domain set_domain;
- memset(&set_domain, 0, sizeof(set_domain));
- set_domain.handle = bo->handles[0].u32;
if (bo->tiling == I915_TILING_NONE) {
struct drm_i915_gem_mmap gem_map;
memset(&gem_map, 0, sizeof(gem_map));
+ /* NOTE(review): scanout buffers are mapped write-combined
+ (I915_MMAP_WC); RENDERSCRIPT is excluded, presumably to keep
+ cached CPU reads — confirm. */
+ if ((bo->use_flags & BO_USE_SCANOUT) && !(bo->use_flags & BO_USE_RENDERSCRIPT))
+ gem_map.flags = I915_MMAP_WC;
+
gem_map.handle = bo->handles[0].u32;
gem_map.offset = 0;
gem_map.size = bo->total_size;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP failed\n");
+ drv_log("DRM_IOCTL_I915_GEM_MMAP failed\n");
return MAP_FAILED;
}
addr = (void *)(uintptr_t)gem_map.addr_ptr;
- set_domain.read_domains = I915_GEM_DOMAIN_CPU;
- set_domain.write_domain = I915_GEM_DOMAIN_CPU;
-
} else {
struct drm_i915_gem_mmap_gtt gem_map;
memset(&gem_map, 0, sizeof(gem_map));
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
+ drv_log("DRM_IOCTL_I915_GEM_MMAP_GTT failed\n");
return MAP_FAILED;
}
- addr = mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+ addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
-
- set_domain.read_domains = I915_GEM_DOMAIN_GTT;
- set_domain.write_domain = I915_GEM_DOMAIN_GTT;
}
if (addr == MAP_FAILED) {
- fprintf(stderr, "drv: i915 GEM mmap failed\n");
+ drv_log("i915 GEM mmap failed\n");
return addr;
}
+ /* Domain setup moved from map time to i915_bo_invalidate(). */
+ vma->length = bo->total_size;
+ return addr;
+}
+
+/*
+ * Pre-access sync: moves the object into the CPU domain for linear buffers
+ * or the GTT domain for tiled ones, requesting the write domain only when
+ * the mapping was made writable.
+ */
+static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ int ret;
+ struct drm_i915_gem_set_domain set_domain;
+
+ memset(&set_domain, 0, sizeof(set_domain));
+ set_domain.handle = bo->handles[0].u32;
+ if (bo->tiling == I915_TILING_NONE) {
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ if (mapping->vma->map_flags & BO_MAP_WRITE)
+ set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+ } else {
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ if (mapping->vma->map_flags & BO_MAP_WRITE)
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+ }
+
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_DOMAIN failed\n");
- return MAP_FAILED;
+ drv_log("DRM_IOCTL_I915_GEM_SET_DOMAIN failed with %d\n", ret);
+ return ret;
}
- data->length = bo->total_size;
- return addr;
+ return 0;
}
-static int i915_bo_unmap(struct bo *bo, struct map_info *data)
+/*
+ * Post-access sync: clflush CPU-cached linear mappings on non-LLC parts.
+ * Unmapping itself is now handled by the generic drv_bo_munmap.
+ */
+static int i915_bo_flush(struct bo *bo, struct mapping *mapping)
{
struct i915_device *i915 = bo->drv->priv;
if (!i915->has_llc && bo->tiling == I915_TILING_NONE)
- i915_clflush(data->addr, data->length);
+ i915_clflush(mapping->vma->addr, mapping->vma->length);
- return munmap(data->addr, data->length);
+ return 0;
}
-static uint32_t i915_resolve_format(uint32_t format)
+static uint32_t i915_resolve_format(uint32_t format, uint64_t use_flags)
{
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ /* KBL camera subsystem requires NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ return DRM_FORMAT_NV12;
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
- return DRM_FORMAT_YVU420_ANDROID;
+ /*
+ * KBL camera subsystem requires NV12. Our other use cases
+ * don't care:
+ * - Hardware video supports NV12,
+ * - USB Camera HALv3 supports NV12,
+ * - USB Camera HALv1 doesn't use this format.
+ * Moreover, NV12 is preferred for video, due to overlay
+ * support on SKL+.
+ */
+ return DRM_FORMAT_NV12;
default:
return format;
}
}
-struct backend backend_i915 = {
+const struct backend backend_i915 = {
.name = "i915",
.init = i915_init,
.close = i915_close,
.bo_create = i915_bo_create,
+ .bo_create_with_modifiers = i915_bo_create_with_modifiers,
.bo_destroy = drv_gem_bo_destroy,
.bo_import = i915_bo_import,
.bo_map = i915_bo_map,
- .bo_unmap = i915_bo_unmap,
+ /* Explicit invalidate/flush hooks replace the old map-time set_domain. */
+ .bo_unmap = drv_bo_munmap,
+ .bo_invalidate = i915_bo_invalidate,
+ .bo_flush = i915_bo_flush,
.resolve_format = i915_resolve_format,
};
static int marvell_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ /* NOTE(review): status no longer checked — assumes drv_add_combinations
+ was changed to return void; verify against the new helpers API. */
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
return drv_add_linear_combinations(drv, render_target_formats,
ARRAY_SIZE(render_target_formats));
}
-struct backend backend_marvell = {
+const struct backend backend_marvell = {
.name = "marvell",
.init = marvell_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
+ /* NOTE(review): every backend now sets .bo_unmap — presumably required by the new map API. */
+ .bo_unmap = drv_bo_munmap,
};
#endif
#endif
#include "helpers.h"
#include "util.h"
+/*
+ * Bookkeeping for cached RENDERSCRIPT mappings: cached_addr is a malloc'd
+ * CPU-cached shadow of the buffer, gem_addr the real GEM mapping; the two
+ * are synced in mediatek_bo_invalidate()/mediatek_bo_flush().
+ */
+struct mediatek_private_map_data {
+ void *cached_addr;
+ void *gem_addr;
+};
+
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888 };
+ DRM_FORMAT_BGR888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 };
static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
DRM_FORMAT_YVU420_ANDROID };
static int mediatek_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
return drv_modify_linear_combinations(drv);
}
static int mediatek_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
int ret;
size_t plane;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_CREATE failed (size=%llu)\n",
- gem_create.size);
+ drv_log("DRM_IOCTL_MTK_GEM_CREATE failed (size=%llu)\n", gem_create.size);
return ret;
}
return 0;
}
-static void *mediatek_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+/*
+ * Maps the GEM buffer; for RENDERSCRIPT buffers a cached malloc'd shadow
+ * copy is handed back instead (synced in bo_invalidate/bo_flush).
+ */
+static void *mediatek_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_mtk_gem_map_off gem_map;
+ struct mediatek_private_map_data *priv;
memset(&gem_map, 0, sizeof(gem_map));
gem_map.handle = bo->handles[0].u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_MAP_OFFSET failed\n");
+ drv_log("DRM_IOCTL_MTK_GEM_MAP_OFFSET failed\n");
return MAP_FAILED;
}
- data->length = bo->total_size;
+ void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ gem_map.offset);
+ /* Don't hand a bogus pointer into the bounce-buffer path below. */
+ if (addr == MAP_FAILED) {
+ drv_log("mmap failed\n");
+ return MAP_FAILED;
+ }
+
+ vma->length = bo->total_size;
+
+ if (bo->use_flags & BO_USE_RENDERSCRIPT) {
+ priv = calloc(1, sizeof(*priv));
+ if (priv)
+ priv->cached_addr = calloc(1, bo->total_size);
+ if (!priv || !priv->cached_addr) {
+ drv_log("out of memory for cached mapping\n");
+ free(priv);
+ munmap(addr, bo->total_size);
+ return MAP_FAILED;
+ }
+ priv->gem_addr = addr;
+ vma->priv = priv;
+ addr = priv->cached_addr;
+ }
+
+ return addr;
+}
+
+/* Releases the cached shadow copy (if any) and unmaps the real GEM mapping. */
+static int mediatek_bo_unmap(struct bo *bo, struct vma *vma)
+{
+ if (vma->priv) {
+ struct mediatek_private_map_data *priv = vma->priv;
+ vma->addr = priv->gem_addr;
+ free(priv->cached_addr);
+ free(priv);
+ vma->priv = NULL;
+ }
+
+ return munmap(vma->addr, vma->length);
+}
+
+/* Pre-access sync: refresh the cached shadow from the GEM buffer. */
+static int mediatek_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ if (mapping->vma->priv) {
+ struct mediatek_private_map_data *priv = mapping->vma->priv;
+ memcpy(priv->cached_addr, priv->gem_addr, bo->total_size);
+ }
+
+ return 0;
+}
+
+/* Post-access sync: write the cached shadow back for writable mappings. */
+static int mediatek_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ struct mediatek_private_map_data *priv = mapping->vma->priv;
+ if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
+ memcpy(priv->gem_addr, priv->cached_addr, bo->total_size);
- return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
- gem_map.offset);
+ return 0;
}
-static uint32_t mediatek_resolve_format(uint32_t format)
+static uint32_t mediatek_resolve_format(uint32_t format, uint64_t use_flags)
{
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
- return DRM_FORMAT_YVU420_ANDROID;
+ /* NOTE(review): now plain YVU420 rather than the Android variant —
+ confirm gralloc stride expectations are still met. */
+ return DRM_FORMAT_YVU420;
default:
return format;
}
}
-struct backend backend_mediatek = {
+const struct backend backend_mediatek = {
.name = "mediatek",
.init = mediatek_init,
.bo_create = mediatek_bo_create,
.bo_destroy = drv_gem_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = mediatek_bo_map,
+ .bo_unmap = mediatek_bo_unmap,
+ .bo_invalidate = mediatek_bo_invalidate,
+ .bo_flush = mediatek_bo_flush,
.resolve_format = mediatek_resolve_format,
};
--- /dev/null
+/*
+ * Copyright 2018 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_MESON
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+/* Linear render-target combinations only; allocation uses the generic dumb-buffer helpers. */
+static int meson_init(struct driver *drv)
+{
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+const struct backend backend_meson = {
+ .name = "meson",
+ .init = meson_init,
+ .bo_create = drv_dumb_bo_create,
+ .bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
+ .bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
+};
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef DRV_MSM
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+/* 1 << 6 = 64-pixel tile; buffer dimensions are padded to it below. */
+#define MESA_LLVMPIPE_TILE_ORDER 6
+#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB8888 };
+
+static int msm_init(struct driver *drv)
+{
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+/*
+ * Pads dimensions to llvmpipe's tile size so software rendering can target
+ * the buffer.
+ * NOTE(review): reads bo->format/bo->height rather than the format/height
+ * parameters — relies on the core pre-populating bo before bo_create;
+ * consider using the parameters directly.
+ */
+static int msm_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t flags)
+{
+ width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
+ height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
+
+ /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. */
+ if (bo->format == DRM_FORMAT_YVU420_ANDROID)
+ height = bo->height;
+
+ return drv_dumb_bo_create(bo, width, height, format, flags);
+}
+
+const struct backend backend_msm = {
+ .name = "msm",
+ .init = msm_init,
+ .bo_create = msm_bo_create,
+ .bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
+ .bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
+};
+
+#endif /* DRV_MSM */
static int nouveau_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ /* NOTE(review): status no longer checked — matches the new void-returning
+ drv_add_combinations pattern used by the other linear backends. */
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
return drv_modify_linear_combinations(drv);
}
-struct backend backend_nouveau = {
+const struct backend backend_nouveau = {
.name = "nouveau",
.init = nouveau_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
};
# found in the LICENSE file.
find \
'(' -name '*.[ch]' -or -name '*.cc' ')' \
- -not -name 'gbm.h' \
+ -not -name 'gbm.h' -not -name 'virgl_hw.h' \
-exec clang-format -style=file -i {} +
--- /dev/null
+/*
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "drv_priv.h"
+#include "helpers.h"
+#include "util.h"
+
+static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888 };
+
+/* Linear render-target combinations only; allocation uses the generic dumb-buffer helpers. */
+static int radeon_init(struct driver *drv)
+{
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+const struct backend backend_radeon = {
+ .name = "radeon",
+ .init = radeon_init,
+ .bo_create = drv_dumb_bo_create,
+ .bo_destroy = drv_dumb_bo_destroy,
+ .bo_import = drv_prime_bo_import,
+ .bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
+};
#include "helpers.h"
#include "util.h"
+/*
+ * Bookkeeping for cached RENDERSCRIPT mappings: cached_addr is a malloc'd
+ * shadow copy, gem_addr the real GEM mapping; synced in
+ * rockchip_bo_invalidate()/rockchip_bo_flush().
+ */
+struct rockchip_private_map_data {
+ void *cached_addr;
+ void *gem_addr;
+};
+
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888 };
+ DRM_FORMAT_BGR888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 };
static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12,
DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };
static int rockchip_add_kms_item(struct driver *drv, const struct kms_item *item)
{
- int ret;
uint32_t i, j;
- uint64_t flags;
+ uint64_t use_flags;
struct combination *combo;
struct format_metadata metadata;
- for (i = 0; i < drv->backend->combos.size; i++) {
- combo = &drv->backend->combos.data[i];
+ for (i = 0; i < drv_array_size(drv->combos); i++) {
+ combo = (struct combination *)drv_array_at_idx(drv->combos, i);
if (combo->format == item->format) {
if (item->modifier == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC) {
- flags = BO_USE_RENDERING | BO_USE_SCANOUT | BO_USE_TEXTURE;
+ use_flags = BO_USE_RENDERING | BO_USE_SCANOUT | BO_USE_TEXTURE;
metadata.modifier = item->modifier;
metadata.tiling = 0;
metadata.priority = 2;
for (j = 0; j < ARRAY_SIZE(texture_source_formats); j++) {
if (item->format == texture_source_formats[j])
- flags &= ~BO_USE_RENDERING;
+ use_flags &= ~BO_USE_RENDERING;
}
- ret = drv_add_combination(drv, item[i].format, &metadata, flags);
- if (ret)
- return ret;
+ /* (also fixes the old out-of-bounds item[i].format indexing) */
+ drv_add_combinations(drv, &item->format, 1, &metadata, use_flags);
} else {
- combo->usage |= item->usage;
+ combo->use_flags |= item->use_flags;
}
}
}
static int rockchip_init(struct driver *drv)
{
int ret;
- uint32_t i, num_items;
- struct kms_item *items;
+ uint32_t i;
+ struct drv_array *kms_items;
struct format_metadata metadata;
metadata.tiling = 0;
metadata.priority = 1;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, BO_USE_RENDER_MASK);
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &metadata, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &metadata, BO_USE_TEXTURE_MASK);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- items = drv_query_kms(drv, &num_items);
- if (!items || !num_items)
+ /* Camera ISP supports only NV12 output. */
+ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+ /*
+ * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
+ * from camera.
+ */
+ drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
+
+ /* Fold KMS-reported scanout/AFBC capabilities into the combinations. */
+ kms_items = drv_query_kms(drv);
+ if (!kms_items)
return 0;
- for (i = 0; i < num_items; i++) {
- ret = rockchip_add_kms_item(drv, &items[i]);
+ for (i = 0; i < drv_array_size(kms_items); i++) {
+ ret = rockchip_add_kms_item(drv, (struct kms_item *)drv_array_at_idx(kms_items, i));
if (ret) {
- free(items);
+ drv_array_destroy(kms_items);
return ret;
}
}
- free(items);
+ drv_array_destroy(kms_items);
return 0;
}
static bool has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier)
{
uint32_t i;
-
for (i = 0; i < count; i++)
if (list[i] == modifier)
return true;
* pick that */
afbc_bo_from_format(bo, width, height, format);
} else {
- if (!has_modifier(modifiers, count, DRM_FORMAT_MOD_NONE)) {
+ if (!has_modifier(modifiers, count, DRM_FORMAT_MOD_LINEAR)) {
errno = EINVAL;
- fprintf(stderr, "no usable modifier found\n");
+ drv_log("no usable modifier found\n");
return -1;
}
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%llu)\n",
- gem_create.size);
+ drv_log("DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%llu)\n",
+ (unsigned long long)gem_create.size);
return ret;
}
}
static int rockchip_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
- uint64_t modifiers[] = { DRM_FORMAT_MOD_NONE };
-
+ /* Default path: no caller-supplied modifier list, so allocate linear. */
+ uint64_t modifiers[] = { DRM_FORMAT_MOD_LINEAR };
return rockchip_bo_create_with_modifiers(bo, width, height, format, modifiers,
ARRAY_SIZE(modifiers));
}
-static void *rockchip_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+/*
+ * Maps the GEM buffer; for RENDERSCRIPT buffers a cached malloc'd shadow
+ * copy is handed back instead (synced in bo_invalidate/bo_flush).
+ */
+static void *rockchip_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_rockchip_gem_map_off gem_map;
+ struct rockchip_private_map_data *priv;
/* We can only map buffers created with SW access flags, which should
* have no modifiers (ie, not AFBC). */
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &gem_map);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n");
+ drv_log("DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n");
return MAP_FAILED;
}
- data->length = bo->total_size;
+ void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ gem_map.offset);
+ /* Don't hand a bogus pointer into the bounce-buffer path below. */
+ if (addr == MAP_FAILED) {
+ drv_log("mmap failed\n");
+ return MAP_FAILED;
+ }
+
+ vma->length = bo->total_size;
+
+ if (bo->use_flags & BO_USE_RENDERSCRIPT) {
+ priv = calloc(1, sizeof(*priv));
+ if (priv)
+ priv->cached_addr = calloc(1, bo->total_size);
+ if (!priv || !priv->cached_addr) {
+ drv_log("out of memory for cached mapping\n");
+ free(priv);
+ munmap(addr, bo->total_size);
+ return MAP_FAILED;
+ }
+ priv->gem_addr = addr;
+ vma->priv = priv;
+ addr = priv->cached_addr;
+ }
- return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
- gem_map.offset);
+ return addr;
+}
+
+/* Releases the cached shadow copy (if any) and unmaps the real GEM mapping. */
+static int rockchip_bo_unmap(struct bo *bo, struct vma *vma)
+{
+ if (vma->priv) {
+ struct rockchip_private_map_data *priv = vma->priv;
+ vma->addr = priv->gem_addr;
+ free(priv->cached_addr);
+ free(priv);
+ vma->priv = NULL;
+ }
+
+ return munmap(vma->addr, vma->length);
+}
+
+/* Pre-access sync: refresh the cached shadow from the GEM buffer. */
+static int rockchip_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ if (mapping->vma->priv) {
+ struct rockchip_private_map_data *priv = mapping->vma->priv;
+ memcpy(priv->cached_addr, priv->gem_addr, bo->total_size);
+ }
+
+ return 0;
+}
+
+/* Post-access sync: write the cached shadow back for writable mappings. */
+static int rockchip_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ struct rockchip_private_map_data *priv = mapping->vma->priv;
+ if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
+ memcpy(priv->gem_addr, priv->cached_addr, bo->total_size);
+
+ return 0;
}
-static uint32_t rockchip_resolve_format(uint32_t format)
+static uint32_t rockchip_resolve_format(uint32_t format, uint64_t use_flags)
{
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
+ /* Camera subsystem requires NV12. */
+ if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
+ return DRM_FORMAT_NV12;
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
}
}
-struct backend backend_rockchip = {
+const struct backend backend_rockchip = {
.name = "rockchip",
.init = rockchip_init,
.bo_create = rockchip_bo_create,
.bo_destroy = drv_gem_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = rockchip_bo_map,
+ .bo_unmap = rockchip_bo_unmap,
+ .bo_invalidate = rockchip_bo_invalidate,
+ .bo_flush = rockchip_bo_flush,
.resolve_format = rockchip_resolve_format,
};
#ifdef DRV_TEGRA
+#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
static void transfer_tile(struct bo *bo, uint8_t *tiled, uint8_t *untiled, enum tegra_map_type type,
uint32_t bytes_per_pixel, uint32_t gob_top, uint32_t gob_left,
- uint32_t gob_size_pixels)
+ uint32_t gob_size_pixels, uint8_t *tiled_last)
{
uint8_t *tmp;
uint32_t x, y, k;
x = gob_left + (((k >> 3) & 8) | ((k >> 1) & 4) | (k & 3));
y = gob_top + ((k >> 7 << 3) | ((k >> 3) & 6) | ((k >> 2) & 1));
- tmp = untiled + (y * bo->strides[0]) + (x * bytes_per_pixel);
+ /* Never read or write past the end of the tiled allocation. */
+ if (tiled >= tiled_last)
+ return;
+
+ /* Edge GOBs may extend past the visible surface: skip those
+ pixels but keep consuming tiled bytes. */
+ if (x >= bo->width || y >= bo->height) {
+ tiled += bytes_per_pixel;
+ continue;
+ }
+
+ tmp = untiled + y * bo->strides[0] + x * bytes_per_pixel;
if (type == TEGRA_READ_TILED_BUFFER)
memcpy(tmp, tiled, bytes_per_pixel);
uint32_t gob_width, gob_height, gob_size_bytes, gob_size_pixels, gob_count_x, gob_count_y,
gob_top, gob_left;
uint32_t i, j, offset;
- uint8_t *tmp;
+ uint8_t *tmp, *tiled_last;
uint32_t bytes_per_pixel = drv_stride_from_format(bo->format, 1, 0);
/*
gob_count_x = DIV_ROUND_UP(bo->strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
gob_count_y = DIV_ROUND_UP(bo->height, gob_height);
+ /* One past the last valid tiled byte, for transfer_tile's bounds check. */
+ tiled_last = tiled + bo->total_size;
+
offset = 0;
for (j = 0; j < gob_count_y; j++) {
gob_top = j * gob_height;
gob_left = i * gob_width;
transfer_tile(bo, tmp, untiled, type, bytes_per_pixel, gob_top, gob_left,
- gob_size_pixels);
+ gob_size_pixels, tiled_last);
offset += gob_size_bytes;
}
static int tegra_init(struct driver *drv)
{
- int ret;
struct format_metadata metadata;
- uint64_t flags = BO_USE_RENDER_MASK;
+ uint64_t use_flags = BO_USE_RENDER_MASK;
metadata.tiling = NV_MEM_KIND_PITCH;
metadata.priority = 1;
- metadata.modifier = DRM_FORMAT_MOD_NONE;
+ metadata.modifier = DRM_FORMAT_MOD_LINEAR;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
- flags &= ~BO_USE_SW_WRITE_OFTEN;
- flags &= ~BO_USE_SW_READ_OFTEN;
- flags &= ~BO_USE_LINEAR;
+ use_flags &= ~BO_USE_SW_WRITE_OFTEN;
+ use_flags &= ~BO_USE_SW_READ_OFTEN;
+ use_flags &= ~BO_USE_LINEAR;
metadata.tiling = NV_MEM_KIND_C32_2CRA;
metadata.priority = 2;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &metadata, flags);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &metadata, use_flags);
drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT);
drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT);
}
static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
uint32_t size, stride, block_height_log2 = 0;
enum nv_mem_kind kind = NV_MEM_KIND_PITCH;
struct drm_tegra_gem_create gem_create;
int ret;
- if (flags & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
+ if (use_flags &
+ (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN))
compute_layout_linear(width, height, format, &stride, &size);
else
compute_layout_blocklinear(width, height, format, &kind, &block_height_log2,
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_CREATE, &gem_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size);
+ drv_log("DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size);
return ret;
}
return 0;
}
-static void *tegra_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data)
+{
+ int ret;
+ struct drm_tegra_gem_get_tiling gem_get_tiling;
+
+ ret = drv_prime_bo_import(bo, data);
+ if (ret)
+ return ret;
+
+ /* TODO(gsingh): export modifiers and get rid of backdoor tiling. */
+ memset(&gem_get_tiling, 0, sizeof(gem_get_tiling));
+ gem_get_tiling.handle = bo->handles[0].u32;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_GET_TILING, &gem_get_tiling);
+ if (ret) {
+ drv_gem_bo_destroy(bo);
+ return ret;
+ }
+
+ /* NOTE(djmk): we only know about one tiled format, so if our drmIoctl call tells us we are
+ tiled, assume it is this format (NV_MEM_KIND_C32_2CRA) otherwise linear (KIND_PITCH). */
+ if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_PITCH) {
+ bo->tiling = NV_MEM_KIND_PITCH;
+ } else if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_BLOCK) {
+ bo->tiling = NV_MEM_KIND_C32_2CRA;
+ } else {
+ drv_log("%s: unknown tile format %d\n", __func__, gem_get_tiling.mode);
+ drv_gem_bo_destroy(bo);
+ assert(0);
+ }
+
+ bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling);
+ return 0;
+}
+
+static void *tegra_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_tegra_gem_mmap gem_map;
ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map, sizeof(gem_map));
if (ret < 0) {
- fprintf(stderr, "drv: DRM_TEGRA_GEM_MMAP failed\n");
+ drv_log("DRM_TEGRA_GEM_MMAP failed\n");
return MAP_FAILED;
}
- void *addr = mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+ void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
gem_map.offset);
-
- data->length = bo->total_size;
-
+ vma->length = bo->total_size;
if ((bo->tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
priv = calloc(1, sizeof(*priv));
priv->untiled = calloc(1, bo->total_size);
priv->tiled = addr;
- data->priv = priv;
+ vma->priv = priv;
transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_READ_TILED_BUFFER);
addr = priv->untiled;
}
return addr;
}
-static int tegra_bo_unmap(struct bo *bo, struct map_info *data)
+static int tegra_bo_unmap(struct bo *bo, struct vma *vma)
{
- if (data->priv) {
- struct tegra_private_map_data *priv = data->priv;
- transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER);
- data->addr = priv->tiled;
+ if (vma->priv) {
+ struct tegra_private_map_data *priv = vma->priv;
+ vma->addr = priv->tiled;
free(priv->untiled);
free(priv);
- data->priv = NULL;
+ vma->priv = NULL;
}
- return munmap(data->addr, data->length);
+ return munmap(vma->addr, vma->length);
+}
+
+static int tegra_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ struct tegra_private_map_data *priv = mapping->vma->priv;
+
+ if (priv && (mapping->vma->map_flags & BO_MAP_WRITE))
+ transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER);
+
+ return 0;
}
-struct backend backend_tegra = {
+const struct backend backend_tegra = {
.name = "tegra",
.init = tegra_init,
.bo_create = tegra_bo_create,
.bo_destroy = drv_gem_bo_destroy,
- .bo_import = drv_prime_bo_import,
+ .bo_import = tegra_bo_import,
.bo_map = tegra_bo_map,
.bo_unmap = tegra_bo_unmap,
+ .bo_flush = tegra_bo_flush,
};
#endif
static int udl_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
return drv_modify_linear_combinations(drv);
}
-struct backend backend_udl = {
+const struct backend backend_udl = {
.name = "udl",
.init = udl_init,
.bo_create = drv_dumb_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
};
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#define ARRAY_SIZE(A) (sizeof(A) / sizeof(*(A)))
#define PUBLIC __attribute__((visibility("default")))
-#define ALIGN(A, B) (((A) + (B)-1) / (B) * (B))
+#define ALIGN(A, B) (((A) + (B)-1) & ~((B)-1))
+#define IS_ALIGNED(A, B) (ALIGN((A), (B)) == (A))
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#endif
static int vc4_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
return drv_modify_linear_combinations(drv);
}
static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
int ret;
size_t plane;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VC4_CREATE_BO, &bo_create);
if (ret) {
- fprintf(stderr, "drv: DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n",
- bo->total_size);
+ drv_log("DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", bo->total_size);
return ret;
}
return 0;
}
-static void *vc4_bo_map(struct bo *bo, struct map_info *data, size_t plane)
+static void *vc4_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
struct drm_vc4_mmap_bo bo_map;
ret = drmCommandWriteRead(bo->drv->fd, DRM_VC4_MMAP_BO, &bo_map, sizeof(bo_map));
if (ret) {
- fprintf(stderr, "drv: DRM_VC4_MMAP_BO failed\n");
+ drv_log("DRM_VC4_MMAP_BO failed\n");
return MAP_FAILED;
}
- data->length = bo->total_size;
-
- return mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->drv->fd,
+ vma->length = bo->total_size;
+ return mmap(NULL, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
bo_map.offset);
}
-struct backend backend_vc4 = {
+const struct backend backend_vc4 = {
.name = "vc4",
.init = vc4_init,
.bo_create = vc4_bo_create,
.bo_import = drv_prime_bo_import,
.bo_destroy = drv_gem_bo_destroy,
.bo_map = vc4_bo_map,
+ .bo_unmap = drv_bo_munmap,
};
#endif
static int vgem_init(struct driver *drv)
{
- int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
+ drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
+ &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
return drv_modify_linear_combinations(drv);
}
static int vgem_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t flags)
{
- int ret = drv_dumb_bo_create(bo, ALIGN(width, MESA_LLVMPIPE_TILE_SIZE),
- ALIGN(height, MESA_LLVMPIPE_TILE_SIZE), format, flags);
- return ret;
+ width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
+ height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
+
+ /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. */
+ if (bo->format == DRM_FORMAT_YVU420_ANDROID)
+ height = bo->height;
+
+ return drv_dumb_bo_create(bo, width, height, format, flags);
}
-static uint32_t vgem_resolve_format(uint32_t format)
+static uint32_t vgem_resolve_format(uint32_t format, uint64_t flags)
{
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
- return DRM_FORMAT_YVU420_ANDROID;
+ return DRM_FORMAT_YVU420;
default:
return format;
}
}
-struct backend backend_vgem = {
+const struct backend backend_vgem = {
.name = "vgem",
.init = vgem_init,
.bo_create = vgem_bo_create,
.bo_destroy = drv_dumb_bo_destroy,
.bo_import = drv_prime_bo_import,
.bo_map = drv_dumb_bo_map,
+ .bo_unmap = drv_bo_munmap,
.resolve_format = vgem_resolve_format,
};
--- /dev/null
+/*
+ * Copyright 2014, 2015 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRGL_HW_H
+#define VIRGL_HW_H
+
+struct virgl_box {
+ uint32_t x, y, z;
+ uint32_t w, h, d;
+};
+
+/* formats known by the HW device - based on gallium subset */
+enum virgl_formats {
+ VIRGL_FORMAT_B8G8R8A8_UNORM = 1,
+ VIRGL_FORMAT_B8G8R8X8_UNORM = 2,
+ VIRGL_FORMAT_A8R8G8B8_UNORM = 3,
+ VIRGL_FORMAT_X8R8G8B8_UNORM = 4,
+ VIRGL_FORMAT_B5G5R5A1_UNORM = 5,
+ VIRGL_FORMAT_B4G4R4A4_UNORM = 6,
+ VIRGL_FORMAT_B5G6R5_UNORM = 7,
+ VIRGL_FORMAT_L8_UNORM = 9, /**< ubyte luminance */
+ VIRGL_FORMAT_A8_UNORM = 10, /**< ubyte alpha */
+ VIRGL_FORMAT_L8A8_UNORM = 12, /**< ubyte alpha, luminance */
+ VIRGL_FORMAT_L16_UNORM = 13, /**< ushort luminance */
+
+ VIRGL_FORMAT_Z16_UNORM = 16,
+ VIRGL_FORMAT_Z32_UNORM = 17,
+ VIRGL_FORMAT_Z32_FLOAT = 18,
+ VIRGL_FORMAT_Z24_UNORM_S8_UINT = 19,
+ VIRGL_FORMAT_S8_UINT_Z24_UNORM = 20,
+ VIRGL_FORMAT_Z24X8_UNORM = 21,
+ VIRGL_FORMAT_S8_UINT = 23, /**< ubyte stencil */
+
+ VIRGL_FORMAT_R32_FLOAT = 28,
+ VIRGL_FORMAT_R32G32_FLOAT = 29,
+ VIRGL_FORMAT_R32G32B32_FLOAT = 30,
+ VIRGL_FORMAT_R32G32B32A32_FLOAT = 31,
+
+ VIRGL_FORMAT_R16_UNORM = 48,
+ VIRGL_FORMAT_R16G16_UNORM = 49,
+
+ VIRGL_FORMAT_R16G16B16A16_UNORM = 51,
+
+ VIRGL_FORMAT_R16_SNORM = 56,
+ VIRGL_FORMAT_R16G16_SNORM = 57,
+ VIRGL_FORMAT_R16G16B16A16_SNORM = 59,
+
+ VIRGL_FORMAT_R8_UNORM = 64,
+ VIRGL_FORMAT_R8G8_UNORM = 65,
+
+ VIRGL_FORMAT_R8G8B8A8_UNORM = 67,
+
+ VIRGL_FORMAT_R8_SNORM = 74,
+ VIRGL_FORMAT_R8G8_SNORM = 75,
+ VIRGL_FORMAT_R8G8B8_SNORM = 76,
+ VIRGL_FORMAT_R8G8B8A8_SNORM = 77,
+
+ VIRGL_FORMAT_R16_FLOAT = 91,
+ VIRGL_FORMAT_R16G16_FLOAT = 92,
+ VIRGL_FORMAT_R16G16B16_FLOAT = 93,
+ VIRGL_FORMAT_R16G16B16A16_FLOAT = 94,
+
+ VIRGL_FORMAT_L8_SRGB = 95,
+ VIRGL_FORMAT_L8A8_SRGB = 96,
+ VIRGL_FORMAT_B8G8R8A8_SRGB = 100,
+ VIRGL_FORMAT_B8G8R8X8_SRGB = 101,
+
+ /* compressed formats */
+ VIRGL_FORMAT_DXT1_RGB = 105,
+ VIRGL_FORMAT_DXT1_RGBA = 106,
+ VIRGL_FORMAT_DXT3_RGBA = 107,
+ VIRGL_FORMAT_DXT5_RGBA = 108,
+
+ /* sRGB, compressed */
+ VIRGL_FORMAT_DXT1_SRGB = 109,
+ VIRGL_FORMAT_DXT1_SRGBA = 110,
+ VIRGL_FORMAT_DXT3_SRGBA = 111,
+ VIRGL_FORMAT_DXT5_SRGBA = 112,
+
+ /* rgtc compressed */
+ VIRGL_FORMAT_RGTC1_UNORM = 113,
+ VIRGL_FORMAT_RGTC1_SNORM = 114,
+ VIRGL_FORMAT_RGTC2_UNORM = 115,
+ VIRGL_FORMAT_RGTC2_SNORM = 116,
+
+ VIRGL_FORMAT_A8B8G8R8_UNORM = 121,
+ VIRGL_FORMAT_B5G5R5X1_UNORM = 122,
+ VIRGL_FORMAT_R11G11B10_FLOAT = 124,
+ VIRGL_FORMAT_R9G9B9E5_FLOAT = 125,
+ VIRGL_FORMAT_Z32_FLOAT_S8X24_UINT = 126,
+
+ VIRGL_FORMAT_B10G10R10A2_UNORM = 131,
+ VIRGL_FORMAT_R8G8B8X8_UNORM = 134,
+ VIRGL_FORMAT_B4G4R4X4_UNORM = 135,
+ VIRGL_FORMAT_B2G3R3_UNORM = 139,
+
+ VIRGL_FORMAT_L16A16_UNORM = 140,
+ VIRGL_FORMAT_A16_UNORM = 141,
+
+ VIRGL_FORMAT_A8_SNORM = 147,
+ VIRGL_FORMAT_L8_SNORM = 148,
+ VIRGL_FORMAT_L8A8_SNORM = 149,
+
+ VIRGL_FORMAT_A16_SNORM = 151,
+ VIRGL_FORMAT_L16_SNORM = 152,
+ VIRGL_FORMAT_L16A16_SNORM = 153,
+
+ VIRGL_FORMAT_A16_FLOAT = 155,
+ VIRGL_FORMAT_L16_FLOAT = 156,
+ VIRGL_FORMAT_L16A16_FLOAT = 157,
+
+ VIRGL_FORMAT_A32_FLOAT = 159,
+ VIRGL_FORMAT_L32_FLOAT = 160,
+ VIRGL_FORMAT_L32A32_FLOAT = 161,
+
+ VIRGL_FORMAT_R8_UINT = 177,
+ VIRGL_FORMAT_R8G8_UINT = 178,
+ VIRGL_FORMAT_R8G8B8_UINT = 179,
+ VIRGL_FORMAT_R8G8B8A8_UINT = 180,
+
+ VIRGL_FORMAT_R8_SINT = 181,
+ VIRGL_FORMAT_R8G8_SINT = 182,
+ VIRGL_FORMAT_R8G8B8_SINT = 183,
+ VIRGL_FORMAT_R8G8B8A8_SINT = 184,
+
+ VIRGL_FORMAT_R16_UINT = 185,
+ VIRGL_FORMAT_R16G16_UINT = 186,
+ VIRGL_FORMAT_R16G16B16_UINT = 187,
+ VIRGL_FORMAT_R16G16B16A16_UINT = 188,
+
+ VIRGL_FORMAT_R16_SINT = 189,
+ VIRGL_FORMAT_R16G16_SINT = 190,
+ VIRGL_FORMAT_R16G16B16_SINT = 191,
+ VIRGL_FORMAT_R16G16B16A16_SINT = 192,
+ VIRGL_FORMAT_R32_UINT = 193,
+ VIRGL_FORMAT_R32G32_UINT = 194,
+ VIRGL_FORMAT_R32G32B32_UINT = 195,
+ VIRGL_FORMAT_R32G32B32A32_UINT = 196,
+
+ VIRGL_FORMAT_R32_SINT = 197,
+ VIRGL_FORMAT_R32G32_SINT = 198,
+ VIRGL_FORMAT_R32G32B32_SINT = 199,
+ VIRGL_FORMAT_R32G32B32A32_SINT = 200,
+
+ VIRGL_FORMAT_A8_UINT = 201,
+ VIRGL_FORMAT_L8_UINT = 203,
+ VIRGL_FORMAT_L8A8_UINT = 204,
+
+ VIRGL_FORMAT_A8_SINT = 205,
+ VIRGL_FORMAT_L8_SINT = 207,
+ VIRGL_FORMAT_L8A8_SINT = 208,
+
+ VIRGL_FORMAT_A16_UINT = 209,
+ VIRGL_FORMAT_L16_UINT = 211,
+ VIRGL_FORMAT_L16A16_UINT = 212,
+
+ VIRGL_FORMAT_A16_SINT = 213,
+ VIRGL_FORMAT_L16_SINT = 215,
+ VIRGL_FORMAT_L16A16_SINT = 216,
+
+ VIRGL_FORMAT_A32_UINT = 217,
+ VIRGL_FORMAT_L32_UINT = 219,
+ VIRGL_FORMAT_L32A32_UINT = 220,
+
+ VIRGL_FORMAT_A32_SINT = 221,
+ VIRGL_FORMAT_L32_SINT = 223,
+ VIRGL_FORMAT_L32A32_SINT = 224,
+
+ VIRGL_FORMAT_B10G10R10A2_UINT = 225,
+ VIRGL_FORMAT_R8G8B8X8_SNORM = 229,
+
+ VIRGL_FORMAT_R8G8B8X8_SRGB = 230,
+
+ VIRGL_FORMAT_B10G10R10X2_UNORM = 233,
+ VIRGL_FORMAT_R16G16B16X16_UNORM = 234,
+ VIRGL_FORMAT_R16G16B16X16_SNORM = 235,
+ VIRGL_FORMAT_MAX,
+};
+
+#define VIRGL_BIND_DEPTH_STENCIL (1 << 0)
+#define VIRGL_BIND_RENDER_TARGET (1 << 1)
+#define VIRGL_BIND_SAMPLER_VIEW (1 << 3)
+#define VIRGL_BIND_VERTEX_BUFFER (1 << 4)
+#define VIRGL_BIND_INDEX_BUFFER (1 << 5)
+#define VIRGL_BIND_CONSTANT_BUFFER (1 << 6)
+#define VIRGL_BIND_DISPLAY_TARGET (1 << 7)
+#define VIRGL_BIND_STREAM_OUTPUT (1 << 11)
+#define VIRGL_BIND_CURSOR (1 << 16)
+#define VIRGL_BIND_CUSTOM (1 << 17)
+#define VIRGL_BIND_SCANOUT (1 << 18)
+
+struct virgl_caps_bool_set1 {
+ unsigned indep_blend_enable:1;
+ unsigned indep_blend_func:1;
+ unsigned cube_map_array:1;
+ unsigned shader_stencil_export:1;
+ unsigned conditional_render:1;
+ unsigned start_instance:1;
+ unsigned primitive_restart:1;
+ unsigned blend_eq_sep:1;
+ unsigned instanceid:1;
+ unsigned vertex_element_instance_divisor:1;
+ unsigned seamless_cube_map:1;
+ unsigned occlusion_query:1;
+ unsigned timer_query:1;
+ unsigned streamout_pause_resume:1;
+ unsigned texture_multisample:1;
+ unsigned fragment_coord_conventions:1;
+ unsigned depth_clip_disable:1;
+ unsigned seamless_cube_map_per_texture:1;
+ unsigned ubo:1;
+ unsigned color_clamping:1; /* not in GL 3.1 core profile */
+ unsigned poly_stipple:1; /* not in GL 3.1 core profile */
+ unsigned mirror_clamp:1;
+ unsigned texture_query_lod:1;
+};
+
+/* endless expansion capabilities - current gallium has 252 formats */
+struct virgl_supported_format_mask {
+ uint32_t bitmask[16];
+};
+/* capabilities set 2 - version 1 - 32-bit and float values */
+struct virgl_caps_v1 {
+ uint32_t max_version;
+ struct virgl_supported_format_mask sampler;
+ struct virgl_supported_format_mask render;
+ struct virgl_supported_format_mask depthstencil;
+ struct virgl_supported_format_mask vertexbuffer;
+ struct virgl_caps_bool_set1 bset;
+ uint32_t glsl_level;
+ uint32_t max_texture_array_layers;
+ uint32_t max_streamout_buffers;
+ uint32_t max_dual_source_render_targets;
+ uint32_t max_render_targets;
+ uint32_t max_samples;
+ uint32_t prim_mask;
+ uint32_t max_tbo_size;
+ uint32_t max_uniform_blocks;
+ uint32_t max_viewports;
+ uint32_t max_texture_gather_components;
+};
+
+union virgl_caps {
+ uint32_t max_version;
+ struct virgl_caps_v1 v1;
+};
+
+enum virgl_errors {
+ VIRGL_ERROR_NONE,
+ VIRGL_ERROR_UNKNOWN,
+ VIRGL_ERROR_UNKNOWN_RESOURCE_FORMAT,
+};
+
+enum virgl_ctx_errors {
+ VIRGL_ERROR_CTX_NONE,
+ VIRGL_ERROR_CTX_UNKNOWN,
+ VIRGL_ERROR_CTX_ILLEGAL_SHADER,
+ VIRGL_ERROR_CTX_ILLEGAL_HANDLE,
+ VIRGL_ERROR_CTX_ILLEGAL_RESOURCE,
+ VIRGL_ERROR_CTX_ILLEGAL_SURFACE,
+ VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT,
+ VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER,
+};
+
+
+#define VIRGL_RESOURCE_Y_0_TOP (1 << 0)
+#endif
/*
- * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Copyright 2017 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <virtgpu_drm.h>
+#include <xf86drm.h>
+
#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
+#include "virgl_hw.h"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 0x1000
+#endif
+#define PIPE_TEXTURE_2D 2
#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
- DRM_FORMAT_XRGB8888 };
+ DRM_FORMAT_BGR888, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 };
+
+static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU420_ANDROID };
+
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_RG88 };
+
+struct virtio_gpu_priv {
+ int has_3d;
+};
+
+static uint32_t translate_format(uint32_t drm_fourcc, uint32_t plane)
+{
+ switch (drm_fourcc) {
+ case DRM_FORMAT_XRGB8888:
+ return VIRGL_FORMAT_B8G8R8X8_UNORM;
+ case DRM_FORMAT_ARGB8888:
+ return VIRGL_FORMAT_B8G8R8A8_UNORM;
+ case DRM_FORMAT_XBGR8888:
+ return VIRGL_FORMAT_R8G8B8X8_UNORM;
+ case DRM_FORMAT_ABGR8888:
+ return VIRGL_FORMAT_R8G8B8A8_UNORM;
+ case DRM_FORMAT_RGB565:
+ return VIRGL_FORMAT_B5G6R5_UNORM;
+ case DRM_FORMAT_R8:
+ return VIRGL_FORMAT_R8_UNORM;
+ case DRM_FORMAT_RG88:
+ return VIRGL_FORMAT_R8G8_UNORM;
+ default:
+ return 0;
+ }
+}
+
+static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
+ height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
+
+ /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. */
+ if (bo->format == DRM_FORMAT_YVU420_ANDROID)
+ height = bo->height;
+
+ return drv_dumb_bo_create(bo, width, height, format, use_flags);
+}
+
+static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ int ret;
+ ssize_t plane;
+ ssize_t num_planes = drv_num_planes_from_format(format);
+ uint32_t stride0;
+
+ for (plane = 0; plane < num_planes; plane++) {
+ uint32_t stride = drv_stride_from_format(format, width, plane);
+ uint32_t size = drv_size_from_format(format, stride, height, plane);
+ uint32_t res_format = translate_format(format, plane);
+ struct drm_virtgpu_resource_create res_create;
+
+ memset(&res_create, 0, sizeof(res_create));
+ size = ALIGN(size, PAGE_SIZE);
+ /*
+ * Setting the target is intended to ensure this resource gets bound as a 2D
+ * texture in the host renderer's GL state. All of these resource properties are
+ * sent unchanged by the kernel to the host, which in turn sends them unchanged to
+ * virglrenderer. When virglrenderer makes a resource, it will convert the target
+ * enum to the equivalent one in GL and then bind the resource to that target.
+ */
+ res_create.target = PIPE_TEXTURE_2D;
+ res_create.format = res_format;
+ res_create.bind = VIRGL_BIND_RENDER_TARGET;
+ res_create.width = width;
+ res_create.height = height;
+ res_create.depth = 1;
+ res_create.array_size = 1;
+ res_create.last_level = 0;
+ res_create.nr_samples = 0;
+ res_create.stride = stride;
+ res_create.size = size;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n",
+ strerror(errno));
+ goto fail;
+ }
+
+ bo->handles[plane].u32 = res_create.bo_handle;
+ }
+
+ stride0 = drv_stride_from_format(format, width, 0);
+ drv_bo_from_format(bo, stride0, height, format);
+
+ for (plane = 0; plane < num_planes; plane++)
+ bo->offsets[plane] = 0;
+
+ return 0;
+
+fail:
+ for (plane--; plane >= 0; plane--) {
+ struct drm_gem_close gem_close;
+ memset(&gem_close, 0, sizeof(gem_close));
+ gem_close.handle = bo->handles[plane].u32;
+ drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ }
+
+ return ret;
+}
+
+static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ int ret;
+ struct drm_virtgpu_map gem_map;
+
+ memset(&gem_map, 0, sizeof(gem_map));
+ gem_map.handle = bo->handles[0].u32;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
+ return MAP_FAILED;
+ }
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
- DRM_FORMAT_YVU420_ANDROID };
+ vma->length = bo->total_size;
+ return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ gem_map.offset);
+}
static int virtio_gpu_init(struct driver *drv)
{
int ret;
- ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
- &LINEAR_METADATA, BO_USE_RENDER_MASK);
- if (ret)
- return ret;
+ struct virtio_gpu_priv *priv;
+ struct drm_virtgpu_getparam args;
- ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
- &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
- if (ret)
- return ret;
+ priv = calloc(1, sizeof(*priv));
+ drv->priv = priv;
+
+ memset(&args, 0, sizeof(args));
+ args.param = VIRTGPU_PARAM_3D_FEATURES;
+ args.value = (uint64_t)(uintptr_t)&priv->has_3d;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
+ if (ret) {
+ drv_log("virtio 3D acceleration is not available\n");
+ /* Be paranoid */
+ priv->has_3d = 0;
+ }
+
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
+
+ if (priv->has_3d)
+ drv_add_combinations(drv, texture_source_formats,
+ ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
+ else
+ drv_add_combinations(drv, dumb_texture_source_formats,
+ ARRAY_SIZE(dumb_texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
return drv_modify_linear_combinations(drv);
}
+static void virtio_gpu_close(struct driver *drv)
+{
+ free(drv->priv);
+ drv->priv = NULL;
+}
+
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
- uint32_t flags)
+ uint64_t use_flags)
{
- int ret = drv_dumb_bo_create(bo, ALIGN(width, MESA_LLVMPIPE_TILE_SIZE),
- ALIGN(height, MESA_LLVMPIPE_TILE_SIZE), format, flags);
- return ret;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return virtio_virgl_bo_create(bo, width, height, format, use_flags);
+ else
+ return virtio_dumb_bo_create(bo, width, height, format, use_flags);
+}
+
+static int virtio_gpu_bo_destroy(struct bo *bo)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return drv_gem_bo_destroy(bo);
+ else
+ return drv_dumb_bo_destroy(bo);
+}
+
+static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return virtio_virgl_bo_map(bo, vma, plane, map_flags);
+ else
+ return drv_dumb_bo_map(bo, vma, plane, map_flags);
+}
+
+static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ int ret;
+ struct drm_virtgpu_3d_transfer_from_host xfer;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+
+ if (!priv->has_3d)
+ return 0;
+
+ memset(&xfer, 0, sizeof(xfer));
+ xfer.bo_handle = mapping->vma->handle;
+ xfer.box.x = mapping->rect.x;
+ xfer.box.y = mapping->rect.y;
+ xfer.box.w = mapping->rect.width;
+ xfer.box.h = mapping->rect.height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ int ret;
+ struct drm_virtgpu_3d_transfer_to_host xfer;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+
+ if (!priv->has_3d)
+ return 0;
+
+ if (!(mapping->vma->map_flags & BO_MAP_WRITE))
+ return 0;
+
+ memset(&xfer, 0, sizeof(xfer));
+ xfer.bo_handle = mapping->vma->handle;
+ xfer.box.x = mapping->rect.x;
+ xfer.box.y = mapping->rect.y;
+ xfer.box.w = mapping->rect.width;
+ xfer.box.h = mapping->rect.height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ return 0;
}
-static uint32_t virtio_gpu_resolve_format(uint32_t format)
+static uint32_t virtio_gpu_resolve_format(uint32_t format, uint64_t use_flags)
{
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
/*HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
- return DRM_FORMAT_YVU420_ANDROID;
+ return DRM_FORMAT_YVU420;
default:
return format;
}
}
-struct backend backend_virtio_gpu = {
+const struct backend backend_virtio_gpu = {
.name = "virtio_gpu",
.init = virtio_gpu_init,
+ .close = virtio_gpu_close,
.bo_create = virtio_gpu_bo_create,
- .bo_destroy = drv_dumb_bo_destroy,
+ .bo_destroy = virtio_gpu_bo_destroy,
.bo_import = drv_prime_bo_import,
- .bo_map = drv_dumb_bo_map,
+ .bo_map = virtio_gpu_bo_map,
+ .bo_unmap = drv_bo_munmap,
+ .bo_invalidate = virtio_gpu_bo_invalidate,
+ .bo_flush = virtio_gpu_bo_flush,
.resolve_format = virtio_gpu_resolve_format,
};