#include <assert.h>
#include <errno.h>
+#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include "drv_priv.h"
+#include "external/virgl_hw.h"
+#include "external/virgl_protocol.h"
+#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"
-#include "virgl_hw.h"
-#include "virtgpu_drm.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
enum feature_id {
feat_3d,
feat_capset_fix,
+ feat_resource_blob,
+ feat_host_visible,
+ feat_host_cross_device,
feat_max,
};
#define FEATURE(x) \
{ \
x, #x, 0 \
}
-static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
- FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };
+static struct feature features[] = {
+ FEATURE(VIRTGPU_PARAM_3D_FEATURES), FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
+ FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
+ FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
+};
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
int caps_is_v2;
union virgl_caps caps;
int host_gbm_enabled;
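+ // Next id handed to the host for each new blob resource (incremented atomically).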
+ atomic_int next_blob_id;
};
static uint32_t translate_format(uint32_t drm_fourcc)
{
switch (drm_fourcc) {
case DRM_FORMAT_ABGR8888:
return VIRGL_FORMAT_R8G8B8A8_UNORM;
case DRM_FORMAT_ABGR16161616F:
- return VIRGL_FORMAT_R16G16B16A16_UNORM;
+ return VIRGL_FORMAT_R16G16B16A16_FLOAT;
case DRM_FORMAT_RGB565:
return VIRGL_FORMAT_B5G6R5_UNORM;
case DRM_FORMAT_R8:
handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);
+ if (use_flags & BO_USE_PROTECTED) {
+ handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
+ } else {
+ // Make sure we don't set both flags, since that could be mistaken for
+ // protected. Give OFTEN priority over RARELY.
+ if (use_flags & BO_USE_SW_READ_OFTEN) {
+ handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
+ VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
+ } else {
+ handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
+ VIRGL_BIND_MINIGBM_SW_READ_RARELY);
+ }
+ if (use_flags & BO_USE_SW_WRITE_OFTEN) {
+ handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
+ VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
+ } else {
+ handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
+ VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
+ }
+ }
- // All host drivers only support linear camera buffer formats. If
- // that changes, this will need to be modified.
- handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
- handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);
+ handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
+ handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
+ handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
+ VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
+ handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
+ VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);
if (use_flags) {
drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
int ret;
size_t i;
uint32_t stride;
- struct drm_virtgpu_resource_create res_create;
+ struct drm_virtgpu_resource_create res_create = { 0 };
struct bo_metadata emulated_metadata;
if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
* virglrenderer. When virglrenderer makes a resource, it will convert the target
* enum to the equivalent one in GL and then bind the resource to that target.
*/
- memset(&res_create, 0, sizeof(res_create));
res_create.target = PIPE_TEXTURE_2D;
res_create.format = translate_format(format);
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
int ret;
- struct drm_virtgpu_map gem_map;
+ struct drm_virtgpu_map gem_map = { 0 };
- memset(&gem_map, 0, sizeof(gem_map));
gem_map.handle = bo->handles[0].u32;
-
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
if (ret) {
drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
int ret;
- struct drm_virtgpu_get_caps cap_args;
+ struct drm_virtgpu_get_caps cap_args = { 0 };
*caps_is_v2 = 0;
- memset(&cap_args, 0, sizeof(cap_args));
cap_args.addr = (unsigned long long)caps;
if (features[feat_capset_fix].enabled) {
*caps_is_v2 = 1;
virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);
+ drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
- BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
+ BO_USE_HW_VIDEO_ENCODER);
drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
- BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
- drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
- drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
- BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
+ BO_USE_HW_VIDEO_ENCODER);
return drv_modify_linear_combinations(drv);
}
drv->priv = NULL;
}
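+/*
+ * Creates a host-backed blob resource: encodes a VIRGL_CCMD_PIPE_RESOURCE_CREATE
+ * command describing the texture, tags it with a fresh blob id, and submits both
+ * via DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB so the host can back the blob with a
+ * real resource.
+ */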
+static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
+{
+ int ret;
+ uint32_t stride;
+ uint32_t cur_blob_id;
+ uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
+ struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
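+ // Every blob is exportable; request a guest mapping only for CPU access and
+ // cross-device sharing only when non-GPU hardware uses the buffer.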
+ uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ if (bo->meta.use_flags & BO_USE_SW_MASK)
+ blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
+ if (bo->meta.use_flags & BO_USE_NON_GPU_HW)
+ blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
+
+ cur_blob_id = atomic_fetch_add(&priv->next_blob_id, 1);
+ stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
+ drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
+ bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
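+ // Stash the blob flags in the tiling field so invalidate/flush can recognize
+ // host-mappable blobs and skip explicit transfers.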
+ bo->meta.tiling = blob_flags;
+
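+ // Command stream executed by virglrenderer to create the host resource that
+ // backs this blob id.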
+ cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
+ cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
+ cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
+ cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
+ cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
+ cmd[VIRGL_PIPE_RES_CREATE_BIND] = use_flags_to_bind(bo->meta.use_flags);
+ cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
+ cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = cur_blob_id;
+
+ drm_rc_blob.cmd = (uint64_t)&cmd;
+ drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
+ drm_rc_blob.size = bo->meta.total_size;
+ drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
+ drm_rc_blob.blob_flags = blob_flags;
+ drm_rc_blob.blob_id = cur_blob_id;
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+ if (ret < 0) {
+ drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
+ return -errno;
+ }
+
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+
+ return 0;
+}
+
+static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ // TODO(gurchetansingh): remove once all minigbm users are blob-safe
+#ifndef VIRTIO_GPU_NEXT
+ return false;
+#endif
+
+ // Only use blob when host gbm is available
+ if (!priv->host_gbm_enabled)
+ return false;
+
+ // Use regular resources if only the GPU needs efficient access
+ if (!(use_flags &
+ (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR | BO_USE_NON_GPU_HW)))
+ return false;
+
+ switch (format) {
+ case DRM_FORMAT_YVU420_ANDROID:
+ case DRM_FORMAT_R8:
+ // Formats with strictly defined strides are supported
+ return true;
+ case DRM_FORMAT_NV12:
+ // Knowing buffer metadata at buffer creation isn't yet supported, so buffers
+ // can't be properly mapped into the guest.
+ return (use_flags & BO_USE_SW_MASK) == 0;
+ default:
+ return false;
+ }
+}
+
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
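+ // Prefer a blob resource when blob and host-visible memory support are
+ // advertised and the format/use-flag combination is known to be blob-safe.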
+ if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
+ should_use_blob(bo->drv, format, use_flags))
+ return virtio_gpu_bo_create_blob(bo->drv, bo);
+
if (features[feat_3d].enabled)
return virtio_virgl_bo_create(bo, width, height, format, use_flags);
else
{
int ret;
size_t i;
- struct drm_virtgpu_3d_transfer_from_host xfer;
- struct drm_virtgpu_3d_wait waitcmd;
+ struct drm_virtgpu_3d_transfer_from_host xfer = { 0 };
+ struct drm_virtgpu_3d_wait waitcmd = { 0 };
struct virtio_transfers_params xfer_params;
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
return 0;
- memset(&xfer, 0, sizeof(xfer));
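+ // Host-mappable blobs are mapped directly into the guest, so there is nothing
+ // to transfer from the host.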
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
xfer.bo_handle = mapping->vma->handle;
+ if (mapping->rect.x || mapping->rect.y) {
+ /*
+ * virglrenderer uses the box parameters and assumes that offset == 0 for planar
+ * images
+ */
+ if (bo->meta.num_planes == 1) {
+ xfer.offset =
+ (bo->meta.strides[0] * mapping->rect.y) +
+ drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
+ }
+ }
+
if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
// Unfortunately, the kernel doesn't actually pass the guest layer_stride
// and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
// The transfer needs to complete before invalidate returns so that any host changes
// are visible and to ensure the host doesn't overwrite subsequent guest changes.
// TODO(b/136733358): Support returning fences from transfers
- memset(&waitcmd, 0, sizeof(waitcmd));
waitcmd.handle = mapping->vma->handle;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
if (ret) {
{
int ret;
size_t i;
- struct drm_virtgpu_3d_transfer_to_host xfer;
- struct drm_virtgpu_3d_wait waitcmd;
+ struct drm_virtgpu_3d_transfer_to_host xfer = { 0 };
+ struct drm_virtgpu_3d_wait waitcmd = { 0 };
struct virtio_transfers_params xfer_params;
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
if (!(mapping->vma->map_flags & BO_MAP_WRITE))
return 0;
- memset(&xfer, 0, sizeof(xfer));
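+ // Guest writes through a direct mapping of a host-mappable blob are already
+ // visible to the host; no transfer is needed.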
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
xfer.bo_handle = mapping->vma->handle;
+ if (mapping->rect.x || mapping->rect.y) {
+ /*
+ * virglrenderer uses the box parameters and assumes that offset == 0 for planar
+ * images
+ */
+ if (bo->meta.num_planes == 1) {
+ xfer.offset =
+ (bo->meta.strides[0] * mapping->rect.y) +
+ drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
+ }
+ }
+
// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
// the level to work around this.
// buffer, we need to wait for the transfer to complete for consistency.
// TODO(b/136733358): Support returning fences from transfers
if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
- memset(&waitcmd, 0, sizeof(waitcmd));
waitcmd.handle = mapping->vma->handle;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
uint32_t offsets[DRV_MAX_PLANES])
{
int ret;
- struct drm_virtgpu_resource_info res_info;
+ struct drm_virtgpu_resource_info res_info = { 0 };
if (!features[feat_3d].enabled)
return 0;
- memset(&res_info, 0, sizeof(res_info));
res_info.bo_handle = bo->handles[0].u32;
ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
if (ret) {