#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
struct drm_virtgpu_getparam {
__u64 param;
__u64 value;
};
struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
union {
- __u32 stride;
+ __u32 blob_mem;
__u32 strides[4]; /* strides[0] is accessible with stride. */
};
__u32 num_planes;
__u32 offsets[4];
__u64 format_modifier;
};
struct drm_virtgpu_3d_transfer_to_host {
__u32 bo_handle;
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
__u32 bo_handle;
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
__u64 addr;
__u32 size;
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+ * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+ * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_GET_CAPS \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
+
#if defined(__cplusplus)
}
#endif
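/*
 * Not part of the patch: a minimal sketch of how userspace might exercise the
 * new ioctl for a VIRTGPU_BLOB_MEM_GUEST allocation, assuming an already-open
 * virtio-gpu DRM fd and the updated virtgpu_drm.h above on the include path.
 * Guest-memory blobs need no command stream, so cmd, cmd_size and blob_id stay
 * zero; on success the kernel returns a GEM handle in bo_handle. The helper
 * name create_guest_blob is hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "virtgpu_drm.h"

static int create_guest_blob(int fd, uint64_t size, uint32_t *out_handle)
{
	struct drm_virtgpu_resource_create_blob blob;

	memset(&blob, 0, sizeof(blob));
	blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;           /* zero is an invalid blob_mem */
	blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE; /* we intend to mmap the GEM object */
	blob.size = size;

	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob))
		return -errno;

	*out_handle = blob.bo_handle;
	return 0;
}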
enum feature_id {
feat_3d,
feat_capset_fix,
+ feat_resource_blob,
+ feat_host_visible,
+ feat_host_cross_device,
feat_max,
};
x, #x, 0 \
}
-static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
- FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };
+static struct feature features[] = {
+ FEATURE(VIRTGPU_PARAM_3D_FEATURES), FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
+ FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
+ FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
+};
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
drv->priv = NULL;
}
+static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
+{
+ int ret;
+ uint32_t stride;
+ uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
+ struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+
+ stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
+ drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
+ bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
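+ /* Stash the blob flags in meta.tiling; invalidate/flush check this below to skip transfers for mappable blobs. */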
+ bo->meta.tiling = VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+
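+ /* A HOST3D blob is backed by host memory; the attached virgl command asks the host to create the corresponding 3D resource. */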
+ cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
+ cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
+ cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
+ cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
+ cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
+ cmd[VIRGL_PIPE_RES_CREATE_BIND] = use_flags_to_bind(bo->meta.use_flags);
+ cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
+
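+ /* cmd_size is in bytes: one VIRGL_CMD0 header dword plus VIRGL_PIPE_RES_CREATE_SIZE payload dwords. */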
+ drm_rc_blob.cmd = (uint64_t)&cmd;
+ drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
+ drm_rc_blob.size = bo->meta.total_size;
+ drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
+ drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+ if (ret < 0) {
+ drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
+ return -errno;
+ }
+
+ for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
+ bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+
+ return 0;
+}
+
+static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ // TODO(gurchetansingh): remove once all minigbm users are blob-safe
+#ifndef VIRTIO_GPU_NEXT
+ return false;
+#endif
+
+ // Only use blob when host gbm is available
+ if (!priv->host_gbm_enabled)
+ return false;
+
+ // Focus on SW read/write apps for now
+ if (use_flags & (BO_USE_RENDERING | BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER))
+ return false;
+
+ // Simple, strictly defined formats for now
+ if (format != DRM_FORMAT_YVU420_ANDROID && format != DRM_FORMAT_R8)
+ return false;
+
+ if (use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR |
+ BO_USE_CAMERA_WRITE | BO_USE_CAMERA_READ))
+ return true;
+
+ return false;
+}
+
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
uint64_t use_flags)
{
+ if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
+ should_use_blob(bo->drv, format, use_flags))
+ return virtio_gpu_bo_create_blob(bo->drv, bo);
+
if (features[feat_3d].enabled)
return virtio_virgl_bo_create(bo, width, height, format, use_flags);
else
BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
return 0;
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;
if (!(mapping->vma->map_flags & BO_MAP_WRITE))
return 0;
+ if (features[feat_resource_blob].enabled &&
+ (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return 0;
+
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;