+ return drv_dumb_bo_create(bo, width, height, format, use_flags);
+}
+
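+/*
+ * With 3D acceleration, buffers are backed by virgl host resources created
+ * via DRM_IOCTL_VIRTGPU_RESOURCE_CREATE rather than by dumb buffers.
+ */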
+static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ int ret;
+ ssize_t plane;
+ ssize_t num_planes = drv_num_planes_from_format(format);
+ uint32_t stride0;
+
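+	/* Each plane of a multi-planar format becomes its own host resource. */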
+ for (plane = 0; plane < num_planes; plane++) {
+ uint32_t stride = drv_stride_from_format(format, width, plane);
+ uint32_t size = drv_size_from_format(format, stride, height, plane);
+ uint32_t res_format = translate_format(format, plane);
+ struct drm_virtgpu_resource_create res_create;
+
+ memset(&res_create, 0, sizeof(res_create));
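+		/* Pad the size to a whole page; the kernel allocates backing storage in page units. */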
+ size = ALIGN(size, PAGE_SIZE);
+ /*
+ * Setting the target is intended to ensure this resource gets bound as a 2D
+ * texture in the host renderer's GL state. All of these resource properties are
+ * sent unchanged by the kernel to the host, which in turn sends them unchanged to
+ * virglrenderer. When virglrenderer makes a resource, it will convert the target
+ * enum to the equivalent one in GL and then bind the resource to that target.
+ */
+ res_create.target = PIPE_TEXTURE_2D;
+ res_create.format = res_format;
+ res_create.bind = VIRGL_BIND_RENDER_TARGET;
+ res_create.width = width;
+ res_create.height = height;
+ res_create.depth = 1;
+ res_create.array_size = 1;
+ res_create.last_level = 0;
+ res_create.nr_samples = 0;
+ res_create.stride = stride;
+ res_create.size = size;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n",
+ strerror(errno));
+ goto fail;
+ }
+
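+		/* On success the kernel hands back a GEM handle for the new resource. */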
+ bo->handles[plane].u32 = res_create.bo_handle;
+ }
+
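+	/*
+	 * drv_bo_from_format() fills in per-plane strides, sizes and offsets from
+	 * the plane 0 stride; the offsets it computes assume one contiguous
+	 * buffer, so they are reset below.
+	 */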
+ stride0 = drv_stride_from_format(format, width, 0);
+ drv_bo_from_format(bo, stride0, height, format);
+
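+	/* Each plane lives in its own resource, so every plane starts at offset 0. */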
+ for (plane = 0; plane < num_planes; plane++)
+ bo->offsets[plane] = 0;
+
+ return 0;
+
+fail:
+ for (plane--; plane >= 0; plane--) {
+ struct drm_gem_close gem_close;
+ memset(&gem_close, 0, sizeof(gem_close));
+ gem_close.handle = bo->handles[plane].u32;
+ drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+ }
+
+ return ret;
+}
+
+static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ int ret;
+ struct drm_virtgpu_map gem_map;
+
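+	/*
+	 * Mapping always goes through the first plane's handle;
+	 * DRM_IOCTL_VIRTGPU_MAP returns the fake offset to pass to mmap() on the
+	 * DRM fd.
+	 */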
+ memset(&gem_map, 0, sizeof(gem_map));
+ gem_map.handle = bo->handles[0].u32;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
+ return MAP_FAILED;
+ }
+
+ vma->length = bo->total_size;
+ return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
+ gem_map.offset);
+}
+
+static int virtio_gpu_init(struct driver *drv)
+{
+ int ret;
+ struct virtio_gpu_priv *priv;
+ struct drm_virtgpu_getparam args;
+
+	priv = calloc(1, sizeof(*priv));
+	if (!priv)
+		return -ENOMEM;
+
+	drv->priv = priv;
+
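+	/*
+	 * Probe the kernel for host-side 3D (virgl) support; the kernel writes
+	 * the result through the user pointer passed in args.value.
+	 */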
+ memset(&args, 0, sizeof(args));
+ args.param = VIRTGPU_PARAM_3D_FEATURES;
+ args.value = (uint64_t)(uintptr_t)&priv->has_3d;
+ ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
+ if (ret) {
+ drv_log("virtio 3D acceleration is not available\n");
+ /* Be paranoid */
+ priv->has_3d = 0;
+ }
+
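+	/*
+	 * Linear render-target combinations are advertised either way; the set
+	 * of texture-source formats depends on whether virgl resources or dumb
+	 * buffers back the allocations.
+	 */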
+ drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
+ &LINEAR_METADATA, BO_USE_RENDER_MASK);
+
+ if (priv->has_3d)
+ drv_add_combinations(drv, texture_source_formats,
+ ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
+ else
+ drv_add_combinations(drv, dumb_texture_source_formats,
+ ARRAY_SIZE(dumb_texture_source_formats), &LINEAR_METADATA,
+ BO_USE_TEXTURE_MASK);
+
+ return drv_modify_linear_combinations(drv);
+}
+
+static void virtio_gpu_close(struct driver *drv)
+{
+ free(drv->priv);
+ drv->priv = NULL;
+}
+
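+/*
+ * The remaining entry points dispatch on has_3d: virgl resources when the
+ * host renderer is available, dumb buffers otherwise.
+ */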
+static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
+ uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return virtio_virgl_bo_create(bo, width, height, format, use_flags);
+ else
+ return virtio_dumb_bo_create(bo, width, height, format, use_flags);
+}
+
+static int virtio_gpu_bo_destroy(struct bo *bo)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return drv_gem_bo_destroy(bo);
+ else
+ return drv_dumb_bo_destroy(bo);
+}
+
+static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+ if (priv->has_3d)
+ return virtio_virgl_bo_map(bo, vma, plane, map_flags);
+ else
+ return drv_dumb_bo_map(bo, vma, plane, map_flags);
+}
+
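+/*
+ * Invalidate pulls the host-side resource contents into the guest buffer so
+ * the CPU reads current data; the dumb-buffer path is already CPU-visible and
+ * needs no transfer.
+ */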
+static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
+{
+ int ret;
+ struct drm_virtgpu_3d_transfer_from_host xfer;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+
+ if (!priv->has_3d)
+ return 0;
+
+ memset(&xfer, 0, sizeof(xfer));
+ xfer.bo_handle = mapping->vma->handle;
+ xfer.box.x = mapping->rect.x;
+ xfer.box.y = mapping->rect.y;
+ xfer.box.w = mapping->rect.width;
+ xfer.box.h = mapping->rect.height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
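+/*
+ * Flush is the mirror of invalidate: after a CPU write, push the dirty
+ * rectangle back to the host-side resource.
+ */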
+static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
+{
+ int ret;
+ struct drm_virtgpu_3d_transfer_to_host xfer;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
+
+ if (!priv->has_3d)
+ return 0;
+
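+	/* A mapping that was never writable cannot have dirtied the buffer. */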
+ if (!(mapping->vma->map_flags & BO_MAP_WRITE))
+ return 0;
+
+ memset(&xfer, 0, sizeof(xfer));
+ xfer.bo_handle = mapping->vma->handle;
+ xfer.box.x = mapping->rect.x;
+ xfer.box.y = mapping->rect.y;
+ xfer.box.w = mapping->rect.width;
+ xfer.box.h = mapping->rect.height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
+ return ret;
+ }
+
+ return 0;