* found in the LICENSE file.
*/
+#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888 };
-static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
- DRM_FORMAT_NV12,
- DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t dumb_texture_source_formats[] = {
+ DRM_FORMAT_R8, DRM_FORMAT_R16, DRM_FORMAT_YVU420,
+ DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID
+};
-static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
- DRM_FORMAT_YVU420_ANDROID };
+static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
+ DRM_FORMAT_R8, DRM_FORMAT_R16,
+ DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };
struct virtio_gpu_priv {
int caps_is_v2;
union virgl_caps caps;
+ int host_gbm_enabled;
};
static uint32_t translate_format(uint32_t drm_fourcc)
{
switch (drm_fourcc) {
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB888:
+ return VIRGL_FORMAT_R8G8B8_UNORM;
case DRM_FORMAT_XRGB8888:
return VIRGL_FORMAT_B8G8R8X8_UNORM;
case DRM_FORMAT_ARGB8888:
return VIRGL_FORMAT_B8G8R8A8_UNORM;
case DRM_FORMAT_XBGR8888:
return VIRGL_FORMAT_R8G8B8X8_UNORM;
case DRM_FORMAT_ABGR8888:
return VIRGL_FORMAT_R8G8B8A8_UNORM;
+ case DRM_FORMAT_ABGR16161616F:
+ return VIRGL_FORMAT_R16G16B16A16_FLOAT;
case DRM_FORMAT_RGB565:
return VIRGL_FORMAT_B5G6R5_UNORM;
case DRM_FORMAT_R8:
return VIRGL_FORMAT_R8_UNORM;
case DRM_FORMAT_RG88:
return VIRGL_FORMAT_R8G8_UNORM;
case DRM_FORMAT_NV12:
return VIRGL_FORMAT_NV12;
+ case DRM_FORMAT_NV21:
+ return VIRGL_FORMAT_NV21;
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YVU420_ANDROID:
return VIRGL_FORMAT_YV12;
default:
return 0;
}
}
-static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
- uint32_t drm_format)
+static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
+ uint32_t drm_format)
{
uint32_t virgl_format = translate_format(drm_format);
if (!virgl_format) {
return false;
}

// Each virgl format occupies one bit in the 32-bit words of the bitmask.
uint32_t bitmask_index = virgl_format / 32;
uint32_t bit_index = virgl_format % 32;
return supported->bitmask[bitmask_index] & (1 << bit_index);
}
+// The metadata generated here for emulated buffers is slightly different than the metadata
+// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
+// functions below, the emulated buffers are oversized. For example, ignoring stride alignment
+// requirements to demonstrate, a 6x6 YUV420 image buffer might have the following layout from
+// drv_bo_from_format:
+//
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | U | U | U | U | U | U |
+// | U | U | U | V | V | V |
+// | V | V | V | V | V | V |
+//
+// where each plane immediately follows the previous plane in memory. This layout makes it
+// difficult to compute the transfers needed for example when the middle 2x2 region of the
+// image is locked and needs to be flushed/invalidated.
+//
+// Emulated multi-plane buffers instead have a layout of:
+//
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | Y | Y | Y | Y | Y | Y |
+// | U | U | U | | | |
+// | U | U | U | | | |
+// | U | U | U | | | |
+// | V | V | V | | | |
+// | V | V | V | | | |
+// | V | V | V | | | |
+//
+// where each plane is placed as a sub-image (albeit with a very large stride) in order to
+// simplify transfers into 3 sub-image transfers for the above example.
+//
+// Additional note: the V-plane is not placed to the right of the U-plane due to some
+// observed failures in media framework code which assumes the V-plane is not
+// "row-interlaced" with the U-plane.
+static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
+{
+ uint32_t y_plane_height;
+ uint32_t c_plane_height;
+ uint32_t original_width = bo->meta.width;
+ uint32_t original_height = bo->meta.height;
+
+ metadata->format = DRM_FORMAT_R8;
+ switch (bo->meta.format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ // Bi-planar
+ metadata->num_planes = 2;
+
+ y_plane_height = original_height;
+ c_plane_height = DIV_ROUND_UP(original_height, 2);
+
+ metadata->width = original_width;
+ metadata->height = y_plane_height + c_plane_height;
+
+ // Y-plane (full resolution)
+ metadata->strides[0] = metadata->width;
+ metadata->offsets[0] = 0;
+ metadata->sizes[0] = metadata->width * y_plane_height;
+
+ // CbCr-plane (half resolution, interleaved, placed below Y-plane)
+ metadata->strides[1] = metadata->width;
+ metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
+ metadata->sizes[1] = metadata->width * c_plane_height;
+
+ metadata->total_size = metadata->width * metadata->height;
+ break;
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU420_ANDROID:
+ // Tri-planar
+ metadata->num_planes = 3;
+
+ y_plane_height = original_height;
+ c_plane_height = DIV_ROUND_UP(original_height, 2);
+
+ metadata->width = ALIGN(original_width, 32);
+ metadata->height = y_plane_height + (2 * c_plane_height);
+
+ // Y-plane (full resolution)
+ metadata->strides[0] = metadata->width;
+ metadata->offsets[0] = 0;
+ metadata->sizes[0] = metadata->width * original_height;
+
+ // Cb-plane (half resolution, placed below Y-plane)
+ metadata->strides[1] = metadata->width;
+ metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
+ metadata->sizes[1] = metadata->width * c_plane_height;
+
+ // Cr-plane (half resolution, placed below Cb-plane)
+ metadata->strides[2] = metadata->width;
+ metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
+ metadata->sizes[2] = metadata->width * c_plane_height;
+
+ metadata->total_size = metadata->width * metadata->height;
+ break;
+ default:
+ break;
+ }
+}
+
+struct virtio_transfers_params {
+ size_t xfers_needed;
+ struct rectangle xfer_boxes[DRV_MAX_PLANES];
+};
+
+static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
+ const struct rectangle *transfer_box,
+ struct virtio_transfers_params *xfer_params)
+{
+ uint32_t y_plane_height;
+ uint32_t c_plane_height;
+ struct bo_metadata emulated_metadata;
+
+ if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
+ transfer_box->height == bo->meta.height) {
+ virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);
+
+ xfer_params->xfers_needed = 1;
+ xfer_params->xfer_boxes[0].x = 0;
+ xfer_params->xfer_boxes[0].y = 0;
+ xfer_params->xfer_boxes[0].width = emulated_metadata.width;
+ xfer_params->xfer_boxes[0].height = emulated_metadata.height;
+
+ return;
+ }
+
+ switch (bo->meta.format) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ // Bi-planar
+ xfer_params->xfers_needed = 2;
+
+ y_plane_height = bo->meta.height;
+ c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);
+
+ // Y-plane (full resolution)
+ xfer_params->xfer_boxes[0].x = transfer_box->x;
+ xfer_params->xfer_boxes[0].y = transfer_box->y;
+ xfer_params->xfer_boxes[0].width = transfer_box->width;
+ xfer_params->xfer_boxes[0].height = transfer_box->height;
+
+ // CbCr-plane (half resolution, interleaved, placed below Y-plane)
+ xfer_params->xfer_boxes[1].x = transfer_box->x;
+ xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
+ xfer_params->xfer_boxes[1].width = transfer_box->width;
+ xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);
+
+ break;
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU420_ANDROID:
+ // Tri-planar
+ xfer_params->xfers_needed = 3;
+
+ y_plane_height = bo->meta.height;
+ c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);
+
+ // Y-plane (full resolution)
+ xfer_params->xfer_boxes[0].x = transfer_box->x;
+ xfer_params->xfer_boxes[0].y = transfer_box->y;
+ xfer_params->xfer_boxes[0].width = transfer_box->width;
+ xfer_params->xfer_boxes[0].height = transfer_box->height;
+
+ // Cb-plane (half resolution, placed below Y-plane)
+ xfer_params->xfer_boxes[1].x = transfer_box->x;
+ xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
+ xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
+ xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);
+
+ // Cr-plane (half resolution, placed below Cb-plane)
+ xfer_params->xfer_boxes[2].x = transfer_box->x;
+ xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
+ xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
+ xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);
+
+ break;
+ }
+}
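+
+// As a worked example of virtio_gpu_get_emulated_transfers_params() above (illustrative
+// only): locking the middle 2x2 region of a 6x6 YVU420 buffer, i.e. a transfer box of
+// (x=2, y=2, w=2, h=2), is split into three sub-image transfers:
+//   Y-plane:  (x=2, y=2,              w=2, h=2)
+//   Cb-plane: (x=2, y=2 + 6 = 8,      w=1, h=1)
+//   Cr-plane: (x=2, y=2 + 6 + 3 = 11, w=1, h=1)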
+
+static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
+ uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ if (priv->caps.max_version == 0) {
+ return true;
+ }
+
+ if ((use_flags & BO_USE_RENDERING) &&
+ !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format)) {
+ return false;
+ }
+
+ if ((use_flags & BO_USE_TEXTURE) &&
+ !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format)) {
+ return false;
+ }
+
+ if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
+ !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format)) {
+ return false;
+ }
+
+ return true;
+}
+
+// For virtio backends that do not support a format natively (e.g. multi-planar formats
+// are not supported in virglrenderer when gbm is unavailable on the host machine), this
+// checks whether the format and usage combination can instead be handled as a blob (byte
+// buffer).
+static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
+ uint32_t drm_format,
+ uint64_t use_flags)
+{
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+ // Only enable emulation on non-gbm virtio backends.
+ if (priv->host_gbm_enabled) {
+ return false;
+ }
+
+ if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT)) {
+ return false;
+ }
+
+ if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags)) {
+ return false;
+ }
+
+ return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
+ drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;
+}
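+
+// Illustrative outcomes of the two checks above on a non-gbm host backend (assumed
+// examples, not exhaustive):
+//   (DRM_FORMAT_NV12,     BO_USE_TEXTURE) -> emulated via a DRM_FORMAT_R8 resource
+//   (DRM_FORMAT_NV12,     BO_USE_SCANOUT) -> rejected; rendering/scanout is not emulated
+//   (DRM_FORMAT_XRGB8888, any use flags)  -> never emulated; must be supported natively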
+
// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
                                       struct format_metadata *metadata, uint64_t use_flags)
{
struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
- if ((use_flags & BO_USE_RENDERING) &&
- !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
- drv_log("Skipping unsupported render format: %d\n", drm_format);
- return;
+ if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
+ !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
+ drv_log("Scanout format: %d\n", drm_format);
+ use_flags &= ~BO_USE_SCANOUT;
}
- if ((use_flags & BO_USE_TEXTURE) &&
- !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
- drv_log("Skipping unsupported texture format: %d\n", drm_format);
+ if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
+ !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
+ use_flags)) {
+ drv_log("Skipping unsupported combination format:%d\n", drm_format);
return;
}
- if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
- !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
- drv_log("Unsupported scanout format: %d\n", drm_format);
- use_flags &= ~BO_USE_SCANOUT;
- }
}
drv_add_combination(drv, drm_format, metadata, use_flags);
uint64_t use_flags)
{
int ret;
+ size_t i;
uint32_t stride;
struct drm_virtgpu_resource_create res_create;
+ struct bo_metadata emulated_metadata;
- stride = drv_stride_from_format(format, width, 0);
- drv_bo_from_format(bo, stride, height, format);
+ if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
+ stride = drv_stride_from_format(format, width, 0);
+ drv_bo_from_format(bo, stride, height, format);
+ } else {
+ assert(
+ virtio_gpu_supports_combination_through_emulation(bo->drv, format, use_flags));
+
+ virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);
+
+ format = emulated_metadata.format;
+ width = emulated_metadata.width;
+ height = emulated_metadata.height;
+ for (i = 0; i < emulated_metadata.num_planes; i++) {
+ bo->meta.strides[i] = emulated_metadata.strides[i];
+ bo->meta.offsets[i] = emulated_metadata.offsets[i];
+ bo->meta.sizes[i] = emulated_metadata.sizes[i];
+ }
+ bo->meta.total_size = emulated_metadata.total_size;
+ }
/*
* Setting the target is intended to ensure this resource gets bound as a 2D
return ret;
}
-static int virtio_gpu_init(struct driver *drv)
+static void virtio_gpu_init_features_and_caps(struct driver *drv)
{
- int ret;
- struct virtio_gpu_priv *priv;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
- priv = calloc(1, sizeof(*priv));
- drv->priv = priv;
for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
struct drm_virtgpu_getparam params = { 0 };
params.param = features[i].feature;
params.value = (uint64_t)(uintptr_t)&features[i].enabled;
- ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
+ int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
if (ret)
drv_log("DRM_IOCTL_VIRTGPU_GET_PARAM failed with %s\n", strerror(errno));
}
if (features[feat_3d].enabled) {
virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
+ }
+
+ // Multi-planar formats are currently only supported in virglrenderer through gbm.
+ priv->host_gbm_enabled =
+ virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
+}
+static int virtio_gpu_init(struct driver *drv)
+{
+ struct virtio_gpu_priv *priv;
+
+ priv = calloc(1, sizeof(*priv));
+ drv->priv = priv;
+
+ virtio_gpu_init_features_and_caps(drv);
+
+ if (features[feat_3d].enabled) {
/* This doesn't mean host can scanout everything, it just means host
* hypervisor can show it. */
virtio_gpu_add_combinations(drv, render_target_formats,
                            ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
                            BO_USE_RENDER_MASK | BO_USE_SCANOUT);
virtio_gpu_add_combinations(drv, texture_source_formats,
                            ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
                            BO_USE_TEXTURE_MASK);
virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
BO_USE_SW_MASK | BO_USE_LINEAR);
+ virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_LINEAR);
}
/* Android CTS tests require this. */
+ virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
+ virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
+ BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);
drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER);
+ drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
+ drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
+ BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
+ drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
+ BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
return drv_modify_linear_combinations(drv);
}
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
int ret;
+ size_t i;
struct drm_virtgpu_3d_transfer_from_host xfer;
struct drm_virtgpu_3d_wait waitcmd;
+ struct virtio_transfers_params xfer_params;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
if (!features[feat_3d].enabled)
return 0;
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;
- xfer.box.x = mapping->rect.x;
- xfer.box.y = mapping->rect.y;
- xfer.box.w = mapping->rect.width;
- xfer.box.h = mapping->rect.height;
- xfer.box.d = 1;
if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
- // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
- // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
- // based resources, we can work around this by using the level field to pass
- // the stride to virglrenderer's gbm transfer code. However, we need to avoid
- // doing this for resources which don't rely on that transfer code, which is
- // resources with the BO_USE_RENDERING flag set.
+ // Unfortunately, the kernel doesn't actually pass the guest layer_stride
+ // and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
+ // For gbm based resources, we can work around this by using the level field
+ // to pass the stride to virglrenderer's gbm transfer code. However, we need
+ // to avoid doing this for resources which don't rely on that transfer code,
+ // which is resources with the BO_USE_RENDERING flag set.
// TODO(b/145993887): Send also stride when the patches are landed
- xfer.level = bo->meta.strides[0];
+ if (priv->host_gbm_enabled) {
+ xfer.level = bo->meta.strides[0];
+ }
}
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
- if (ret) {
- drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
- return -errno;
+ if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
+ bo->meta.use_flags)) {
+ xfer_params.xfers_needed = 1;
+ xfer_params.xfer_boxes[0] = mapping->rect;
+ } else {
+ assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
+ bo->meta.use_flags));
+
+ virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
+ }
+
+ for (i = 0; i < xfer_params.xfers_needed; i++) {
+ xfer.box.x = xfer_params.xfer_boxes[i].x;
+ xfer.box.y = xfer_params.xfer_boxes[i].y;
+ xfer.box.w = xfer_params.xfer_boxes[i].width;
+ xfer.box.h = xfer_params.xfer_boxes[i].height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
+ strerror(errno));
+ return -errno;
+ }
}
// The transfer needs to complete before invalidate returns so that any host changes
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
int ret;
+ size_t i;
struct drm_virtgpu_3d_transfer_to_host xfer;
struct drm_virtgpu_3d_wait waitcmd;
+ struct virtio_transfers_params xfer_params;
+ struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
if (!features[feat_3d].enabled)
return 0;
memset(&xfer, 0, sizeof(xfer));
xfer.bo_handle = mapping->vma->handle;
- xfer.box.x = mapping->rect.x;
- xfer.box.y = mapping->rect.y;
- xfer.box.w = mapping->rect.width;
- xfer.box.h = mapping->rect.height;
- xfer.box.d = 1;
// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
// the level to work around this.
- xfer.level = bo->meta.strides[0];
+ if (priv->host_gbm_enabled) {
+ xfer.level = bo->meta.strides[0];
+ }
- ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
- if (ret) {
- drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
- return -errno;
+ if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
+ bo->meta.use_flags)) {
+ xfer_params.xfers_needed = 1;
+ xfer_params.xfer_boxes[0] = mapping->rect;
+ } else {
+ assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
+ bo->meta.use_flags));
+
+ virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
+ }
+
+ for (i = 0; i < xfer_params.xfers_needed; i++) {
+ xfer.box.x = xfer_params.xfer_boxes[i].x;
+ xfer.box.y = xfer_params.xfer_boxes[i].y;
+ xfer.box.w = xfer_params.xfer_boxes[i].width;
+ xfer.box.h = xfer_params.xfer_boxes[i].height;
+ xfer.box.d = 1;
+
+ ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
+ if (ret) {
+ drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
+ strerror(errno));
+ return -errno;
+ }
}
// If the buffer is only accessed by the host GPU, then the flush is ordered
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
-
switch (format) {
case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
/* Camera subsystem requires NV12. */
if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
return DRM_FORMAT_NV12;
/* HACK: See b/28671744 */
return DRM_FORMAT_XBGR8888;
case DRM_FORMAT_FLEX_YCbCr_420_888:
/* All of our host drivers prefer NV12 as their flexible media format.
 * If that changes, this will need to be modified. */
if (features[feat_3d].enabled)
return DRM_FORMAT_NV12;
else
- return DRM_FORMAT_YVU420;
+ return DRM_FORMAT_YVU420_ANDROID;
default:
return format;
}