/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "external/virgl_hw.h"
#include "external/virgl_protocol.h"
#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER) /* 64 */

// Feature bookkeeping for the VIRTGPU_PARAM_* queries issued in
// virtio_gpu_init_features_and_caps() below.
struct feature {
	uint64_t feature;
	const char *name;
	uint32_t enabled;
};

enum feature_id {
	feat_3d,
	feat_capset_fix,
	feat_max,
};

#define FEATURE(x) { (x), #x, 0 }

static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
				     FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = {
	DRM_FORMAT_R8, DRM_FORMAT_R16, DRM_FORMAT_YVU420,
	DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID
};

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
						   DRM_FORMAT_R8, DRM_FORMAT_R16,
						   DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
	int caps_is_v2;
	union virgl_caps caps;
	int host_gbm_enabled;
};

static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return VIRGL_FORMAT_R8G8B8_UNORM;
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_ABGR16161616F:
		/* ABGR16161616F is a half-float format, so map it to the virgl float
		 * format rather than a UNORM one. */
		return VIRGL_FORMAT_R16G16B16A16_FLOAT;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_NV21:
		return VIRGL_FORMAT_NV21;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		return 0;
	}
}

static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
					       uint32_t drm_format)
{
	uint32_t virgl_format = translate_format(drm_format);
	if (!virgl_format)
		return false;

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1u << bit_index);
}
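
// Worked example (illustrative only): for a hypothetical virgl format with
// enum value 70, the lookup above reads supported->bitmask[70 / 32], i.e.
// bitmask[2], and tests bit 70 % 32 = 6:
//
//	bool supported_fmt = supported->bitmask[2] & (1u << 6);
//
// Each 32-bit word of the mask covers 32 consecutive virgl format values.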

// The metadata generated here for emulated buffers is slightly different from the metadata
// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
// functions below, the emulated buffers are oversized. For example, ignoring stride alignment
// requirements to demonstrate, a 6x6 YUV420 image buffer might have the following layout from
// drv_bo_from_format:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U | U | U | U |
// | U | U | U | V | V | V |
// | V | V | V | V | V | V |
//
// where each plane immediately follows the previous plane in memory. This layout makes it
// difficult to compute the transfers needed, for example, when the middle 2x2 region of the
// image is locked and needs to be flushed/invalidated.
//
// Emulated multi-plane buffers instead have a layout of:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
//
// where each plane is placed as a sub-image (albeit with a very large stride) in order to
// simplify transfers into 3 sub-image transfers for the above example.
//
// Additional note: the V-plane is not placed to the right of the U-plane due to some
// observed failures in media framework code that assumes the V-plane is not
// "row-interlaced" with the U-plane.
static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	uint32_t original_width = bo->meta.width;
	uint32_t original_height = bo->meta.height;

	metadata->format = DRM_FORMAT_R8;
	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		metadata->num_planes = 2;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = original_width;
		metadata->height = y_plane_height + c_plane_height;

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * y_plane_height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		metadata->num_planes = 3;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = ALIGN(original_width, 32);
		metadata->height = y_plane_height + (2 * c_plane_height);

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * original_height;

		// Cb-plane (half resolution, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		// Cr-plane (half resolution, placed below Cb-plane)
		metadata->strides[2] = metadata->width;
		metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
		metadata->sizes[2] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	default:
		break;
	}
}
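
// Worked example (illustrative only): for a hypothetical 6x6
// DRM_FORMAT_YVU420_ANDROID buffer, the code above computes
//
//	width      = ALIGN(6, 32)                = 32
//	height     = 6 + 2 * DIV_ROUND_UP(6, 2)  = 12
//	strides[]  = { 32, 32, 32 }
//	offsets[]  = { 0, 32 * 6 = 192, 192 + 32 * 3 = 288 }
//	sizes[]    = { 192, 96, 96 }
//	total_size = 32 * 12                     = 384
//
// so each plane starts on its own row of the emulated R8 image, matching the
// sub-image layout illustrated in the comment above.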

struct virtio_transfers_params {
	size_t xfers_needed;
	struct rectangle xfer_boxes[DRV_MAX_PLANES];
};

static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
						     const struct rectangle *transfer_box,
						     struct virtio_transfers_params *xfer_params)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	struct bo_metadata emulated_metadata;

	if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
	    transfer_box->height == bo->meta.height) {
		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		xfer_params->xfers_needed = 1;
		xfer_params->xfer_boxes[0].x = 0;
		xfer_params->xfer_boxes[0].y = 0;
		xfer_params->xfer_boxes[0].width = emulated_metadata.width;
		xfer_params->xfer_boxes[0].height = emulated_metadata.height;
		return;
	}

	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		xfer_params->xfers_needed = 2;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = transfer_box->width;
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		xfer_params->xfers_needed = 3;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// Cb-plane (half resolution, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		// Cr-plane (half resolution, placed below Cb-plane)
		xfer_params->xfer_boxes[2].x = transfer_box->x;
		xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
		xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);

		break;
	}
}
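
// Worked example (illustrative only): locking the middle 2x2 region at
// (x, y) = (2, 2) of a hypothetical 6x6 DRM_FORMAT_NV12 buffer produces two
// boxes in emulated (R8 image) coordinates:
//
//	xfer_boxes[0] = { .x = 2, .y = 2, .width = 2, .height = 2 };     // Y
//	xfer_boxes[1] = { .x = 2, .y = 2 + 6, .width = 2, .height = 1 }; // CbCr
//
// The chroma box is shifted down past the Y-plane and halved vertically.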

static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
						     uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// Without capability info, optimistically assume the host supports the format.
	if (priv->caps.max_version == 0) {
		return true;
	}

	if ((use_flags & BO_USE_RENDERING) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format)) {
		return false;
	}

	if ((use_flags & BO_USE_TEXTURE) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format)) {
		return false;
	}

	if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format)) {
		return false;
	}

	return true;
}

// For virtio backends that do not support a format natively (e.g. multi-planar formats are
// not supported in virglrenderer when gbm is unavailable on the host machine), this checks
// whether the format and usage combination can instead be handled as a blob (byte buffer).
static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
							      uint32_t drm_format,
							      uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// Only enable emulation on non-gbm virtio backends.
	if (priv->host_gbm_enabled) {
		return false;
	}

	// Emulated buffers are opaque byte blobs to the host, so the host GPU cannot
	// render to or scan out from them.
	if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT)) {
		return false;
	}

	if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags)) {
		return false;
	}

	return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
	       drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;
}
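
// Example (illustrative only): on a non-gbm host, (DRM_FORMAT_NV12,
// BO_USE_TEXTURE | BO_USE_SW_READ_OFTEN) is emulatable provided DRM_FORMAT_R8
// passes the native check for the same usage, while any combination including
// BO_USE_RENDERING or BO_USE_SCANOUT is rejected above.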

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
			drv_log("Dropping unsupported scanout usage for format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}

		if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
		    !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
								       use_flags)) {
			drv_log("Skipping unsupported combination, format: %d\n", drm_format);
			return;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,
					uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
	}
}

static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	// llvmpipe renders in 64x64 tiles, so pad non-R8 buffers out to the tile size.
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}

static uint32_t use_flags_to_bind(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	if (use_flags & BO_USE_PROTECTED) {
		handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
	} else {
		// Make sure we don't set both flags, since that could be mistaken for
		// protected. Give OFTEN priority over RARELY.
		if (use_flags & BO_USE_SW_READ_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_RARELY);
		}
		if (use_flags & BO_USE_SW_WRITE_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
		}
	}

	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);

	if (use_flags) {
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
	}

	return bind;
}
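
// Worked example (illustrative only): a buffer allocated with
// BO_USE_TEXTURE | BO_USE_CAMERA_WRITE | BO_USE_SW_READ_OFTEN maps to
//
//	VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW |
//	    VIRGL_BIND_MINIGBM_SW_READ_OFTEN | VIRGL_BIND_MINIGBM_CAMERA_WRITE
//
// with every input bit consumed, so nothing is logged as unhandled.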

static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	size_t i;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create;
	struct bo_metadata emulated_metadata;

	if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
		stride = drv_stride_from_format(format, width, 0);
		drv_bo_from_format(bo, stride, height, format);
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, format,
									 use_flags));

		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		format = emulated_metadata.format;
		width = emulated_metadata.width;
		height = emulated_metadata.height;
		for (i = 0; i < emulated_metadata.num_planes; i++) {
			bo->meta.strides[i] = emulated_metadata.strides[i];
			bo->meta.offsets[i] = emulated_metadata.offsets[i];
			bo->meta.sizes[i] = emulated_metadata.sizes[i];
		}
		bo->meta.total_size = emulated_metadata.total_size;
	}

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	/* A single-sampled, single-layer 2D texture. */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE);
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}

static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args;

	*caps_is_v2 = 0;
	memset(&cap_args, 0, sizeof(cap_args));
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fall back to the v1 capset.
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
	}

	return ret;
}

static void virtio_gpu_init_features_and_caps(struct driver *drv)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
	}

	if (features[feat_3d].enabled) {
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
	}

	// Multi-planar formats are currently only supported in virglrenderer through gbm.
	priv->host_gbm_enabled =
	    virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
}

static int virtio_gpu_init(struct driver *drv)
{
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;
	drv->priv = priv;

	virtio_gpu_init_features_and_caps(drv);

	if (features[feat_3d].enabled) {
		/* This doesn't mean the host can scan out everything; it just means the
		 * host hypervisor can display it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);
	} else {
		/* Virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* Virtio cursor plane only allows this format and Chrome cannot live without
		 * an ARGB8888 renderable format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more formats, but they cannot be bound as scanouts anymore
		 * after "drm/virtio: fix DRM_FORMAT_* handling". */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK);
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
				   BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);

	drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);

	return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}

static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);
	else
		return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, plane, map_flags);
}

static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images.
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride
		// and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
		// For gbm-based resources, we can work around this by using the level field
		// to pass the stride to virglrenderer's gbm transfer code. However, we need
		// to avoid doing this for resources which don't rely on that transfer code,
		// i.e. those with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Also send the stride once the patches land.
		if (priv->host_gbm_enabled) {
			xfer.level = bo->meta.strides[0];
		}
	}

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}

static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)
		return 0;

	// Flush is only necessary if the guest could have written to the buffer.
	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images.
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level field to work around this.
	if (priv->host_gbm_enabled) {
		xfer.level = bo->meta.strides[0];
	}

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		memset(&waitcmd, 0, sizeof(waitcmd));
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}

static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;
		else
			return DRM_FORMAT_YVU420_ANDROID;
	default:
		return format;
	}
}

static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
{
	int ret;
	struct drm_virtgpu_resource_info res_info;

	if (!features[feat_3d].enabled)
		return 0;

	memset(&res_info, 0, sizeof(res_info));
	res_info.bo_handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl, in which case the returned strides are zero and the existing
		 * guest-side defaults are kept.
		 */
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];
		}
	}

	return 0;
}

const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
	.resource_info = virtio_gpu_resource_info,
};