 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.

#include "external/virgl_hw.h"
#include "external/virgl_protocol.h"
#include "external/virtgpu_drm.h"

#define PAGE_SIZE 0x1000

#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
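
/*
 * Dumb buffers allocated by virtio_dumb_bo_create() below are padded to
 * MESA_LLVMPIPE_TILE_SIZE (64-pixel) multiples, presumably so the host's
 * llvmpipe software renderer, which works on 64x64 tiles, never has to touch
 * memory outside the allocation.
 */
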
	feat_host_cross_device,

static struct feature features[] = {
	FEATURE(VIRTGPU_PARAM_3D_FEATURES), FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
	FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
	FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						   DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						   DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = {
	DRM_FORMAT_R8, DRM_FORMAT_R16, DRM_FORMAT_YVU420,
	DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
						    DRM_FORMAT_R8, DRM_FORMAT_R16,
						    DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
	union virgl_caps caps;

static uint32_t translate_format(uint32_t drm_fourcc)
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return VIRGL_FORMAT_R8G8B8_UNORM;
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_ABGR16161616F:
		return VIRGL_FORMAT_R16G16B16A16_FLOAT;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_NV21:
		return VIRGL_FORMAT_NV21;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;

static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
	uint32_t virgl_format = translate_format(drm_format);

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1 << bit_index);
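
// Worked example for virtio_gpu_bitmask_supports_format(): a virgl format with an
// (arbitrary, illustrative) enum value of 67 is looked up as bit 67 % 32 = 3 of
// bitmask[67 / 32] = bitmask[2] in the capability mask above.
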
// The metadata generated here for emulated buffers is slightly different from the metadata
// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
// functions below, the emulated buffers are oversized. For example, ignoring stride alignment
// requirements to demonstrate, a 6x6 YUV420 image buffer might have the following layout from
// drv_bo_from_format:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U | U | U | U |
// | U | U | U | V | V | V |
// | V | V | V | V | V | V |
//
// where each plane immediately follows the previous plane in memory. This layout makes it
// difficult to compute the transfers needed, for example, when the middle 2x2 region of the
// image is locked and needs to be flushed/invalidated.
//
// Emulated multi-plane buffers instead have a layout of:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
//
// where each plane is placed as a sub-image (albeit with a very large stride) in order to
// simplify transfers into 3 sub-image transfers for the above example.
//
// Additional note: the V-plane is not placed to the right of the U-plane due to some
// observed failures in media framework code which assumes the V-plane is not
// "row-interlaced" with the U-plane.
static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	uint32_t original_width = bo->meta.width;
	uint32_t original_height = bo->meta.height;

	metadata->format = DRM_FORMAT_R8;
	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		metadata->num_planes = 2;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = original_width;
		metadata->height = y_plane_height + c_plane_height;

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * y_plane_height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		metadata->num_planes = 3;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = ALIGN(original_width, 32);
		metadata->height = y_plane_height + (2 * c_plane_height);

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * original_height;

		// Cb-plane (half resolution, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		// Cr-plane (half resolution, placed below Cb-plane)
		metadata->strides[2] = metadata->width;
		metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
		metadata->sizes[2] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;

struct virtio_transfers_params {
	struct rectangle xfer_boxes[DRV_MAX_PLANES];

static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
						      const struct rectangle *transfer_box,
						      struct virtio_transfers_params *xfer_params)
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	struct bo_metadata emulated_metadata;

	if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
	    transfer_box->height == bo->meta.height) {
		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		xfer_params->xfers_needed = 1;
		xfer_params->xfer_boxes[0].x = 0;
		xfer_params->xfer_boxes[0].y = 0;
		xfer_params->xfer_boxes[0].width = emulated_metadata.width;
		xfer_params->xfer_boxes[0].height = emulated_metadata.height;

	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		xfer_params->xfers_needed = 2;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = transfer_box->width;
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		xfer_params->xfers_needed = 3;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// Cb-plane (half resolution, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		// Cr-plane (half resolution, placed below Cb-plane)
		xfer_params->xfer_boxes[2].x = transfer_box->x;
		xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
		xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);
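
		// For instance, locking the middle 2x2 region of a 6x6 YVU420 buffer
		// (transfer_box = {2, 2, 2, 2}) produces three boxes with the code above:
		// {2, 2, 2, 2} for Y, {2, 8, 1, 1} for the first chroma plane (y = 2 + 6),
		// and {2, 11, 1, 1} for the second (y = 2 + 6 + 3).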

static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (priv->caps.max_version == 0) {

	if ((use_flags & BO_USE_RENDERING) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format)) {

	if ((use_flags & BO_USE_TEXTURE) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format)) {

	if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format)) {

// For virtio backends that do not support formats natively (e.g. multi-planar formats are not
// supported in virglrenderer when gbm is unavailable on the host machine), this returns whether
// the format and usage combination can be handled as a blob (byte buffer).
static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// Only enable emulation on non-gbm virtio backends.
	if (priv->host_gbm_enabled) {

	if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT)) {

	if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags)) {

	return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
	       drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
			drv_log("Scanout format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;

		if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
		    !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
			drv_log("Skipping unsupported combination format: %d\n", drm_format);

	drv_add_combination(drv, drm_format, metadata, use_flags);

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,

	for (i = 0; i < num_formats; i++) {
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);

static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
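
// For example, virtio_dumb_bo_create() pads a 1920x1080 DRM_FORMAT_XRGB8888 buffer to
// 1920x1088 (1920 is already a multiple of 64; 1080 rounds up to 1088), while
// DRM_FORMAT_R8 buffers skip the padding.
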
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;

static uint32_t use_flags_to_bind(uint64_t use_flags)
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	if (use_flags & BO_USE_PROTECTED) {
		handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
		// Make sure we don't set both flags, since that could be mistaken for
		// protected. Give OFTEN priority over RARELY.
		if (use_flags & BO_USE_SW_READ_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
			handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_RARELY);
		if (use_flags & BO_USE_SW_WRITE_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
			handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);

	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);

		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
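
// Example mapping for use_flags_to_bind(): BO_USE_TEXTURE | BO_USE_LINEAR |
// BO_USE_SW_WRITE_RARELY translates to VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW |
// VIRGL_BIND_LINEAR | VIRGL_BIND_MINIGBM_SW_WRITE_RARELY.
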
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
	struct drm_virtgpu_resource_create res_create;
	struct bo_metadata emulated_metadata;

	if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
		stride = drv_stride_from_format(format, width, 0);
		drv_bo_from_format(bo, stride, height, format);
		virtio_gpu_supports_combination_through_emulation(bo->drv, format, use_flags));

		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		format = emulated_metadata.format;
		width = emulated_metadata.width;
		height = emulated_metadata.height;
		for (i = 0; i < emulated_metadata.num_planes; i++) {
			bo->meta.strides[i] = emulated_metadata.strides[i];
			bo->meta.offsets[i] = emulated_metadata.offsets[i];
			bo->meta.sizes[i] = emulated_metadata.sizes[i];
		bo->meta.total_size = emulated_metadata.total_size;

	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,

static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
	struct drm_virtgpu_get_caps cap_args;

	memset(&cap_args, 0, sizeof(cap_args));
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));

		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));

static void virtio_gpu_init_features_and_caps(struct driver *drv)
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
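		// The kernel treats 'value' as a user-space pointer and copies the queried
		// parameter back through it, so point it at this feature's 'enabled' field.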
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));

	if (features[feat_3d].enabled) {
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);

	// Multi-planar formats are currently only supported in virglrenderer through gbm.
	priv->host_gbm_enabled =
	    virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);

static int virtio_gpu_init(struct driver *drv)
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));

	virtio_gpu_init_features_and_caps(drv);

	if (features[feat_3d].enabled) {
		/* This doesn't mean the host can scan out every such buffer; it just means
		 * the host hypervisor can display it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);

		/* The virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* The virtio cursor plane only allows this format, and Chrome cannot live
		 * without a renderable ARGB8888 format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more, but they cannot be bound as scanouts anymore after
		 * "drm/virtio: fix DRM_FORMAT_* handling" */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
				   BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);

	drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);

	return drv_modify_linear_combinations(drv);

static void virtio_gpu_close(struct driver *drv)

static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
	uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };

	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

	stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
	drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
	bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
	bo->meta.tiling = blob_flags;

	cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
	cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
	cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
	cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
	cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
	cmd[VIRGL_PIPE_RES_CREATE_BIND] = use_flags_to_bind(bo->meta.use_flags);
	cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;

	drm_rc_blob.cmd = (uint64_t)&cmd;
	drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
	drm_rc_blob.blob_flags = blob_flags;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = drm_rc_blob.bo_handle;
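
// Illustrative sizing for virtio_gpu_bo_create_blob() (assuming drv_stride_from_format()
// returns a width-sized stride for R8): a 1280x720 DRM_FORMAT_R8 blob ends up with
// total_size = 1280 * 720 = 921600 bytes, already a PAGE_SIZE multiple, and the command
// stream handed to the host is 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1) bytes long.
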
static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// TODO(gurchetansingh): remove once all minigbm users are blob-safe
#ifndef VIRTIO_GPU_NEXT

	// Only use blob when host gbm is available
	if (!priv->host_gbm_enabled)

	// Focus on non-GPU apps for now
	if (use_flags & (BO_USE_RENDERING | BO_USE_TEXTURE))

	// Simple, strictly defined formats for now
	if (format != DRM_FORMAT_YVU420_ANDROID && format != DRM_FORMAT_R8)

	    (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR | BO_USE_NON_GPU_HW))

static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
	if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
	    should_use_blob(bo->drv, format, use_flags))
		return virtio_gpu_bo_create_blob(bo->drv, bo);

	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);

	return virtio_dumb_bo_create(bo, width, height, format, use_flags);

static int virtio_gpu_bo_destroy(struct bo *bo)
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);

	return drv_dumb_bo_destroy(bo);

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);

	return drv_dumb_bo_map(bo, vma, plane, map_flags);

static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)

	if (features[feat_resource_blob].enabled &&
	    (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		if (bo->meta.num_planes == 1) {
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
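			// E.g. with a single-plane 4-byte-per-pixel format, a 4096-byte stride
			// and a lock rectangle at (10, 20), this source offset works out to
			// 4096 * 20 + 4 * 10 = 81960 bytes.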

		if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
			// Unfortunately, the kernel doesn't actually pass the guest layer_stride
			// and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
			// For gbm based resources, we can work around this by using the level field
			// to pass the stride to virglrenderer's gbm transfer code. However, we need
			// to avoid doing this for resources which don't rely on that transfer code,
			// i.e. resources with the BO_USE_RENDERING flag set.
			// TODO(b/145993887): Send also stride when the patches are landed
			if (priv->host_gbm_enabled) {
				xfer.level = bo->meta.strides[0];

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));

static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))

	if (features[feat_resource_blob].enabled &&
	    (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		if (bo->meta.num_planes == 1) {
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;

		// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
		// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
		// the level to work around this.
		if (priv->host_gbm_enabled) {
			xfer.level = bo->meta.strides[0];

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		memset(&waitcmd, 0, sizeof(waitcmd));
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));

static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;

		return DRM_FORMAT_YVU420_ANDROID;

static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
	struct drm_virtgpu_resource_info res_info;

	if (!features[feat_3d].enabled)

	memset(&res_info, 0, sizeof(res_info));
	res_info.bo_handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];

const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
	.resource_info = virtio_gpu_resource_info,