/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "external/virgl_hw.h"
#include "external/virgl_protocol.h"
#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif

#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)

struct feature {
        uint64_t feature;
        const char *name;
        uint32_t enabled;
};

enum feature_id {
        feat_3d,
        feat_capset_fix,
        feat_resource_blob,
        feat_host_visible,
        feat_host_cross_device,
        feat_max,
};

#define FEATURE(x) { x, #x, 0 }

static struct feature features[] = {
        FEATURE(VIRTGPU_PARAM_3D_FEATURES),   FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
        FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
        FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
};

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
                                                  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
                                                  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = {
        DRM_FORMAT_R8,   DRM_FORMAT_R16,  DRM_FORMAT_YVU420,
        DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID
};

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
                                                   DRM_FORMAT_R8, DRM_FORMAT_R16,
                                                   DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
        int caps_is_v2;
        union virgl_caps caps;
        int host_gbm_enabled;
        atomic_int next_blob_id;
};
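
// Maps a DRM fourcc to the corresponding virgl format enum; returns 0 for fourccs this
// driver does not know how to translate.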
static uint32_t translate_format(uint32_t drm_fourcc)
{
        switch (drm_fourcc) {
        case DRM_FORMAT_BGR888:
        case DRM_FORMAT_RGB888:
                return VIRGL_FORMAT_R8G8B8_UNORM;
        case DRM_FORMAT_XRGB8888:
                return VIRGL_FORMAT_B8G8R8X8_UNORM;
        case DRM_FORMAT_ARGB8888:
                return VIRGL_FORMAT_B8G8R8A8_UNORM;
        case DRM_FORMAT_XBGR8888:
                return VIRGL_FORMAT_R8G8B8X8_UNORM;
        case DRM_FORMAT_ABGR8888:
                return VIRGL_FORMAT_R8G8B8A8_UNORM;
        case DRM_FORMAT_ABGR16161616F:
                return VIRGL_FORMAT_R16G16B16A16_FLOAT;
        case DRM_FORMAT_RGB565:
                return VIRGL_FORMAT_B5G6R5_UNORM;
        case DRM_FORMAT_R8:
                return VIRGL_FORMAT_R8_UNORM;
        case DRM_FORMAT_RG88:
                return VIRGL_FORMAT_R8G8_UNORM;
        case DRM_FORMAT_NV12:
                return VIRGL_FORMAT_NV12;
        case DRM_FORMAT_NV21:
                return VIRGL_FORMAT_NV21;
        case DRM_FORMAT_YVU420:
        case DRM_FORMAT_YVU420_ANDROID:
                return VIRGL_FORMAT_YV12;
        default:
                return 0;
        }
}

static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
                                               uint32_t drm_format)
{
        uint32_t virgl_format = translate_format(drm_format);
        if (!virgl_format)
                return false;

        uint32_t bitmask_index = virgl_format / 32;
        uint32_t bit_index = virgl_format % 32;
        return supported->bitmask[bitmask_index] & (1 << bit_index);
}

// The metadata generated here for emulated buffers is slightly different than the metadata
// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
// functions below, the emulated buffers are oversized. For example, ignoring stride alignment
// requirements to demonstrate, a 6x6 YUV420 image buffer might have the following layout from
// drv_bo_from_format:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U | U | U | U |
// | U | U | U | V | V | V |
// | V | V | V | V | V | V |
//
// where each plane immediately follows the previous plane in memory. This layout makes it
// difficult to compute the transfers needed for example when the middle 2x2 region of the
// image is locked and needs to be flushed/invalidated.
//
// Emulated multi-plane buffers instead have a layout of:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
//
// where each plane is placed as a sub-image (albeit with a very large stride) in order to
// simplify transfers into 3 sub-image transfers for the above example.
//
// Additional note: the V-plane is not placed to the right of the U-plane due to some
// observed failures in media framework code which assumes the V-plane is not
// "row-interlaced" with the U-plane.
static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
{
        uint32_t y_plane_height;
        uint32_t c_plane_height;
        uint32_t original_width = bo->meta.width;
        uint32_t original_height = bo->meta.height;

        metadata->format = DRM_FORMAT_R8;
        switch (bo->meta.format) {
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV21:
                // Bi-planar
                metadata->num_planes = 2;

                y_plane_height = original_height;
                c_plane_height = DIV_ROUND_UP(original_height, 2);

                metadata->width = original_width;
                metadata->height = y_plane_height + c_plane_height;

                // Y-plane (full resolution)
                metadata->strides[0] = metadata->width;
                metadata->offsets[0] = 0;
                metadata->sizes[0] = metadata->width * y_plane_height;

                // CbCr-plane (half resolution, interleaved, placed below Y-plane)
                metadata->strides[1] = metadata->width;
                metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
                metadata->sizes[1] = metadata->width * c_plane_height;

                metadata->total_size = metadata->width * metadata->height;
                break;
        case DRM_FORMAT_YVU420:
        case DRM_FORMAT_YVU420_ANDROID:
                // Tri-planar
                metadata->num_planes = 3;

                y_plane_height = original_height;
                c_plane_height = DIV_ROUND_UP(original_height, 2);

                metadata->width = ALIGN(original_width, 32);
                metadata->height = y_plane_height + (2 * c_plane_height);

                // Y-plane (full resolution)
                metadata->strides[0] = metadata->width;
                metadata->offsets[0] = 0;
                metadata->sizes[0] = metadata->width * original_height;

                // Cb-plane (half resolution, placed below Y-plane)
                metadata->strides[1] = metadata->width;
                metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
                metadata->sizes[1] = metadata->width * c_plane_height;

                // Cr-plane (half resolution, placed below Cb-plane)
                metadata->strides[2] = metadata->width;
                metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
                metadata->sizes[2] = metadata->width * c_plane_height;

                metadata->total_size = metadata->width * metadata->height;
                break;
        default:
                break;
        }
}

struct virtio_transfers_params {
        size_t xfers_needed;
        struct rectangle xfer_boxes[DRV_MAX_PLANES];
};
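
// Computes the transfer boxes needed to flush/invalidate the given region of an emulated
// buffer: a single box when the region covers the whole buffer, otherwise one box per
// plane, offset vertically to that plane's position in the emulated layout above.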
static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
                                                     const struct rectangle *transfer_box,
                                                     struct virtio_transfers_params *xfer_params)
{
        uint32_t y_plane_height;
        uint32_t c_plane_height;
        struct bo_metadata emulated_metadata;

        if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
            transfer_box->height == bo->meta.height) {
                virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

                xfer_params->xfers_needed = 1;
                xfer_params->xfer_boxes[0].x = 0;
                xfer_params->xfer_boxes[0].y = 0;
                xfer_params->xfer_boxes[0].width = emulated_metadata.width;
                xfer_params->xfer_boxes[0].height = emulated_metadata.height;

                return;
        }

        switch (bo->meta.format) {
        case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV21:
                // Bi-planar
                xfer_params->xfers_needed = 2;

                y_plane_height = bo->meta.height;
                c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

                // Y-plane (full resolution)
                xfer_params->xfer_boxes[0].x = transfer_box->x;
                xfer_params->xfer_boxes[0].y = transfer_box->y;
                xfer_params->xfer_boxes[0].width = transfer_box->width;
                xfer_params->xfer_boxes[0].height = transfer_box->height;

                // CbCr-plane (half resolution, interleaved, placed below Y-plane)
                xfer_params->xfer_boxes[1].x = transfer_box->x;
                xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
                xfer_params->xfer_boxes[1].width = transfer_box->width;
                xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

                break;
        case DRM_FORMAT_YVU420:
        case DRM_FORMAT_YVU420_ANDROID:
                // Tri-planar
                xfer_params->xfers_needed = 3;

                y_plane_height = bo->meta.height;
                c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

                // Y-plane (full resolution)
                xfer_params->xfer_boxes[0].x = transfer_box->x;
                xfer_params->xfer_boxes[0].y = transfer_box->y;
                xfer_params->xfer_boxes[0].width = transfer_box->width;
                xfer_params->xfer_boxes[0].height = transfer_box->height;

                // Cb-plane (half resolution, placed below Y-plane)
                xfer_params->xfer_boxes[1].x = transfer_box->x;
                xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
                xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
                xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

                // Cr-plane (half resolution, placed below Cb-plane)
                xfer_params->xfer_boxes[2].x = transfer_box->x;
                xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
                xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
                xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);

                break;
        default:
                break;
        }
}
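
// Checks whether the host renderer reports native support for this format and usage
// combination in its capability sets. If no capability information is available
// (max_version == 0), support is optimistically assumed.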
static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
                                                     uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        if (priv->caps.max_version == 0) {
                return true;
        }

        if ((use_flags & BO_USE_RENDERING) &&
            !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format)) {
                return false;
        }

        if ((use_flags & BO_USE_TEXTURE) &&
            !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format)) {
                return false;
        }

        if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
            !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format)) {
                return false;
        }

        return true;
}

// For virtio backends that do not support formats natively (e.g. multi-planar formats are not
// supported in virglrenderer when gbm is unavailable on the host machine), this reports whether
// the format and usage combination can be handled as a blob (byte buffer).
static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
                                                              uint32_t drm_format,
                                                              uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        // Only enable emulation on non-gbm virtio backends.
        if (priv->host_gbm_enabled) {
                return false;
        }

        if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT)) {
                return false;
        }

        if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags)) {
                return false;
        }

        return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
               drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;
}

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
                                       struct format_metadata *metadata, uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
                if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
                    !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
                        drv_log("Scanout unsupported for format: %d; dropping scanout usage\n",
                                drm_format);
                        use_flags &= ~BO_USE_SCANOUT;
                }

                if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
                    !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
                                                                       use_flags)) {
                        drv_log("Skipping unsupported combination format:%d\n", drm_format);
                        return;
                }
        }

        drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
                                        uint32_t num_formats, struct format_metadata *metadata,
                                        uint64_t use_flags)
{
        uint32_t i;

        for (i = 0; i < num_formats; i++) {
                virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
        }
}
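
// Fallback allocation path when the host has no 3D renderer: allocate a dumb buffer through
// the kernel. Non-R8 buffers are padded to llvmpipe's tile size, presumably because the host
// may rasterize with Mesa's llvmpipe, which operates on whole tiles (see
// MESA_LLVMPIPE_TILE_SIZE above).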
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                 uint64_t use_flags)
{
        if (bo->meta.format != DRM_FORMAT_R8) {
                width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
                height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
        }

        return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
                               uint32_t virgl_bind)
{
        if ((*flag) & check_flag) {
                (*flag) &= ~check_flag;
                (*bind) |= virgl_bind;
        }
}
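
// Converts minigbm BO_USE_* use flags into the VIRGL_BIND_* flags sent to the host,
// consuming each use flag as it is translated. For example, BO_USE_TEXTURE | BO_USE_LINEAR
// becomes VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW | VIRGL_BIND_LINEAR.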
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
        /* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
        uint32_t bind = VIRGL_BIND_SHARED;

        handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
        handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
        handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
        handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
        handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

        if (use_flags & BO_USE_PROTECTED) {
                handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
        } else {
                // Make sure we don't set both flags, since that could be mistaken for
                // protected. Give OFTEN priority over RARELY.
                if (use_flags & BO_USE_SW_READ_OFTEN) {
                        handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
                                    VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
                } else {
                        handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
                                    VIRGL_BIND_MINIGBM_SW_READ_RARELY);
                }

                if (use_flags & BO_USE_SW_WRITE_OFTEN) {
                        handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
                                    VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
                } else {
                        handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
                                    VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
                }
        }

        handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
        handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
        handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
                    VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
        handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
                    VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);

        if (use_flags)
                drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);

        return bind;
}
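
// Allocates a host-backed 3D resource with DRM_IOCTL_VIRTGPU_RESOURCE_CREATE. Natively
// supported formats keep the layout computed by drv_bo_from_format; emulated formats are
// allocated as one oversized R8 image described by the emulated metadata above.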
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                  uint64_t use_flags)
{
        int ret;
        size_t i;
        uint32_t stride;
        struct drm_virtgpu_resource_create res_create = { 0 };
        struct bo_metadata emulated_metadata;

        if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
                stride = drv_stride_from_format(format, width, 0);
                drv_bo_from_format(bo, stride, height, format);
        } else {
                assert(virtio_gpu_supports_combination_through_emulation(bo->drv, format,
                                                                         use_flags));

                virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

                format = emulated_metadata.format;
                width = emulated_metadata.width;
                height = emulated_metadata.height;
                for (i = 0; i < emulated_metadata.num_planes; i++) {
                        bo->meta.strides[i] = emulated_metadata.strides[i];
                        bo->meta.offsets[i] = emulated_metadata.offsets[i];
                        bo->meta.sizes[i] = emulated_metadata.sizes[i];
                }
                bo->meta.total_size = emulated_metadata.total_size;
        }

        /*
         * Setting the target is intended to ensure this resource gets bound as a 2D
         * texture in the host renderer's GL state. All of these resource properties are
         * sent unchanged by the kernel to the host, which in turn sends them unchanged to
         * virglrenderer. When virglrenderer makes a resource, it will convert the target
         * enum to the equivalent one in GL and then bind the resource to that target.
         */
        res_create.target = PIPE_TEXTURE_2D;
        res_create.format = translate_format(format);
        res_create.bind = use_flags_to_bind(use_flags);
        res_create.width = width;
        res_create.height = height;

        /* For virgl 3D */
        res_create.depth = 1;
        res_create.array_size = 1;
        res_create.last_level = 0;
        res_create.nr_samples = 0;

        res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
                return ret;
        }

        for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
                bo->handles[plane].u32 = res_create.bo_handle;

        return 0;
}
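
// Maps a 3D resource: DRM_IOCTL_VIRTGPU_MAP returns an offset into the DRM fd's mmap
// space, and the whole buffer is mapped as one contiguous range (all planes share a single
// GEM handle).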
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        int ret;
        struct drm_virtgpu_map gem_map = { 0 };

        gem_map.handle = bo->handles[0].u32;
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
                return MAP_FAILED;
        }

        vma->length = bo->meta.total_size;
        return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.offset);
}
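
// Queries the host capability set. When the capset-query fix is present, capset 2
// (virgl_caps v2) is requested, falling back to capset 1 (v1) on failure; *caps_is_v2
// records which version was actually retrieved.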
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
        int ret;
        struct drm_virtgpu_get_caps cap_args = { 0 };

        *caps_is_v2 = 0;
        cap_args.addr = (unsigned long long)caps;
        if (features[feat_capset_fix].enabled) {
                *caps_is_v2 = 1;
                cap_args.cap_set_id = 2;
                cap_args.size = sizeof(union virgl_caps);
        } else {
                cap_args.cap_set_id = 1;
                cap_args.size = sizeof(struct virgl_caps_v1);
        }

        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
                *caps_is_v2 = 0;

                // Fallback to v1
                cap_args.cap_set_id = 1;
                cap_args.size = sizeof(struct virgl_caps_v1);

                ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
                if (ret)
                        drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
        }

        return ret;
}

static void virtio_gpu_init_features_and_caps(struct driver *drv)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
                struct drm_virtgpu_getparam params = { 0 };

                params.param = features[i].feature;
                params.value = (uint64_t)(uintptr_t)&features[i].enabled;
                int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
                if (ret)
                        drv_log("DRM_IOCTL_VIRTGPU_GET_PARAM failed with %s\n", strerror(errno));
        }

        if (features[feat_3d].enabled) {
                virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
        }

        // Multi-planar formats are currently only supported in virglrenderer through gbm.
        priv->host_gbm_enabled =
            virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
}

static int virtio_gpu_init(struct driver *drv)
{
        struct virtio_gpu_priv *priv;

        priv = calloc(1, sizeof(*priv));
        if (!priv)
                return -ENOMEM;
        drv->priv = priv;

        virtio_gpu_init_features_and_caps(drv);

        if (features[feat_3d].enabled) {
                /* This doesn't mean the host can scan out everything; it just means the
                 * host hypervisor can display it. */
                virtio_gpu_add_combinations(drv, render_target_formats,
                                            ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
                                            BO_USE_RENDER_MASK | BO_USE_SCANOUT);
                virtio_gpu_add_combinations(drv, texture_source_formats,
                                            ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
                                            BO_USE_TEXTURE_MASK);
        } else {
                /* Virtio primary plane only allows this format. */
                virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
                                           BO_USE_RENDER_MASK | BO_USE_SCANOUT);
                /* Virtio cursor plane only allows this format and Chrome cannot live without
                 * an ARGB8888 renderable format. */
                virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
                                           BO_USE_RENDER_MASK | BO_USE_CURSOR);
                /* Android needs more formats, but they can no longer be bound as scanouts
                 * after "drm/virtio: fix DRM_FORMAT_* handling". */
                virtio_gpu_add_combinations(drv, render_target_formats,
                                            ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
                                            BO_USE_RENDER_MASK);
                virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
                                            ARRAY_SIZE(dumb_texture_source_formats),
                                            &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
                virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
                                           BO_USE_SW_MASK | BO_USE_LINEAR);
                virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
                                           BO_USE_SW_MASK | BO_USE_LINEAR);
        }

        /* Android CTS tests require this. */
        virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
        virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
        virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
                                   BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);

        drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
        drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);

        return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
        free(drv->priv);
        drv->priv = NULL;
}
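
// Creates a host-visible blob resource: a virgl PIPE_RESOURCE_CREATE command carrying a
// guest-chosen blob_id is placed in the cmd payload, and the same blob_id is referenced by
// DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB to create the mappable resource.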
static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
{
        int ret;
        uint32_t stride;
        uint32_t cur_blob_id;
        uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
        struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
        if (bo->meta.use_flags & BO_USE_SW_MASK)
                blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
        if (bo->meta.use_flags & BO_USE_NON_GPU_HW)
                blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

        cur_blob_id = atomic_fetch_add(&priv->next_blob_id, 1);
        stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
        drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
        bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
        bo->meta.tiling = blob_flags;

        cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
        cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
        cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
        cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
        cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
        cmd[VIRGL_PIPE_RES_CREATE_BIND] = use_flags_to_bind(bo->meta.use_flags);
        cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
        cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = cur_blob_id;

        drm_rc_blob.cmd = (uint64_t)&cmd;
        drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
        drm_rc_blob.size = bo->meta.total_size;
        drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
        drm_rc_blob.blob_flags = blob_flags;
        drm_rc_blob.blob_id = cur_blob_id;

        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
        if (ret < 0) {
                drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
                return -errno;
        }

        for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
                bo->handles[plane].u32 = drm_rc_blob.bo_handle;

        return 0;
}
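
// Decides whether an allocation should take the blob path: only when host gbm is enabled,
// when something other than the GPU (CPU or other host hardware) needs efficient access,
// and for formats whose guest-computed layout is known to match the host's.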
static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        // TODO(gurchetansingh): remove once all minigbm users are blob-safe
#ifndef VIRTIO_GPU_NEXT
        return false;
#endif

        // Only use blob when host gbm is available
        if (!priv->host_gbm_enabled)
                return false;

        // Use regular resources if only the GPU needs efficient access
        if (!(use_flags &
              (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR | BO_USE_NON_GPU_HW)))
                return false;

        switch (format) {
        case DRM_FORMAT_YVU420_ANDROID:
        case DRM_FORMAT_R8:
                // Formats with strictly defined strides are supported
                return true;
        case DRM_FORMAT_NV12:
                // Knowing buffer metadata at buffer creation isn't yet supported, so buffers
                // can't be properly mapped into the guest.
                return (use_flags & BO_USE_SW_MASK) == 0;
        default:
                return false;
        }
}
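
// Dispatches allocation to one of three paths: a host-visible blob when the feature set and
// usage allow it, a virgl 3D resource when the host has a 3D renderer, or a dumb buffer
// otherwise.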
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                uint64_t use_flags)
{
        if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
            should_use_blob(bo->drv, format, use_flags))
                return virtio_gpu_bo_create_blob(bo->drv, bo);

        if (features[feat_3d].enabled)
                return virtio_virgl_bo_create(bo, width, height, format, use_flags);

        return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
        if (features[feat_3d].enabled)
                return drv_gem_bo_destroy(bo);

        return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        if (features[feat_3d].enabled)
                return virtio_virgl_bo_map(bo, vma, plane, map_flags);

        return drv_dumb_bo_map(bo, vma, plane, map_flags);
}
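
// Makes host writes visible to the guest before a CPU read: issues a TRANSFER_FROM_HOST per
// sub-image box, then waits so host changes are visible and the host cannot overwrite
// subsequent guest writes. Skipped for mappable blob resources, which the guest maps
// directly.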
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
        int ret;
        size_t i;
        struct drm_virtgpu_3d_transfer_from_host xfer = { 0 };
        struct drm_virtgpu_3d_wait waitcmd = { 0 };
        struct virtio_transfers_params xfer_params;
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

        if (!features[feat_3d].enabled)
                return 0;

        // Invalidate is only necessary if the host writes to the buffer.
        if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
                                   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
                return 0;

        if (features[feat_resource_blob].enabled &&
            (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
                return 0;

        xfer.bo_handle = mapping->vma->handle;

        if (mapping->rect.x || mapping->rect.y) {
                /*
                 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
                 * images
                 */
                if (bo->meta.num_planes == 1) {
                        xfer.offset =
                            (bo->meta.strides[0] * mapping->rect.y) +
                            drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
                }
        }

        if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
                // Unfortunately, the kernel doesn't actually pass the guest layer_stride
                // and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
                // For gbm based resources, we can work around this by using the level field
                // to pass the stride to virglrenderer's gbm transfer code. However, we need
                // to avoid doing this for resources which don't rely on that transfer code,
                // which is resources with the BO_USE_RENDERING flag set.
                // TODO(b/145993887): Also send the stride once the kernel patches land.
                if (priv->host_gbm_enabled) {
                        xfer.level = bo->meta.strides[0];
                }
        }

        if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
                                                     bo->meta.use_flags)) {
                xfer_params.xfers_needed = 1;
                xfer_params.xfer_boxes[0] = mapping->rect;
        } else {
                assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
                                                                         bo->meta.use_flags));

                virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
        }

        for (i = 0; i < xfer_params.xfers_needed; i++) {
                xfer.box.x = xfer_params.xfer_boxes[i].x;
                xfer.box.y = xfer_params.xfer_boxes[i].y;
                xfer.box.w = xfer_params.xfer_boxes[i].width;
                xfer.box.h = xfer_params.xfer_boxes[i].height;
                xfer.box.d = 1;

                ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
                if (ret) {
                        drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
                                strerror(errno));
                        return -errno;
                }
        }

        // The transfer needs to complete before invalidate returns so that any host changes
        // are visible and to ensure the host doesn't overwrite subsequent guest changes.
        // TODO(b/136733358): Support returning fences from transfers
        waitcmd.handle = mapping->vma->handle;
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
                return -errno;
        }

        return 0;
}
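
// Pushes guest CPU writes to the host after a mapped write: issues a TRANSFER_TO_HOST per
// sub-image box. A wait is only needed when non-GPU host hardware may consume the buffer;
// for GPU-only consumers the transfer is ordered with subsequent commands.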
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
        int ret;
        size_t i;
        struct drm_virtgpu_3d_transfer_to_host xfer = { 0 };
        struct drm_virtgpu_3d_wait waitcmd = { 0 };
        struct virtio_transfers_params xfer_params;
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

        if (!features[feat_3d].enabled)
                return 0;

        if (!(mapping->vma->map_flags & BO_MAP_WRITE))
                return 0;

        if (features[feat_resource_blob].enabled &&
            (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
                return 0;

        xfer.bo_handle = mapping->vma->handle;

        if (mapping->rect.x || mapping->rect.y) {
                /*
                 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
                 * images
                 */
                if (bo->meta.num_planes == 1) {
                        xfer.offset =
                            (bo->meta.strides[0] * mapping->rect.y) +
                            drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
                }
        }

        // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
        // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
        // the level to work around this.
        if (priv->host_gbm_enabled) {
                xfer.level = bo->meta.strides[0];
        }

        if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
                                                     bo->meta.use_flags)) {
                xfer_params.xfers_needed = 1;
                xfer_params.xfer_boxes[0] = mapping->rect;
        } else {
                assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
                                                                         bo->meta.use_flags));

                virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
        }

        for (i = 0; i < xfer_params.xfers_needed; i++) {
                xfer.box.x = xfer_params.xfer_boxes[i].x;
                xfer.box.y = xfer_params.xfer_boxes[i].y;
                xfer.box.w = xfer_params.xfer_boxes[i].width;
                xfer.box.h = xfer_params.xfer_boxes[i].height;
                xfer.box.d = 1;

                ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
                if (ret) {
                        drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
                                strerror(errno));
                        return -errno;
                }
        }

        // If the buffer is only accessed by the host GPU, then the flush is ordered
        // with subsequent commands. However, if other host hardware can access the
        // buffer, we need to wait for the transfer to complete for consistency.
        // TODO(b/136733358): Support returning fences from transfers
        if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
                waitcmd.handle = mapping->vma->handle;

                ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
                if (ret) {
                        drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
                        return -errno;
                }
        }

        return 0;
}
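
// Resolves Android's flexible formats to concrete DRM formats based on the requested usage
// and host capabilities.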
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
        switch (format) {
        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
                /* Camera subsystem requires NV12. */
                if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
                        return DRM_FORMAT_NV12;
                /* HACK: See b/28671744 */
                return DRM_FORMAT_XBGR8888;
        case DRM_FORMAT_FLEX_YCbCr_420_888:
                /*
                 * All of our host drivers prefer NV12 as their flexible media format.
                 * If that changes, this will need to be modified.
                 */
                if (features[feat_3d].enabled)
                        return DRM_FORMAT_NV12;
                else
                        return DRM_FORMAT_YVU420_ANDROID;
        default:
                return format;
        }
}
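
// Fetches host-side strides and offsets for an imported resource so the guest's view of the
// layout matches the host allocation; zero strides (from kernels without the extended
// resource info ioctl) leave the guest-computed layout untouched.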
static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
                                    uint32_t offsets[DRV_MAX_PLANES])
{
        int ret;
        struct drm_virtgpu_resource_info res_info = { 0 };

        if (!features[feat_3d].enabled)
                return 0;

        res_info.bo_handle = bo->handles[0].u32;
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
                return ret;
        }

        for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
                /*
                 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
                 * ioctl.
                 */
                if (res_info.strides[plane]) {
                        strides[plane] = res_info.strides[plane];
                        offsets[plane] = res_info.offsets[plane];
                }
        }

        return 0;
}

const struct backend backend_virtio_gpu = {
        .name = "virtio_gpu",
        .init = virtio_gpu_init,
        .close = virtio_gpu_close,
        .bo_create = virtio_gpu_bo_create,
        .bo_destroy = virtio_gpu_bo_destroy,
        .bo_import = drv_prime_bo_import,
        .bo_map = virtio_gpu_bo_map,
        .bo_unmap = drv_bo_munmap,
        .bo_invalidate = virtio_gpu_bo_invalidate,
        .bo_flush = virtio_gpu_bo_flush,
        .resolve_format = virtio_gpu_resolve_format,
        .resource_info = virtio_gpu_resource_info,
};