/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"
#include "virtgpu_drm.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
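
/*
 * Note: llvmpipe, Mesa's software rasterizer, renders in tiles of
 * MESA_LLVMPIPE_TILE_SIZE (64) pixels per side. virtio_dumb_bo_create below
 * rounds dumb buffer dimensions up to whole tiles so host-side software
 * rendering can operate on complete tiles without leaving the allocation.
 */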
struct feature {
	uint64_t feature;
	const char *name;
	uint32_t enabled;
};

enum feature_id {
	feat_3d,
	feat_capset_fix,
	feat_max,
};

#define FEATURE(x) { (x), #x, 0 }

static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
				     FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						   DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						   DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = {
	DRM_FORMAT_R8, DRM_FORMAT_R16, DRM_FORMAT_YVU420,
	DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID
};

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21,
						   DRM_FORMAT_R8, DRM_FORMAT_R16,
						   DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID };
struct virtio_gpu_priv {
	int caps_is_v2;
	union virgl_caps caps;
	int host_gbm_enabled;
};
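
/*
 * Note: DRM fourccs name channels from most to least significant bit of a
 * little-endian packed pixel, while virgl formats name bytes in memory order,
 * so e.g. DRM_FORMAT_ARGB8888 and VIRGL_FORMAT_B8G8R8A8_UNORM describe the
 * same layout.
 */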
static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return VIRGL_FORMAT_R8G8B8_UNORM;
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_ABGR16161616F:
		/* Half-float format, so map to the FLOAT virgl format, not UNORM. */
		return VIRGL_FORMAT_R16G16B16A16_FLOAT;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_NV21:
		return VIRGL_FORMAT_NV21;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		return 0;
	}
}
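
/*
 * The virgl caps bitmasks pack one support bit per virgl format, 32 formats
 * per uint32_t word. For example, a format with enum value 70 is tested via
 * bitmask[70 / 32] & (1 << (70 % 32)), i.e. bitmask[2] & (1 << 6).
 */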
static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported,
					       uint32_t drm_format)
{
	uint32_t virgl_format = translate_format(drm_format);
	if (!virgl_format)
		return false;

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1 << bit_index);
}
// The metadata generated here for emulated buffers is slightly different from the metadata
// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
// functions below, the emulated buffers are oversized. For example, ignoring stride alignment
// requirements to demonstrate, a 6x6 YUV420 image buffer might have the following layout from
// drv_bo_from_format:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U | U | U | U |
// | U | U | U | V | V | V |
// | V | V | V | V | V | V |
//
// where each plane immediately follows the previous plane in memory. This layout makes it
// difficult to compute the transfers needed, for example, when the middle 2x2 region of the
// image is locked and needs to be flushed/invalidated.
//
// Emulated multi-plane buffers instead have a layout of:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
//
// where each plane is placed as a sub-image (albeit with a very large stride) in order to
// simplify transfers into 3 sub-image transfers for the above example.
//
// Additional note: the V-plane is not placed to the right of the U-plane due to some
// observed failures in media framework code which assumes the V-plane is not
// "row-interlaced" with the U-plane.
static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	uint32_t original_width = bo->meta.width;
	uint32_t original_height = bo->meta.height;

	metadata->format = DRM_FORMAT_R8;
	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		metadata->num_planes = 2;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = original_width;
		metadata->height = y_plane_height + c_plane_height;

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * y_plane_height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		metadata->num_planes = 3;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = ALIGN(original_width, 32);
		metadata->height = y_plane_height + (2 * c_plane_height);

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * original_height;

		// Cb-plane (half resolution, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		// Cr-plane (half resolution, placed below Cb-plane)
		metadata->strides[2] = metadata->width;
		metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
		metadata->sizes[2] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	default:
		break;
	}
}
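
// Worked example for the 6x6 YVU420 buffer pictured above (ignoring the
// 32-pixel width alignment for clarity): width = 6, height = 6 + 2 * 3 = 12,
// strides = {6, 6, 6}, offsets = {0, 36, 54}, sizes = {36, 18, 18}, and
// total_size = 72. Each plane starts on its own row of the oversized R8 image.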
struct virtio_transfers_params {
	size_t xfers_needed;
	struct rectangle xfer_boxes[DRV_MAX_PLANES];
};
static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo,
						     const struct rectangle *transfer_box,
						     struct virtio_transfers_params *xfer_params)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	struct bo_metadata emulated_metadata;

	if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
	    transfer_box->height == bo->meta.height) {
		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		xfer_params->xfers_needed = 1;
		xfer_params->xfer_boxes[0].x = 0;
		xfer_params->xfer_boxes[0].y = 0;
		xfer_params->xfer_boxes[0].width = emulated_metadata.width;
		xfer_params->xfer_boxes[0].height = emulated_metadata.height;
		return;
	}

	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		xfer_params->xfers_needed = 2;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = transfer_box->width;
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);
		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		xfer_params->xfers_needed = 3;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// Cb-plane (half resolution, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		// Cr-plane (half resolution, placed below Cb-plane)
		xfer_params->xfer_boxes[2].x = transfer_box->x;
		xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
		xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);
		break;
	default:
		break;
	}
}
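
// Worked example: locking the top-left 2x2 region (x = 0, y = 0, w = 2, h = 2)
// of the 6x6 YVU420 buffer above yields three boxes: {0, 0, 2, 2} for Y,
// {0, 6, 1, 1} for Cb, and {0, 9, 1, 1} for Cr.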
static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format,
						     uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// Without caps information, assume the host supports the combination.
	if (priv->caps.max_version == 0) {
		return true;
	}

	if ((use_flags & BO_USE_RENDERING) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format)) {
		return false;
	}

	if ((use_flags & BO_USE_TEXTURE) &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format)) {
		return false;
	}

	if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
	    !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format)) {
		return false;
	}

	return true;
}
// For virtio backends that do not support formats natively (e.g. multi-planar formats are not
// supported in virglrenderer when gbm is unavailable on the host machine), this checks
// whether the format and usage combination can instead be handled as a blob (byte buffer).
static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv,
							      uint32_t drm_format,
							      uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	// Only enable emulation on non-gbm virtio backends.
	if (priv->host_gbm_enabled) {
		return false;
	}

	// Emulated buffers are linear byte buffers, which the host cannot render to or scan out.
	if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT)) {
		return false;
	}

	if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags)) {
		return false;
	}

	return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
	       drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;
}
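
// For example, NV12 with BO_USE_TEXTURE | BO_USE_SW_READ_OFTEN qualifies for
// emulation when the host supports R8 textures natively, while NV12 with
// BO_USE_SCANOUT never does.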
// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
			drv_log("Stripping scanout use flag for format: %u\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}

		if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
		    !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
								       use_flags)) {
			drv_log("Skipping unsupported combination, format: %u\n", drm_format);
			return;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}
// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,
					uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
	}
}
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	// Pad dimensions to whole llvmpipe tiles; R8 blobs keep their exact size.
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}
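
// For example, a 33x17 XRGB8888 dumb buffer is allocated as 64x64 after the
// tile-size alignment above.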
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);

	// All host drivers only support linear camera buffer formats. If
	// that changes, this will need to be modified.
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);

	if (use_flags) {
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
	}

	return bind;
}
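
// For example, use_flags_to_bind(BO_USE_TEXTURE | BO_USE_SW_READ_OFTEN)
// returns VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW | VIRGL_BIND_LINEAR.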
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t stride;
	uint32_t i;
	struct drm_virtgpu_resource_create res_create;
	struct bo_metadata emulated_metadata;

	if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
		stride = drv_stride_from_format(format, width, 0);
		drv_bo_from_format(bo, stride, height, format);
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, format,
									 use_flags));
		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		format = emulated_metadata.format;
		width = emulated_metadata.width;
		height = emulated_metadata.height;
		for (i = 0; i < emulated_metadata.num_planes; i++) {
			bo->meta.strides[i] = emulated_metadata.strides[i];
			bo->meta.offsets[i] = emulated_metadata.offsets[i];
			bo->meta.sizes[i] = emulated_metadata.sizes[i];
		}
		bo->meta.total_size = emulated_metadata.total_size;
	}

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}
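
// Note: VIRTGPU_PARAM_CAPSET_QUERY_FIX advertises a kernel that honors the
// caps size passed in the query; only then is it safe to request the larger
// v2 caps (cap_set_id 2). Otherwise, or if the v2 query fails, fall back to v1.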
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args;

	*caps_is_v2 = 0;
	memset(&cap_args, 0, sizeof(cap_args));
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fall back to v1.
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
	}

	return ret;
}
static void virtio_gpu_init_features_and_caps(struct driver *drv)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
	}

	if (features[feat_3d].enabled) {
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
	}

	// Multi-planar formats are currently only supported in virglrenderer through gbm.
	priv->host_gbm_enabled =
	    virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
}
static int virtio_gpu_init(struct driver *drv)
{
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;
	drv->priv = priv;

	virtio_gpu_init_features_and_caps(drv);

	if (features[feat_3d].enabled) {
		/* This doesn't mean the host can scan out everything; it just means the
		 * host hypervisor can show it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);
	} else {
		/* Virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* Virtio cursor plane only allows this format and Chrome cannot live without
		 * an ARGB8888 renderable format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more formats, but they cannot be bound as scanouts anymore
		 * after "drm/virtio: fix DRM_FORMAT_* handling". */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK);
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
				   BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);

	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
	drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
	drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);

	return drv_modify_linear_combinations(drv);
}
static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);

	return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);

	return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);

	return drv_dumb_bo_map(bo, vma, plane, map_flags);
}
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride
		// and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
		// For gbm based resources, we can work around this by using the level field
		// to pass the stride to virglrenderer's gbm transfer code. However, we need
		// to avoid doing this for resources which don't rely on that transfer code,
		// i.e. resources with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Also send the stride when the patches land.
		if (priv->host_gbm_enabled) {
			xfer.level = bo->meta.strides[0];
		}
	}

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers.
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level field to work around this.
	if (priv->host_gbm_enabled) {
		xfer.level = bo->meta.strides[0];
	}

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									  bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers.
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		memset(&waitcmd, 0, sizeof(waitcmd));
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;

		return DRM_FORMAT_YVU420_ANDROID;
	default:
		return format;
	}
}
static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
{
	int ret;
	struct drm_virtgpu_resource_info res_info;

	if (!features[feat_3d].enabled)
		return 0;

	memset(&res_info, 0, sizeof(res_info));
	res_info.bo_handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl.
		 */
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];
		}
	}

	return 0;
}
const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
	.resource_info = virtio_gpu_resource_info,
};