/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"
#include "virtgpu_drm.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
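
/*
 * llvmpipe renders in 64x64 tiles (1 << MESA_LLVMPIPE_TILE_ORDER). Dumb
 * buffer dimensions are rounded up to this tile size in
 * virtio_dumb_bo_create(), presumably so a host falling back to software
 * rendering never touches memory outside the guest allocation.
 */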
/* Bookkeeping for the VIRTGPU params queried in virtio_gpu_init(). The enum
 * indices must match the order of the features[] initializer. */
struct feature {
	uint64_t feature;
	const char *name;
	uint32_t enabled;
};

enum { feat_3d, feat_capset_fix };

#define FEATURE(x) { (x), #x, 0 }

static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
				      FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
							DRM_FORMAT_YVU420_ANDROID };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
						   DRM_FORMAT_YVU420_ANDROID };
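
/*
 * Note: dumb_texture_source_formats is advertised on the 2D (dumb buffer)
 * path, while texture_source_formats is advertised when the 3D feature is
 * enabled and the host renderer can sample formats such as NV12 and RG88.
 */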
struct virtio_gpu_priv {
	int caps_is_v2;
	union virgl_caps caps;
};
static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		return 0;
	}
}
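
/*
 * The virgl caps describe supported formats as a bitmask indexed by
 * VIRGL_FORMAT_* value: bit (format % 32) of bitmask[format / 32]. A
 * drm_fourcc with no virgl equivalent translates to 0 above and is treated
 * as unsupported.
 */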
static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
				       uint32_t drm_format)
{
	uint32_t virgl_format = translate_format(drm_format);
	if (!virgl_format)
		return false;

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1 << bit_index);
}
// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		if ((use_flags & BO_USE_RENDERING) &&
		    !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
			drv_log("Skipping unsupported render format: %d\n", drm_format);
			return;
		}

		if ((use_flags & BO_USE_TEXTURE) &&
		    !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
			drv_log("Skipping unsupported texture format: %d\n", drm_format);
			return;
		}

		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
			drv_log("Unsupported scanout format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}
// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,
					uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++)
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
}
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}
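
/*
 * Worked example: use_flags_to_bind(BO_USE_TEXTURE | BO_USE_SCANOUT)
 * returns VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW | VIRGL_BIND_SCANOUT,
 * and handle_flag() clears each translated bit so nothing is left to trip
 * the "Unhandled bo use flag" warning below.
 */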
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	if (use_flags & BO_USE_PROTECTED) {
		handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
	} else {
		// Make sure we don't set both flags, since that could be mistaken for
		// protected. Give OFTEN priority over RARELY.
		if (use_flags & BO_USE_SW_READ_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_RARELY);
		}
		if (use_flags & BO_USE_SW_WRITE_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
		}
	}

	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);

	if (use_flags)
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);

	return bind;
}
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create;

	stride = drv_stride_from_format(format, width, 0);
	drv_bo_from_format(bo, stride, height, format);

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	/* Defaults for a non-mipmapped, non-multisampled 2D resource. */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE);
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	/* Planar formats share one virgl resource, so every plane gets the same handle. */
	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}
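
/*
 * Capset 1 is struct virgl_caps_v1; capset 2 (union virgl_caps) extends it
 * with, among other things, a scanout format mask. Capset 2 is only queried
 * when the host advertises VIRTGPU_PARAM_CAPSET_QUERY_FIX, since hosts
 * without that fix did not handle capset query sizes correctly.
 */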
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args;

	*caps_is_v2 = 0;
	memset(&cap_args, 0, sizeof(cap_args));
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fall back to v1.
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
	}

	return ret;
}
static int virtio_gpu_init(struct driver *drv)
{
	int ret;
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));
	drv->priv = priv;

	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GET_PARAM failed with %s\n", strerror(errno));
	}

	if (features[feat_3d].enabled) {
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);

		/* This doesn't mean the host can scan out everything, it just means the
		 * host hypervisor can show it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);
	} else {
		/* The virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* The virtio cursor plane only allows this format, and Chrome cannot live
		 * without an ARGB8888 renderable format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more formats, but they can no longer be bound as scanouts
		 * after "drm/virtio: fix DRM_FORMAT_* handling". */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK);
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);

	return drv_modify_linear_combinations(drv);
}
static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);
	else
		return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}
static int virtio_gpu_bo_destroy(struct bo *bo)
{
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}
static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, plane, map_flags);
}
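
/*
 * With the 3D feature, the guest mapping and the host resource are distinct
 * copies: invalidate pulls host-side writes into the guest via
 * TRANSFER_FROM_HOST, and flush pushes guest writes back via TRANSFER_TO_HOST.
 */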
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;

	if (!features[feat_3d].enabled)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	if (mapping->rect.x || mapping->rect.y) {
		drv_log("Non-zero transfer offset\n");

		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images. In this case, we need to modify the offset.
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
		// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
		// based resources, we can work around this by using the level field to pass
		// the stride to virglrenderer's gbm transfer code. However, we need to avoid
		// doing this for resources which don't rely on that transfer code, which is
		// resources with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Send also stride when the patches are landed
		xfer.level = bo->meta.strides[0];
	}

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;

	if (!features[feat_3d].enabled)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;
	xfer.box.x = mapping->rect.x;
	xfer.box.y = mapping->rect.y;
	xfer.box.w = mapping->rect.width;
	xfer.box.h = mapping->rect.height;
	xfer.box.d = 1;

	if (mapping->rect.x || mapping->rect.y) {
		drv_log("Non-zero transfer offset\n");

		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images. In this case, we need to modify the offset.
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level to work around this.
	xfer.level = bo->meta.strides[0];

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
		return -errno;
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		memset(&waitcmd, 0, sizeof(waitcmd));
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* The camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/* HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;
		else
			return DRM_FORMAT_YVU420;
	default:
		return format;
	}
}
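
/*
 * When the host allocates the resource (e.g. through gbm in crosvm), its
 * plane layout may differ from the layout the guest computed, so the
 * host-reported strides and offsets are preferred whenever the kernel
 * provides them.
 */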
static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
{
	int ret;
	struct drm_virtgpu_resource_info res_info;

	if (!features[feat_3d].enabled)
		return 0;

	memset(&res_info, 0, sizeof(res_info));
	res_info.bo_handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl, so only use the host values when they are actually filled in.
		 */
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];
		}
	}

	return 0;
}
const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
	.resource_info = virtio_gpu_resource_info,
};