/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <virtgpu_drm.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)
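
/*
 * Format tables used to seed the driver's supported combinations: formats the
 * host can render to, formats the dumb (2D-only) path can texture from, and
 * formats the virgl (3D) path can texture from.
 */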
static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
                                                  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
                                                  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
                                                        DRM_FORMAT_YVU420_ANDROID };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
                                                   DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
        int has_3d;
        int caps_is_v2;
        union virgl_caps caps;
};
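
/* Translates a DRM fourcc to the virgl format enum expected by the host
 * renderer. Returns 0 for formats this driver does not handle. */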
static uint32_t translate_format(uint32_t drm_fourcc)
{
        switch (drm_fourcc) {
        case DRM_FORMAT_XRGB8888:
                return VIRGL_FORMAT_B8G8R8X8_UNORM;
        case DRM_FORMAT_ARGB8888:
                return VIRGL_FORMAT_B8G8R8A8_UNORM;
        case DRM_FORMAT_XBGR8888:
                return VIRGL_FORMAT_R8G8B8X8_UNORM;
        case DRM_FORMAT_ABGR8888:
                return VIRGL_FORMAT_R8G8B8A8_UNORM;
        case DRM_FORMAT_RGB565:
                return VIRGL_FORMAT_B5G6R5_UNORM;
        case DRM_FORMAT_R8:
                return VIRGL_FORMAT_R8_UNORM;
        case DRM_FORMAT_RG88:
                return VIRGL_FORMAT_R8G8_UNORM;
        case DRM_FORMAT_NV12:
                return VIRGL_FORMAT_NV12;
        case DRM_FORMAT_YVU420:
        case DRM_FORMAT_YVU420_ANDROID:
                return VIRGL_FORMAT_YV12;
        default:
                return 0;
        }
}
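
/* The host reports supported formats as a bitmask with one bit per virgl
 * format, packed 32 formats per array entry. */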
static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
                                       uint32_t drm_format)
{
        uint32_t virgl_format = translate_format(drm_format);
        if (!virgl_format)
                return false;

        uint32_t bitmask_index = virgl_format / 32;
        uint32_t bit_index = virgl_format % 32;
        return supported->bitmask[bitmask_index] & (1 << bit_index);
}

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
                                       struct format_metadata *metadata, uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        if (priv->has_3d && priv->caps.max_version >= 1) {
                if ((use_flags & BO_USE_RENDERING) &&
                    !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
                        drv_log("Skipping unsupported render format: %d\n", drm_format);
                        return;
                }

                if ((use_flags & BO_USE_TEXTURE) &&
                    !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
                        drv_log("Skipping unsupported texture format: %d\n", drm_format);
                        return;
                }

                if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
                    !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
                        drv_log("Unsupported scanout format: %d\n", drm_format);
                        use_flags &= ~BO_USE_SCANOUT;
                }
        }

        drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
                                        uint32_t num_formats, struct format_metadata *metadata,
                                        uint64_t use_flags)
{
        uint32_t i;

        for (i = 0; i < num_formats; i++)
                virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
}
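
/* Allocation path used when the host lacks 3D support: allocate a dumb buffer.
 * Non-R8 buffers are padded to MESA_LLVMPIPE_TILE_SIZE so a software host
 * renderer (e.g. llvmpipe, which works on 64x64 tiles) can safely read and
 * write whole tiles. */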
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                 uint64_t use_flags)
{
        if (bo->meta.format != DRM_FORMAT_R8) {
                width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
                height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
        }

        return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
                               uint32_t virgl_bind)
{
        if ((*flag) & check_flag) {
                (*flag) &= ~check_flag;
                (*bind) |= virgl_bind;
        }
}
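
/* Translates minigbm BO_USE_* flags into virgl bind flags, consuming each
 * handled flag so anything left over can be reported as unhandled. */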
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
        /* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
        uint32_t bind = VIRGL_BIND_SHARED;

        handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
        handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
        handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
        handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
        handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

        handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);

        // All host drivers only support linear camera buffer formats. If
        // that changes, this will need to be modified.
        handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);

        if (use_flags)
                drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);

        return bind;
}
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                  uint64_t use_flags)
{
        int ret;
        uint32_t stride;
        struct drm_virtgpu_resource_create res_create;

        stride = drv_stride_from_format(format, width, 0);
        drv_bo_from_format(bo, stride, height, format);

        /*
         * Setting the target is intended to ensure this resource gets bound as a 2D
         * texture in the host renderer's GL state. All of these resource properties are
         * sent unchanged by the kernel to the host, which in turn sends them unchanged to
         * virglrenderer. When virglrenderer makes a resource, it will convert the target
         * enum to the equivalent one in GL and then bind the resource to that target.
         */
        memset(&res_create, 0, sizeof(res_create));

        res_create.target = PIPE_TEXTURE_2D;
        res_create.format = translate_format(format);
        res_create.bind = use_flags_to_bind(use_flags);
        res_create.width = width;
        res_create.height = height;

        res_create.depth = 1;
        res_create.array_size = 1;
        res_create.last_level = 0;
        res_create.nr_samples = 0;

        res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE);
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
                return ret;
        }

        for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
                bo->handles[plane].u32 = res_create.bo_handle;

        return 0;
}
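
/* Maps the buffer's backing store into the guest's address space. The
 * DRM_IOCTL_VIRTGPU_MAP ioctl returns a fake mmap offset that is then passed
 * to mmap() on the DRM fd. */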
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        int ret;
        struct drm_virtgpu_map gem_map;

        memset(&gem_map, 0, sizeof(gem_map));
        gem_map.handle = bo->handles[0].u32;

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
                return MAP_FAILED;
        }

        vma->length = bo->meta.total_size;
        return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.offset);
}
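
/* Queries the host's capability set. Hosts that advertise
 * VIRTGPU_PARAM_CAPSET_QUERY_FIX support the v2 capset; otherwise we fall
 * back to v1. */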
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
        int ret;
        struct drm_virtgpu_get_caps cap_args;
        struct drm_virtgpu_getparam param_args;
        uint32_t can_query_v2 = 0;

        memset(&param_args, 0, sizeof(param_args));
        param_args.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
        param_args.value = (uint64_t)(uintptr_t)&can_query_v2;
        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &param_args);
        if (ret)
                drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));

        *caps_is_v2 = 0;
        memset(&cap_args, 0, sizeof(cap_args));
        cap_args.addr = (unsigned long long)caps;
        if (can_query_v2) {
                *caps_is_v2 = 1;
                cap_args.cap_set_id = 2;
                cap_args.size = sizeof(union virgl_caps);
        } else {
                cap_args.cap_set_id = 1;
                cap_args.size = sizeof(struct virgl_caps_v1);
        }

        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
                *caps_is_v2 = 0;

                /* Fall back to the v1 capset. */
                cap_args.cap_set_id = 1;
                cap_args.size = sizeof(struct virgl_caps_v1);

                ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
                if (ret)
                        drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
        }

        return ret;
}
static int virtio_gpu_init(struct driver *drv)
{
        int ret;
        struct virtio_gpu_priv *priv;
        struct drm_virtgpu_getparam args;

        priv = calloc(1, sizeof(*priv));
        if (!priv)
                return -ENOMEM;
        drv->priv = priv;

        memset(&args, 0, sizeof(args));
        args.param = VIRTGPU_PARAM_3D_FEATURES;
        args.value = (uint64_t)(uintptr_t)&priv->has_3d;
        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
        if (ret) {
                drv_log("virtio 3D acceleration is not available\n");
                /* Be paranoid */
                priv->has_3d = 0;
        }

        if (priv->has_3d) {
                virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);

                /* This doesn't mean the host can scan out everything; it just
                 * means the host hypervisor can display it. */
                virtio_gpu_add_combinations(drv, render_target_formats,
                                            ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
                                            BO_USE_RENDER_MASK | BO_USE_SCANOUT);
                virtio_gpu_add_combinations(drv, texture_source_formats,
                                            ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
                                            BO_USE_TEXTURE_MASK);
        } else {
                /* The virtio primary plane only allows this format. */
                virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
                                           BO_USE_RENDER_MASK | BO_USE_SCANOUT);
                /* The virtio cursor plane only allows this format, and Chrome cannot live
                 * without ARGB8888 as a renderable format. */
                virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
                                           BO_USE_RENDER_MASK | BO_USE_CURSOR);
                /* Android needs more formats, but they cannot be bound as scanouts anymore
                 * after "drm/virtio: fix DRM_FORMAT_* handling". */
                virtio_gpu_add_combinations(drv, render_target_formats,
                                            ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
                                            BO_USE_RENDER_MASK);
                virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
                                            ARRAY_SIZE(dumb_texture_source_formats),
                                            &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
                virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
                                           BO_USE_SW_MASK | BO_USE_LINEAR);
        }

        /* Android CTS tests require this. */
        virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);

        drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);

        return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
        free(drv->priv);
        drv->priv = NULL;
}

static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        if (priv->has_3d)
                return virtio_virgl_bo_create(bo, width, height, format, use_flags);
        else
                return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        if (priv->has_3d)
                return drv_gem_bo_destroy(bo);
        else
                return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        if (priv->has_3d)
                return virtio_virgl_bo_map(bo, vma, plane, map_flags);
        else
                return drv_dumb_bo_map(bo, vma, plane, map_flags);
}
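
/* Called before the CPU reads a mapped buffer: pulls the host's copy of the
 * resource into the guest's memory so host writes become visible. */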
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
        int ret;
        struct drm_virtgpu_3d_transfer_from_host xfer;
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        struct drm_virtgpu_3d_wait waitcmd;

        if (!priv->has_3d)
                return 0;

        // Invalidate is only necessary if the host writes to the buffer.
        if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
                                   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
                return 0;

        memset(&xfer, 0, sizeof(xfer));
        xfer.bo_handle = mapping->vma->handle;
        xfer.box.x = mapping->rect.x;
        xfer.box.y = mapping->rect.y;
        xfer.box.w = mapping->rect.width;
        xfer.box.h = mapping->rect.height;
        xfer.box.d = 1;

        if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
                // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
                // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
                // based resources, we can work around this by using the level field to pass
                // the stride to virglrenderer's gbm transfer code. However, we need to avoid
                // doing this for resources which don't rely on that transfer code, which is
                // resources with the BO_USE_RENDERING flag set.
                // TODO(b/145993887): Also send the stride once the patches land.
                xfer.level = bo->meta.strides[0];
        }

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
                return -errno;
        }

        // The transfer needs to complete before invalidate returns so that any host changes
        // are visible and to ensure the host doesn't overwrite subsequent guest changes.
        // TODO(b/136733358): Support returning fences from transfers
        memset(&waitcmd, 0, sizeof(waitcmd));
        waitcmd.handle = mapping->vma->handle;
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
                return -errno;
        }

        return 0;
}
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
        int ret;
        struct drm_virtgpu_3d_transfer_to_host xfer;
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        struct drm_virtgpu_3d_wait waitcmd;

        if (!priv->has_3d)
                return 0;

        if (!(mapping->vma->map_flags & BO_MAP_WRITE))
                return 0;

        memset(&xfer, 0, sizeof(xfer));
        xfer.bo_handle = mapping->vma->handle;
        xfer.box.x = mapping->rect.x;
        xfer.box.y = mapping->rect.y;
        xfer.box.w = mapping->rect.width;
        xfer.box.h = mapping->rect.height;
        xfer.box.d = 1;

        // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
        // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
        // the level field to work around this.
        xfer.level = bo->meta.strides[0];

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
                return -errno;
        }

        // If the buffer is only accessed by the host GPU, then the flush is ordered
        // with subsequent commands. However, if other host hardware can access the
        // buffer, we need to wait for the transfer to complete for consistency.
        // TODO(b/136733358): Support returning fences from transfers
        if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
                memset(&waitcmd, 0, sizeof(waitcmd));
                waitcmd.handle = mapping->vma->handle;

                ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
                if (ret) {
                        drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
                        return -errno;
                }
        }

        return 0;
}
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        switch (format) {
        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
                /* The camera subsystem requires NV12. */
                if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
                        return DRM_FORMAT_NV12;
                /* HACK: See b/28671744 */
                return DRM_FORMAT_XBGR8888;
        case DRM_FORMAT_FLEX_YCbCr_420_888:
                /*
                 * All of our host drivers prefer NV12 as their flexible media format.
                 * If that changes, this will need to be modified.
                 */
                if (priv->has_3d)
                        return DRM_FORMAT_NV12;
                else
                        return DRM_FORMAT_YVU420;
        default:
                return format;
        }
}

const struct backend backend_virtio_gpu = {
        .name = "virtio_gpu",
        .init = virtio_gpu_init,
        .close = virtio_gpu_close,
        .bo_create = virtio_gpu_bo_create,
        .bo_destroy = virtio_gpu_bo_destroy,
        .bo_import = drv_prime_bo_import,
        .bo_map = virtio_gpu_bo_map,
        .bo_unmap = drv_bo_munmap,
        .bo_invalidate = virtio_gpu_bo_invalidate,
        .bo_flush = virtio_gpu_bo_flush,
        .resolve_format = virtio_gpu_resolve_format,
};