[android-x86/external-minigbm.git] / virtio_gpu.c
/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <virtgpu_drm.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"
#include "virgl_hw.h"

#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define PIPE_TEXTURE_2D 2

#define MESA_LLVMPIPE_TILE_ORDER 6
#define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER)

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
                                                  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
                                                  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420,
                                                        DRM_FORMAT_NV12,
                                                        DRM_FORMAT_YVU420_ANDROID };

static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, DRM_FORMAT_RG88,
                                                   DRM_FORMAT_YVU420_ANDROID };

struct virtio_gpu_priv {
        int has_3d;
        int caps_is_v2;
        union virgl_caps caps;
};

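/* Translates a DRM fourcc into the corresponding virgl format, or returns 0 if the format
 * has no mapping. */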
static uint32_t translate_format(uint32_t drm_fourcc)
{
        switch (drm_fourcc) {
        case DRM_FORMAT_XRGB8888:
                return VIRGL_FORMAT_B8G8R8X8_UNORM;
        case DRM_FORMAT_ARGB8888:
                return VIRGL_FORMAT_B8G8R8A8_UNORM;
        case DRM_FORMAT_XBGR8888:
                return VIRGL_FORMAT_R8G8B8X8_UNORM;
        case DRM_FORMAT_ABGR8888:
                return VIRGL_FORMAT_R8G8B8A8_UNORM;
        case DRM_FORMAT_RGB565:
                return VIRGL_FORMAT_B5G6R5_UNORM;
        case DRM_FORMAT_R8:
                return VIRGL_FORMAT_R8_UNORM;
        case DRM_FORMAT_RG88:
                return VIRGL_FORMAT_R8G8_UNORM;
        case DRM_FORMAT_NV12:
                return VIRGL_FORMAT_NV12;
        case DRM_FORMAT_YVU420:
        case DRM_FORMAT_YVU420_ANDROID:
                return VIRGL_FORMAT_YV12;
        default:
                return 0;
        }
}

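/* Checks the host-provided capability bitmask for the given DRM format; bit N of the mask
 * corresponds to virgl format N. */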
static bool virtio_gpu_supports_format(struct virgl_supported_format_mask *supported,
                                       uint32_t drm_format)
{
        uint32_t virgl_format = translate_format(drm_format);
        if (!virgl_format) {
                return false;
        }

        uint32_t bitmask_index = virgl_format / 32;
        uint32_t bit_index = virgl_format % 32;
        return supported->bitmask[bitmask_index] & (1u << bit_index);
}

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
                                       struct format_metadata *metadata, uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        if (priv->has_3d) {
                if ((use_flags & BO_USE_RENDERING) &&
                    !virtio_gpu_supports_format(&priv->caps.v1.render, drm_format)) {
                        drv_log("Skipping unsupported render format: %d\n", drm_format);
                        return;
                }

                if ((use_flags & BO_USE_TEXTURE) &&
                    !virtio_gpu_supports_format(&priv->caps.v1.sampler, drm_format)) {
                        drv_log("Skipping unsupported texture format: %d\n", drm_format);
                        return;
                }
                if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
                    !virtio_gpu_supports_format(&priv->caps.v2.scanout, drm_format)) {
                        drv_log("Unsupported scanout format: %d\n", drm_format);
                        use_flags &= ~BO_USE_SCANOUT;
                }
        }

        drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
                                        uint32_t num_formats, struct format_metadata *metadata,
                                        uint64_t use_flags)
{
        uint32_t i;

        for (i = 0; i < num_formats; i++) {
                virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
        }
}

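/* 2D (dumb) allocation path. Width and height are padded to the Mesa llvmpipe tile size so the
 * host software renderer can work on whole tiles; R8 buffers are left unpadded. */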
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                 uint64_t use_flags)
{
        if (bo->meta.format != DRM_FORMAT_R8) {
                width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
                height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
        }

        return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

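/* If check_flag is set in *flag, clears it and ORs virgl_bind into *bind. */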
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
                               uint32_t virgl_bind)
{
        if ((*flag) & check_flag) {
                (*flag) &= ~check_flag;
                (*bind) |= virgl_bind;
        }
}

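/* Converts minigbm use flags into the virgl bind flags passed at resource creation. */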
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
        /* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
        uint32_t bind = VIRGL_BIND_SHARED;

        handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
        handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
        handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
        handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
        handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

        handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);

        // All host drivers only support linear camera buffer formats. If
        // that changes, this will need to be modified.
        handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
        handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);

        if (use_flags) {
                drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
        }

        return bind;
}

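/* 3D (virgl) allocation path: creates a host-side resource with
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE and shares one GEM handle across all planes. */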
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                  uint64_t use_flags)
{
        int ret;
        uint32_t stride;
        struct drm_virtgpu_resource_create res_create;

        stride = drv_stride_from_format(format, width, 0);
        drv_bo_from_format(bo, stride, height, format);

        /*
         * Setting the target is intended to ensure this resource gets bound as a 2D
         * texture in the host renderer's GL state. All of these resource properties are
         * sent unchanged by the kernel to the host, which in turn sends them unchanged to
         * virglrenderer. When virglrenderer makes a resource, it will convert the target
         * enum to the equivalent one in GL and then bind the resource to that target.
         */
        memset(&res_create, 0, sizeof(res_create));

        res_create.target = PIPE_TEXTURE_2D;
        res_create.format = translate_format(format);
        res_create.bind = use_flags_to_bind(use_flags);
        res_create.width = width;
        res_create.height = height;

        /* For virgl 3D */
        res_create.depth = 1;
        res_create.array_size = 1;
        res_create.last_level = 0;
        res_create.nr_samples = 0;

        /* Total size rounded up to a page boundary. */
        res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE);
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
                return ret;
        }

        for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
                bo->handles[plane].u32 = res_create.bo_handle;

        return 0;
}

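/* Maps a virgl-backed buffer: asks the kernel for the GEM mmap offset, then mmaps the whole
 * buffer. */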
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        int ret;
        struct drm_virtgpu_map gem_map;

        memset(&gem_map, 0, sizeof(gem_map));
        gem_map.handle = bo->handles[0].u32;

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
                return MAP_FAILED;
        }

        vma->length = bo->meta.total_size;
        return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
                    gem_map.offset);
}

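/* Queries the host capability set. When the kernel reports VIRTGPU_PARAM_CAPSET_QUERY_FIX, the
 * v2 capset is requested; otherwise (or on failure) it falls back to v1. */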
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
        int ret;
        struct drm_virtgpu_get_caps cap_args;
        struct drm_virtgpu_getparam param_args;
        uint32_t can_query_v2 = 0;

        memset(&param_args, 0, sizeof(param_args));
        param_args.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
        param_args.value = (uint64_t)(uintptr_t)&can_query_v2;
        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &param_args);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_GETPARAM failed with %s\n", strerror(errno));
        }

        *caps_is_v2 = 0;
        memset(&cap_args, 0, sizeof(cap_args));
        cap_args.addr = (unsigned long long)caps;
        if (can_query_v2) {
                *caps_is_v2 = 1;
                cap_args.cap_set_id = 2;
                cap_args.size = sizeof(union virgl_caps);
        } else {
                cap_args.cap_set_id = 1;
                cap_args.size = sizeof(struct virgl_caps_v1);
        }

        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
                *caps_is_v2 = 0;

                // Fall back to v1
                cap_args.cap_set_id = 1;
                cap_args.size = sizeof(struct virgl_caps_v1);

                ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
                if (ret) {
                        drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
                }
        }

        return ret;
}

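/* Probes for 3D support and advertises the format/use-flag combinations the backend can
 * service. */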
static int virtio_gpu_init(struct driver *drv)
{
        int ret;
        struct virtio_gpu_priv *priv;
        struct drm_virtgpu_getparam args;

        priv = calloc(1, sizeof(*priv));
        if (!priv)
                return -ENOMEM;
        drv->priv = priv;

        memset(&args, 0, sizeof(args));
        args.param = VIRTGPU_PARAM_3D_FEATURES;
        args.value = (uint64_t)(uintptr_t)&priv->has_3d;
        ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &args);
        if (ret) {
                drv_log("virtio 3D acceleration is not available\n");
                /* Be paranoid */
                priv->has_3d = 0;
        }

        if (priv->has_3d) {
                virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);

                /* This doesn't mean the host can scan out every format; it just means the
                 * host hypervisor can display it. */
                virtio_gpu_add_combinations(drv, render_target_formats,
                                            ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
                                            BO_USE_RENDER_MASK | BO_USE_SCANOUT);
                virtio_gpu_add_combinations(drv, texture_source_formats,
                                            ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
                                            BO_USE_TEXTURE_MASK);
        } else {
                /* The virtio primary plane only allows this format. */
                virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
                                           BO_USE_RENDER_MASK | BO_USE_SCANOUT);
                /* The virtio cursor plane only allows this format, and Chrome cannot live
                 * without a renderable ARGB8888 format. */
                virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
                                           BO_USE_RENDER_MASK | BO_USE_CURSOR);
                /* Android needs more formats, but they can no longer be bound as scanouts
                 * after "drm/virtio: fix DRM_FORMAT_* handling". */
                virtio_gpu_add_combinations(drv, render_target_formats,
                                            ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
                                            BO_USE_RENDER_MASK);
                virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
                                            ARRAY_SIZE(dumb_texture_source_formats),
                                            &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
                virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
                                           BO_USE_SW_MASK | BO_USE_LINEAR);
        }

        /* Android CTS tests require this. */
        virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);

        drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
                                   BO_USE_HW_VIDEO_ENCODER);
        drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
                               BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);

        return drv_modify_linear_combinations(drv);
}

static void virtio_gpu_close(struct driver *drv)
{
        free(drv->priv);
        drv->priv = NULL;
}

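/* The create/destroy/map hooks below dispatch to the virgl (3D) or dumb (2D) path depending on
 * host 3D support. */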
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        if (priv->has_3d)
                return virtio_virgl_bo_create(bo, width, height, format, use_flags);
        else
                return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virtio_gpu_bo_destroy(struct bo *bo)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        if (priv->has_3d)
                return drv_gem_bo_destroy(bo);
        else
                return drv_dumb_bo_destroy(bo);
}

static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        if (priv->has_3d)
                return virtio_virgl_bo_map(bo, vma, plane, map_flags);
        else
                return drv_dumb_bo_map(bo, vma, plane, map_flags);
}

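/* Pulls host-side contents into the guest-visible backing store before the CPU reads a
 * mapping. */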
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
        int ret;
        struct drm_virtgpu_3d_transfer_from_host xfer;
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        struct drm_virtgpu_3d_wait waitcmd;

        if (!priv->has_3d)
                return 0;

        // Invalidate is only necessary if the host writes to the buffer.
        if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
                                   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
                return 0;

        memset(&xfer, 0, sizeof(xfer));
        xfer.bo_handle = mapping->vma->handle;
        xfer.box.x = mapping->rect.x;
        xfer.box.y = mapping->rect.y;
        xfer.box.w = mapping->rect.width;
        xfer.box.h = mapping->rect.height;
        xfer.box.d = 1;

        if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
                // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
                // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). For gbm
                // based resources, we can work around this by using the level field to pass
                // the stride to virglrenderer's gbm transfer code. However, we need to avoid
                // doing this for resources which don't rely on that transfer code, which is
                // resources with the BO_USE_RENDERING flag set.
                // TODO(b/145993887): Also send the stride once the kernel patches land.
                xfer.level = bo->meta.strides[0];
        }

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n", strerror(errno));
                return -errno;
        }

        // The transfer needs to complete before invalidate returns so that any host changes
        // are visible and to ensure the host doesn't overwrite subsequent guest changes.
        // TODO(b/136733358): Support returning fences from transfers
        memset(&waitcmd, 0, sizeof(waitcmd));
        waitcmd.handle = mapping->vma->handle;
        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
                return -errno;
        }

        return 0;
}

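/* Pushes CPU writes in a mapping out to the host-side resource. */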
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
        int ret;
        struct drm_virtgpu_3d_transfer_to_host xfer;
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;
        struct drm_virtgpu_3d_wait waitcmd;

        if (!priv->has_3d)
                return 0;

        if (!(mapping->vma->map_flags & BO_MAP_WRITE))
                return 0;

        memset(&xfer, 0, sizeof(xfer));
        xfer.bo_handle = mapping->vma->handle;
        xfer.box.x = mapping->rect.x;
        xfer.box.y = mapping->rect.y;
        xfer.box.w = mapping->rect.width;
        xfer.box.h = mapping->rect.height;
        xfer.box.d = 1;

        // Unfortunately, the kernel doesn't actually pass the guest layer_stride and
        // guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
        // the level to work around this.
        xfer.level = bo->meta.strides[0];

        ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
        if (ret) {
                drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n", strerror(errno));
                return -errno;
        }

        // If the buffer is only accessed by the host GPU, then the flush is ordered
        // with subsequent commands. However, if other host hardware can access the
        // buffer, we need to wait for the transfer to complete for consistency.
        // TODO(b/136733358): Support returning fences from transfers
        if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
                memset(&waitcmd, 0, sizeof(waitcmd));
                waitcmd.handle = mapping->vma->handle;

                ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
                if (ret) {
                        drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
                        return -errno;
                }
        }

        return 0;
}

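/* Resolves Android's flexible formats to concrete DRM formats based on use flags and host
 * support. */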
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
        struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

        switch (format) {
        case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
                /* Camera subsystem requires NV12. */
                if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
                        return DRM_FORMAT_NV12;
                /* HACK: See b/28671744 */
                return DRM_FORMAT_XBGR8888;
        case DRM_FORMAT_FLEX_YCbCr_420_888:
                /*
                 * All of our host drivers prefer NV12 as their flexible media format.
                 * If that changes, this will need to be modified.
                 */
                if (priv->has_3d)
                        return DRM_FORMAT_NV12;
                else
                        return DRM_FORMAT_YVU420;
        default:
                return format;
        }
}

const struct backend backend_virtio_gpu = {
        .name = "virtio_gpu",
        .init = virtio_gpu_init,
        .close = virtio_gpu_close,
        .bo_create = virtio_gpu_bo_create,
        .bo_destroy = virtio_gpu_bo_destroy,
        .bo_import = drv_prime_bo_import,
        .bo_map = virtio_gpu_bo_map,
        .bo_unmap = drv_bo_munmap,
        .bo_invalidate = virtio_gpu_bo_invalidate,
        .bo_flush = virtio_gpu_bo_flush,
        .resolve_format = virtio_gpu_resolve_format,
};