OSDN Git Service

minigbm: virtgpu: add resource create blob function
authorGurchetan Singh <gurchetansingh@chromium.org>
Sat, 14 Sep 2019 00:49:20 +0000 (17:49 -0700)
committerCommit Bot <commit-bot@chromium.org>
Mon, 31 Aug 2020 17:48:08 +0000 (17:48 +0000)
Some SW intensive apps could benefit from this.

This should decrease power consumption when playing a Youtube
1080p60fps video by 10%.

BUG=chromium:924405
TEST=mmap_test

Change-Id: I80bb08a95b7387eb8af58e1f0d30beb695fc3e74
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/minigbm/+/1804914
Tested-by: Gurchetan Singh <gurchetansingh@chromium.org>
Reviewed-by: Lingfeng Yang <lfy@google.com>
Reviewed-by: Jason Macnak <natsu@google.com>
Reviewed-by: Gurchetan Singh <gurchetansingh@chromium.org>
Commit-Queue: Gurchetan Singh <gurchetansingh@chromium.org>

external/virtgpu_drm.h
virtio_gpu.c

index a92d764..f324520 100644 (file)
@@ -46,6 +46,7 @@ extern "C" {
 #define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
 #define DRM_VIRTGPU_WAIT     0x08
 #define DRM_VIRTGPU_GET_CAPS  0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
 
 #define VIRTGPU_EXECBUF_FENCE_FD_IN    0x01
 #define VIRTGPU_EXECBUF_FENCE_FD_OUT   0x02
@@ -71,6 +72,9 @@ struct drm_virtgpu_execbuffer {
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
 #define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing  */
 
 struct drm_virtgpu_getparam {
        __u64 param;
@@ -101,9 +105,9 @@ struct drm_virtgpu_resource_info {
        __u32 res_handle;
        __u32 size;
        union {
-               __u32 stride;
+               __u32 blob_mem;
                __u32 strides[4]; /* strides[0] is accessible with stride. */
-       };
+       };
        __u32 num_planes;
        __u32 offsets[4];
        __u64 format_modifier;
@@ -123,6 +127,8 @@ struct drm_virtgpu_3d_transfer_to_host {
        struct drm_virtgpu_3d_box box;
        __u32 level;
        __u32 offset;
+       __u32 stride;
+       __u32 layer_stride;
 };
 
 struct drm_virtgpu_3d_transfer_from_host {
@@ -130,6 +136,8 @@ struct drm_virtgpu_3d_transfer_from_host {
        struct drm_virtgpu_3d_box box;
        __u32 level;
        __u32 offset;
+       __u32 stride;
+       __u32 layer_stride;
 };
 
 #define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -146,6 +154,31 @@ struct drm_virtgpu_get_caps {
        __u32 pad;
 };
 
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST              0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D             0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST       0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE       0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE      0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE   0x0004
+       /* zero is invalid blob_mem */
+       __u32 blob_mem;
+       __u32 blob_flags;
+       __u32 bo_handle;
+       __u32 res_handle;
+       __u64 size;
+
+       /*
+        * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+        * VIRTGPU_BLOB_MEM_HOST3D otherwise, must be zero.
+        */
+       __u32 pad;
+       __u32 cmd_size;
+       __u64 cmd;
+       __u64 blob_id;
+};
+
 #define DRM_IOCTL_VIRTGPU_MAP \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
@@ -181,6 +214,10 @@ struct drm_virtgpu_get_caps {
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
        struct drm_virtgpu_get_caps)
 
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB                         \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB,   \
+               struct drm_virtgpu_resource_create_blob)
+
 #if defined(__cplusplus)
 }
 #endif
index d0e477b..38d0249 100644 (file)
@@ -36,6 +36,9 @@ struct feature {
 enum feature_id {
        feat_3d,
        feat_capset_fix,
+       feat_resource_blob,
+       feat_host_visible,
+       feat_host_cross_device,
        feat_max,
 };
 
@@ -45,8 +48,11 @@ enum feature_id {
                x, #x, 0                                                                           \
        }
 
-static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES),
-                                    FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) };
+static struct feature features[] = {
+       FEATURE(VIRTGPU_PARAM_3D_FEATURES),   FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
+       FEATURE(VIRTGPU_PARAM_RESOURCE_BLOB), FEATURE(VIRTGPU_PARAM_HOST_VISIBLE),
+       FEATURE(VIRTGPU_PARAM_CROSS_DEVICE),
+};
 
 static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
                                                  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
@@ -674,9 +680,79 @@ static void virtio_gpu_close(struct driver *drv)
        drv->priv = NULL;
 }
 
+static int virtio_gpu_bo_create_blob(struct driver *drv, struct bo *bo)
+{
+       int ret;
+       uint32_t stride;
+       uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
+       struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
+
+       stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
+       drv_bo_from_format(bo, stride, bo->meta.height, bo->meta.format);
+       bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
+       bo->meta.tiling = VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+
+       cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
+       cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
+       cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
+       cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
+       cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
+       cmd[VIRGL_PIPE_RES_CREATE_BIND] = use_flags_to_bind(bo->meta.use_flags);
+       cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
+
+       drm_rc_blob.cmd = (uint64_t)&cmd;
+       drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
+       drm_rc_blob.size = bo->meta.total_size;
+       drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
+       drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE | VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+
+       ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+       if (ret < 0) {
+               drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
+               return -errno;
+       }
+
+       for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
+               bo->handles[plane].u32 = drm_rc_blob.bo_handle;
+
+       return 0;
+}
+
+static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
+{
+       struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;
+
+       // TODO(gurchetansingh): remove once all minigbm users are blob-safe
+#ifndef VIRTIO_GPU_NEXT
+       return false;
+#endif
+
+       // Only use blob when host gbm is available
+       if (!priv->host_gbm_enabled)
+               return false;
+
+       // Focus on SW read/write apps for now
+       if (use_flags & (BO_USE_RENDERING | BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER))
+               return false;
+
+       // Simple, strictly defined formats for now
+       if (format != DRM_FORMAT_YVU420_ANDROID || format != DRM_FORMAT_R8)
+               return false;
+
+       if (use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR |
+                        BO_USE_CAMERA_WRITE | BO_USE_CAMERA_READ))
+               return true;
+
+       return false;
+}
+
 static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
                                uint64_t use_flags)
 {
+       if (features[feat_resource_blob].enabled && features[feat_host_visible].enabled &&
+           should_use_blob(bo->drv, format, use_flags))
+               return virtio_gpu_bo_create_blob(bo->drv, bo);
+
        if (features[feat_3d].enabled)
                return virtio_virgl_bo_create(bo, width, height, format, use_flags);
        else
@@ -716,6 +792,10 @@ static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
                                   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
                return 0;
 
+       if (features[feat_resource_blob].enabled &&
+           (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+               return 0;
+
        memset(&xfer, 0, sizeof(xfer));
        xfer.bo_handle = mapping->vma->handle;
 
@@ -799,6 +879,10 @@ static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
        if (!(mapping->vma->map_flags & BO_MAP_WRITE))
                return 0;
 
+       if (features[feat_resource_blob].enabled &&
+           (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+               return 0;
+
        memset(&xfer, 0, sizeof(xfer));
        xfer.bo_handle = mapping->vma->handle;