
drm/virtio: notify virtqueues without holding spinlock
author    Gerd Hoffmann <kraxel@redhat.com>
          Tue, 13 Aug 2019 08:25:09 +0000 (10:25 +0200)
committer Gerd Hoffmann <kraxel@redhat.com>
          Wed, 28 Aug 2019 07:28:06 +0000 (09:28 +0200)
Split virtqueue_kick() call into virtqueue_kick_prepare(), which
requires serialization, and virtqueue_notify(), which does not.  Move
the virtqueue_notify() call out of the critical section protected by the
queue lock.  This avoids triggering a vmexit while holding the lock and
thereby fixes a rather bad case of spinlock contention.
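
The resulting pattern, as a minimal self-contained sketch (struct my_queue,
queue_and_notify() and the error handling here are illustrative assumptions,
not code from this driver):

#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>

struct my_queue {
	spinlock_t qlock;
	struct virtqueue *vq;
};

static void queue_and_notify(struct my_queue *q, struct scatterlist *sgs[],
			     int outcnt, int incnt, void *data)
{
	bool notify;

	spin_lock(&q->qlock);
	/* Ring updates must be serialized against other submitters. */
	if (virtqueue_add_sgs(q->vq, sgs, outcnt, incnt, data, GFP_ATOMIC) < 0) {
		spin_unlock(&q->qlock);
		return;	/* real code would retry or report the error */
	}
	/* Cheap: only checks whether the device wants a notification. */
	notify = virtqueue_kick_prepare(q->vq);
	spin_unlock(&q->qlock);

	/*
	 * Expensive: the register write that may trap to the host,
	 * now issued without holding the lock.
	 */
	if (notify)
		virtqueue_notify(q->vq);
}

virtqueue_kick_prepare() only checks (after the appropriate memory barrier)
whether the device actually wants a notification, so it is cheap and must
stay inside the critical section; virtqueue_notify() performs the
notification itself, typically an MMIO/PIO write that causes a vmexit, so
other vcpus no longer spin on the queue lock for the duration of that exit.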

Suggested-by: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190813082509.29324-3-kraxel@redhat.com

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index ca91e83..e41c961 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -252,7 +252,7 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
        wake_up(&vgdev->cursorq.ack_queue);
 }
 
-static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_vbuffer *vbuf)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
@@ -260,10 +260,11 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
+       bool notify = false;
        int ret;
 
        if (!vgdev->vqs_ready)
-               return;
+               return notify;
 
        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
@@ -292,16 +293,21 @@ retry:
                trace_virtio_gpu_cmd_queue(vq,
                        (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-               virtqueue_kick(vq);
+               notify = virtqueue_kick_prepare(vq);
        }
+       return notify;
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                         struct virtio_gpu_vbuffer *vbuf)
 {
+       bool notify;
+
        spin_lock(&vgdev->ctrlq.qlock);
-       virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+       notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
+       if (notify)
+               virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
@@ -310,6 +316,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                                struct virtio_gpu_fence *fence)
 {
        struct virtqueue *vq = vgdev->ctrlq.vq;
+       bool notify;
 
 again:
        spin_lock(&vgdev->ctrlq.qlock);
@@ -330,8 +337,10 @@ again:
 
        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
-       virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+       notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
+       if (notify)
+               virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -339,6 +348,7 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 {
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
+       bool notify;
        int ret;
        int outcnt;
 
@@ -361,10 +371,13 @@ retry:
                trace_virtio_gpu_cmd_queue(vq,
                        (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-               virtqueue_kick(vq);
+               notify = virtqueue_kick_prepare(vq);
        }
 
        spin_unlock(&vgdev->cursorq.qlock);
+
+       if (notify)
+               virtqueue_notify(vq);
 }
 
 /* just create gem objects for userspace and long lived objects,