* [PATCH 1/2] drm/virtio: cleanup queue functions
       [not found] <20190813082509.29324-1-kraxel@redhat.com>
@ 2019-08-13  8:25 ` Gerd Hoffmann
  2019-08-13  8:25 ` [PATCH 2/2] drm/virtio: notify virtqueues without holding spinlock Gerd Hoffmann
  1 sibling, 0 replies; 3+ messages in thread
From: Gerd Hoffmann @ 2019-08-13  8:25 UTC (permalink / raw)
  To: dri-devel
  Cc: olvaffe, Gerd Hoffmann, David Airlie, Daniel Vetter,
	open list:VIRTIO GPU DRIVER, open list

Make the queue functions return void; none of the call sites
checks the return value.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/virtio/virtgpu_vq.c | 41 ++++++++++-------------------
 1 file changed, 14 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 7ac20490e1b4..ca91e83ffaef 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -252,8 +252,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 	wake_up(&vgdev->cursorq.ack_queue);
 }
 
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+						struct virtio_gpu_vbuffer *vbuf)
 		__releases(&vgdev->ctrlq.qlock)
 		__acquires(&vgdev->ctrlq.qlock)
 {
@@ -263,7 +263,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 	int ret;
 
 	if (!vgdev->vqs_ready)
-		return -ENODEV;
+		return;
 
 	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 	sgs[outcnt + incnt] = &vcmd;
@@ -294,30 +294,22 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 
 		virtqueue_kick(vq);
 	}
-
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
 }
 
-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
-					struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+					 struct virtio_gpu_vbuffer *vbuf)
 {
-	int rc;
-
 	spin_lock(&vgdev->ctrlq.qlock);
-	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
-	return rc;
 }
 
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
-					       struct virtio_gpu_vbuffer *vbuf,
-					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence *fence)
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+						struct virtio_gpu_vbuffer *vbuf,
+						struct virtio_gpu_ctrl_hdr *hdr,
+						struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
-	int rc;
 
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -338,13 +330,12 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 
 	if (fence)
 		virtio_gpu_fence_emit(vgdev, hdr, fence);
-	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
-	return rc;
 }
 
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
-				   struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+				    struct virtio_gpu_vbuffer *vbuf)
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
 	struct scatterlist *sgs[1], ccmd;
@@ -352,7 +343,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 	int outcnt;
 
 	if (!vgdev->vqs_ready)
-		return -ENODEV;
+		return;
 
 	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
 	sgs[0] = &ccmd;
@@ -374,10 +365,6 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 	}
 
 	spin_unlock(&vgdev->cursorq.qlock);
-
-	if (!ret)
-		ret = vq->num_free;
-	return ret;
 }
 
 /* just create gem objects for userspace and long lived objects,
-- 
2.18.1
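
The rationale is easy to see in a standalone form.  Below is a small
userspace C sketch (invented names, not driver code) of the same
refactor: a helper whose int return value no caller consumes becomes a
void function, and the early-out simply returns.

/* Standalone sketch of the refactor pattern (not driver code; all names
 * here are invented).  The helper used to report free slots or an error
 * through its return value, but since no caller checked it the honest
 * signature is void.
 */
#include <stdio.h>

struct fake_queue {
	int num_free;
};

/* Before: "static int queue_buffer(...)" returning -ENODEV or num_free. */
static void queue_buffer(struct fake_queue *q, const char *buf)
{
	if (q->num_free == 0)
		return;				/* was: return -ENODEV; */

	q->num_free--;
	printf("queued %s, %d slots left\n", buf, q->num_free);
}

int main(void)
{
	struct fake_queue q = { .num_free = 2 };

	/* Call sites never looked at a return value, matching the patch. */
	queue_buffer(&q, "cmd-a");
	queue_buffer(&q, "cmd-b");
	queue_buffer(&q, "cmd-c");		/* dropped, same as before */
	return 0;
}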



* [PATCH 2/2] drm/virtio: notify virtqueues without holding spinlock
       [not found] <20190813082509.29324-1-kraxel@redhat.com>
  2019-08-13  8:25 ` [PATCH 1/2] drm/virtio: cleanup queue functions Gerd Hoffmann
@ 2019-08-13  8:25 ` Gerd Hoffmann
  2019-08-27 16:45   ` Chia-I Wu
  1 sibling, 1 reply; 3+ messages in thread
From: Gerd Hoffmann @ 2019-08-13  8:25 UTC (permalink / raw)
  To: dri-devel
  Cc: olvaffe, Gerd Hoffmann, David Airlie, Daniel Vetter,
	open list:VIRTIO GPU DRIVER, open list

Split the virtqueue_kick() call into virtqueue_kick_prepare(), which
requires serialization, and virtqueue_notify(), which does not.  Move
the virtqueue_notify() call out of the critical section protected by
the queue lock.  This avoids triggering a vmexit while holding the
lock and thereby fixes rather bad spinlock contention.

Suggested-by: Chia-I Wu <olvaffe@gmail.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
---
 drivers/gpu/drm/virtio/virtgpu_vq.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index ca91e83ffaef..e41c96143342 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -252,7 +252,7 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 	wake_up(&vgdev->cursorq.ack_queue);
 }
 
-static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 						struct virtio_gpu_vbuffer *vbuf)
 		__releases(&vgdev->ctrlq.qlock)
 		__acquires(&vgdev->ctrlq.qlock)
@@ -260,10 +260,11 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	struct scatterlist *sgs[3], vcmd, vout, vresp;
 	int outcnt = 0, incnt = 0;
+	bool notify = false;
 	int ret;
 
 	if (!vgdev->vqs_ready)
-		return;
+		return notify;
 
 	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
 	sgs[outcnt + incnt] = &vcmd;
@@ -292,16 +293,21 @@ static void virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 		trace_virtio_gpu_cmd_queue(vq,
 			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-		virtqueue_kick(vq);
+		notify = virtqueue_kick_prepare(vq);
 	}
+	return notify;
 }
 
 static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					 struct virtio_gpu_vbuffer *vbuf)
 {
+	bool notify;
+
 	spin_lock(&vgdev->ctrlq.qlock);
-	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
@@ -310,6 +316,7 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 						struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
+	bool notify;
 
 again:
 	spin_lock(&vgdev->ctrlq.qlock);
@@ -330,8 +337,10 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 
 	if (fence)
 		virtio_gpu_fence_emit(vgdev, hdr, fence);
-	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
 	spin_unlock(&vgdev->ctrlq.qlock);
+	if (notify)
+		virtqueue_notify(vgdev->ctrlq.vq);
 }
 
 static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -339,6 +348,7 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 {
 	struct virtqueue *vq = vgdev->cursorq.vq;
 	struct scatterlist *sgs[1], ccmd;
+	bool notify;
 	int ret;
 	int outcnt;
 
@@ -361,10 +371,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 		trace_virtio_gpu_cmd_queue(vq,
 			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
 
-		virtqueue_kick(vq);
+		notify = virtqueue_kick_prepare(vq);
 	}
 
 	spin_unlock(&vgdev->cursorq.qlock);
+
+	if (notify)
+		virtqueue_notify(vq);
 }
 
 /* just create gem objects for userspace and long lived objects,
-- 
2.18.1
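
The locking pattern here generalizes beyond virtio.  A minimal
standalone sketch (pthreads, invented names, not driver code): decide
whether the device needs a kick while holding the queue lock, where
serialization is required, and issue the expensive notification only
after the lock has been dropped.  In the driver the expensive part is
virtqueue_notify(), which can trap to the host (a vmexit), while
virtqueue_kick_prepare() only touches guest-side ring state and stays
cheap under the spinlock.

/* Standalone sketch of the pattern (pthreads, invented names, not driver
 * code): the notify decision is made under the lock, the notification
 * itself happens outside of it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;				/* protected by queue_lock */

/* Analogue of add + virtqueue_kick_prepare(): must run serialized. */
static bool enqueue_locked(const char *cmd)
{
	pending++;
	printf("queued %s, %d pending\n", cmd, pending);
	return true;				/* "device needs a kick" */
}

/* Analogue of virtqueue_notify(): expensive, safe to call unlocked. */
static void notify_device(void)
{
	printf("notifying device outside the lock\n");
}

static void queue_cmd(const char *cmd)
{
	bool notify;

	pthread_mutex_lock(&queue_lock);
	notify = enqueue_locked(cmd);		/* serialized, cheap */
	pthread_mutex_unlock(&queue_lock);

	if (notify)
		notify_device();		/* no lock held, no contention */
}

int main(void)
{
	queue_cmd("cmd-a");
	queue_cmd("cmd-b");
	return 0;
}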



* Re: [PATCH 2/2] drm/virtio: notify virtqueues without holding spinlock
  2019-08-13  8:25 ` [PATCH 2/2] drm/virtio: notify virtqueues without holding spinlock Gerd Hoffmann
@ 2019-08-27 16:45   ` Chia-I Wu
  0 siblings, 0 replies; 3+ messages in thread
From: Chia-I Wu @ 2019-08-27 16:45 UTC (permalink / raw)
  To: Gerd Hoffmann
  Cc: ML dri-devel, David Airlie, Daniel Vetter,
	open list:VIRTIO GPU DRIVER, open list

On Tue, Aug 13, 2019 at 1:25 AM Gerd Hoffmann <kraxel@redhat.com> wrote:
>
> Split the virtqueue_kick() call into virtqueue_kick_prepare(), which
> requires serialization, and virtqueue_notify(), which does not.  Move
> the virtqueue_notify() call out of the critical section protected by
> the queue lock.  This avoids triggering a vmexit while holding the
> lock and thereby fixes rather bad spinlock contention.
>
> Suggested-by: Chia-I Wu <olvaffe@gmail.com>
> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Series is

Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
