* [PATCH 0/7] vhost flush cleanups
@ 2021-12-07  2:45 Mike Christie
  2021-12-07  2:45 ` [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper Mike Christie
                   ` (6 more replies)
  0 siblings, 7 replies; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

The following patches are Andrey Ryabinin's flush cleanups and some
from me. They reduce the number of flush calls and remove some bogus
ones where we don't even have a worker running anymore.

I wanted to send these patches now because my vhost threading patches
conflict with them and are now built on top of them, and they seem like
nice cleanups in general.



* [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper
  2021-12-07  2:45 [PATCH 0/7] vhost flush cleanups Mike Christie
@ 2021-12-07  2:45 ` Mike Christie
  2021-12-08  3:49   ` Jason Wang
  2021-12-07  2:45 ` [PATCH 2/7] vhost: flush dev once during vhost_dev_stop Mike Christie
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

vhost_poll_flush() is a simple wrapper around vhost_work_dev_flush().
It gives the wrong impression that we are doing some work on the
vhost_poll, while in fact it flushes vhost_poll->dev.
It only complicates understanding of the code and leads to mistakes
like flushing the same vhost_dev several times in a row.

Just remove vhost_poll_flush() and call vhost_work_dev_flush() directly.

Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
[merge vhost_poll_flush removal from Stefano Garzarella]
Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/net.c   |  4 ++--
 drivers/vhost/test.c  |  2 +-
 drivers/vhost/vhost.c | 12 ++----------
 drivers/vhost/vhost.h |  1 -
 drivers/vhost/vsock.c |  2 +-
 5 files changed, 6 insertions(+), 15 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 28ef323882fb..11221f6d11b8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1375,8 +1375,8 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
 
 static void vhost_net_flush_vq(struct vhost_net *n, int index)
 {
-	vhost_poll_flush(n->poll + index);
-	vhost_poll_flush(&n->vqs[index].vq.poll);
+	vhost_work_dev_flush(n->poll[index].dev);
+	vhost_work_dev_flush(n->vqs[index].vq.poll.dev);
 }
 
 static void vhost_net_flush(struct vhost_net *n)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a09dedc79f68..1a8ab1d8cb1c 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -146,7 +146,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
 
 static void vhost_test_flush_vq(struct vhost_test *n, int index)
 {
-	vhost_poll_flush(&n->vqs[index].poll);
+	vhost_work_dev_flush(n->vqs[index].poll.dev);
 }
 
 static void vhost_test_flush(struct vhost_test *n)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 8cf259d798c0..7346fa519eb6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -244,14 +244,6 @@ void vhost_work_dev_flush(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
-{
-	vhost_work_dev_flush(poll->dev);
-}
-EXPORT_SYMBOL_GPL(vhost_poll_flush);
-
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
 	if (!dev->worker)
@@ -677,7 +669,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
 	for (i = 0; i < dev->nvqs; ++i) {
 		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 			vhost_poll_stop(&dev->vqs[i]->poll);
-			vhost_poll_flush(&dev->vqs[i]->poll);
+			vhost_work_dev_flush(dev->vqs[i]->poll.dev);
 		}
 	}
 }
@@ -1721,7 +1713,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 	mutex_unlock(&vq->mutex);
 
 	if (pollstop && vq->handle_kick)
-		vhost_poll_flush(&vq->poll);
+		vhost_work_dev_flush(vq->poll.dev);
 	return r;
 }
 EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 09748694cb66..67b23e178812 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -56,7 +56,6 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     __poll_t mask, struct vhost_dev *dev);
 int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
-void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
 void vhost_work_dev_flush(struct vhost_dev *dev);
 
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index d6ca1c7ad513..2339587bcd31 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -707,7 +707,7 @@ static void vhost_vsock_flush(struct vhost_vsock *vsock)
 
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
 		if (vsock->vqs[i].handle_kick)
-			vhost_poll_flush(&vsock->vqs[i].poll);
+			vhost_work_dev_flush(vsock->vqs[i].poll.dev);
 	vhost_work_dev_flush(&vsock->dev);
 }
 
-- 
2.25.1


* [PATCH 2/7] vhost: flush dev once during vhost_dev_stop
  2021-12-07  2:45 [PATCH 0/7] vhost flush cleanups Mike Christie
  2021-12-07  2:45 ` [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper Mike Christie
@ 2021-12-07  2:45 ` Mike Christie
  2021-12-07  2:45 ` [PATCH 3/7] vhost_net: get rid of vhost_net_flush_vq() and extra flush calls Mike Christie
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

When vhost_work_dev_flush returns, all work queued at that time will have
completed. There is then no need to flush after every vhost_poll_stop
call, and we can move the flush call to after the loop that stops the
pollers.
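
For reference, the flush works by queueing a completion at the tail of the
worker's work list and waiting for it, so everything queued before it has
already run once it completes. Roughly (a simplified sketch of
drivers/vhost/vhost.c, not the exact code):

	struct vhost_flush_struct {
		struct vhost_work work;
		struct completion wait_event;
	};

	static void vhost_flush_work(struct vhost_work *work)
	{
		struct vhost_flush_struct *s;

		s = container_of(work, struct vhost_flush_struct, work);
		complete(&s->wait_event);
	}

	void vhost_work_dev_flush(struct vhost_dev *dev)
	{
		struct vhost_flush_struct flush;

		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);
		/* lands after all previously queued work ... */
		vhost_work_queue(dev, &flush.work);
		/* ... so all of it has run by the time this returns */
		wait_for_completion(&flush.wait_event);
	}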

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/vhost.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 7346fa519eb6..17e5956e7424 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -667,11 +667,11 @@ void vhost_dev_stop(struct vhost_dev *dev)
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
-		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
+		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
 			vhost_poll_stop(&dev->vqs[i]->poll);
-			vhost_work_dev_flush(dev->vqs[i]->poll.dev);
-		}
 	}
+
+	vhost_work_dev_flush(dev);
 }
 EXPORT_SYMBOL_GPL(vhost_dev_stop);
 
-- 
2.25.1


* [PATCH 3/7] vhost_net: get rid of vhost_net_flush_vq() and extra flush calls
  2021-12-07  2:45 [PATCH 0/7] vhost flush cleanups Mike Christie
  2021-12-07  2:45 ` [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper Mike Christie
  2021-12-07  2:45 ` [PATCH 2/7] vhost: flush dev once during vhost_dev_stop Mike Christie
@ 2021-12-07  2:45 ` Mike Christie
  2021-12-07  2:45 ` [PATCH 4/7] vhost_test: remove vhost_test_flush_vq() Mike Christie
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

From: Andrey Ryabinin <arbn@yandex-team.com>

vhost_net_flush_vq() calls vhost_work_dev_flush() twice, passing the
vhost_dev pointers obtained via 'n->poll[index].dev' and
'n->vqs[index].vq.poll.dev'. These are actually the same pointer,
initialized in vhost_net_open()/vhost_dev_init()/vhost_poll_init().
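
The two end up being the same vhost_dev roughly like this (a simplified
sketch of vhost_net_open()/vhost_poll_init(), not the exact code):

	/* vhost_net_open() */
	dev = &n->dev;
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, ...);   /* sets vqs[i]->poll.dev = dev */
	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);

	/* vhost_poll_init() */
	void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
			     __poll_t mask, struct vhost_dev *dev)
	{
		...
		poll->dev = dev;   /* both paths store the same &n->dev */
	}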

Remove vhost_net_flush_vq() and call vhost_work_dev_flush() directly.
Flush only once instead of making several flush calls in a row, which
is rather useless.

Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
[drop vhost_dev forward declaration in vhost.h]
Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/net.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 11221f6d11b8..b1feb5e0571e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1373,16 +1373,9 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
 	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
 }
 
-static void vhost_net_flush_vq(struct vhost_net *n, int index)
-{
-	vhost_work_dev_flush(n->poll[index].dev);
-	vhost_work_dev_flush(n->vqs[index].vq.poll.dev);
-}
-
 static void vhost_net_flush(struct vhost_net *n)
 {
-	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
-	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+	vhost_work_dev_flush(&n->dev);
 	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
 		n->tx_flush = true;
@@ -1572,7 +1565,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	}
 
 	if (oldsock) {
-		vhost_net_flush_vq(n, index);
+		vhost_work_dev_flush(&n->dev);
 		sockfd_put(oldsock);
 	}
 
-- 
2.25.1


* [PATCH 4/7] vhost_test: remove vhost_test_flush_vq()
  2021-12-07  2:45 [PATCH 0/7] vhost flush cleanups Mike Christie
                   ` (2 preceding siblings ...)
  2021-12-07  2:45 ` [PATCH 3/7] vhost_net: get rid of vhost_net_flush_vq() and extra flush calls Mike Christie
@ 2021-12-07  2:45 ` Mike Christie
  2021-12-07  2:45 ` [PATCH 5/7] vhost_vsock: simplify vhost_vsock_flush() Mike Christie
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

From: Andrey Ryabinin <arbn@yandex-team.com>

vhost_test_flush_vq() is just a simple wrapper around vhost_work_dev_flush()
and seems to have no value. It's just easier to call vhost_work_dev_flush()
directly. Besides, there is no point in obtaining the vhost_dev pointer
via 'n->vqs[index].poll.dev' when we can just use &n->dev.
They are the same pointer, see vhost_test_open()/vhost_dev_init().

Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/test.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 1a8ab1d8cb1c..d4f63068d762 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -144,14 +144,9 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
 	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
 }
 
-static void vhost_test_flush_vq(struct vhost_test *n, int index)
-{
-	vhost_work_dev_flush(n->vqs[index].poll.dev);
-}
-
 static void vhost_test_flush(struct vhost_test *n)
 {
-	vhost_test_flush_vq(n, VHOST_TEST_VQ);
+	vhost_work_dev_flush(&n->dev);
 }
 
 static int vhost_test_release(struct inode *inode, struct file *f)
@@ -209,7 +204,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
 			goto err;
 
 		if (oldpriv) {
-			vhost_test_flush_vq(n, index);
+			vhost_test_flush(n);
 		}
 	}
 
@@ -302,7 +297,7 @@ static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
 	mutex_unlock(&vq->mutex);
 
 	if (enable) {
-		vhost_test_flush_vq(n, index);
+		vhost_test_flush(n);
 	}
 
 	mutex_unlock(&n->dev.mutex);
-- 
2.25.1


* [PATCH 5/7] vhost_vsock: simplify vhost_vsock_flush()
  2021-12-07  2:45 [PATCH 0/7] vhost flush cleanups Mike Christie
                   ` (3 preceding siblings ...)
  2021-12-07  2:45 ` [PATCH 4/7] vhost_test: remove vhost_test_flush_vq() Mike Christie
@ 2021-12-07  2:45 ` Mike Christie
  2021-12-08  3:53   ` Jason Wang
  2021-12-07  2:45 ` [PATCH 6/7] vhost-scsi: drop flush after vhost_dev_cleanup Mike Christie
  2021-12-07  2:45 ` [PATCH 7/7] vhost-test: " Mike Christie
  6 siblings, 1 reply; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

From: Andrey Ryabinin <arbn@yandex-team.com>

vhost_vsock_flush() calls vhost_work_dev_flush(vsock->vqs[i].poll.dev)
before vhost_work_dev_flush(&vsock->dev). This seems pointless,
as vsock->vqs[i].poll.dev is the same as &vsock->dev, and several flushes
in a row don't do anything useful; one is enough.

Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/vsock.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 2339587bcd31..1f38160b249d 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -703,11 +703,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 
 static void vhost_vsock_flush(struct vhost_vsock *vsock)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
-		if (vsock->vqs[i].handle_kick)
-			vhost_work_dev_flush(vsock->vqs[i].poll.dev);
 	vhost_work_dev_flush(&vsock->dev);
 }
 
-- 
2.25.1


* [PATCH 6/7] vhost-scsi: drop flush after vhost_dev_cleanup
  2021-12-07  2:45 [PATCH 0/7] vhost flush cleanups Mike Christie
                   ` (4 preceding siblings ...)
  2021-12-07  2:45 ` [PATCH 5/7] vhost_vsock: simplify vhost_vsock_flush() Mike Christie
@ 2021-12-07  2:45 ` Mike Christie
  2021-12-08  4:07   ` Jason Wang
  2021-12-07  2:45 ` [PATCH 7/7] vhost-test: " Mike Christie
  6 siblings, 1 reply; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

The flush after vhost_dev_cleanup is not needed because:

1. It doesn't do anything. vhost_dev_cleanup will stop the worker thread,
so the flush call will just return since the device no longer has a worker
(see the sketch below).

2. It's not needed for the re-queue case. vhost_scsi_evt_handle_kick grabs
the vq->mutex and, if the backend is NULL, returns without queueing any work.
vhost_scsi_clear_endpoint sets the backend to NULL under the vq->mutex and
then drops the mutex and does a flush. So we know that once
vhost_scsi_clear_endpoint has dropped the mutex after clearing the backend,
no evt-related work will be able to requeue itself. The flush then makes
sure any already-queued evt work is run and returns.
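
For reference, once the worker is gone the queue/flush calls become no-ops,
roughly (a simplified sketch of drivers/vhost/vhost.c, not the exact code):

	void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
	{
		if (!dev->worker)
			return;   /* no worker thread, nothing to queue on */
		...
	}

	void vhost_work_dev_flush(struct vhost_dev *dev)
	{
		if (!dev->worker)
			return;   /* nothing left to flush after vhost_dev_cleanup() */
		...
	}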

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/scsi.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 532e204f2b1b..94535c813ef7 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1827,8 +1827,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	vhost_scsi_clear_endpoint(vs, &t);
 	vhost_dev_stop(&vs->dev);
 	vhost_dev_cleanup(&vs->dev);
-	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
-	vhost_scsi_flush(vs);
 	kfree(vs->dev.vqs);
 	kvfree(vs);
 	return 0;
-- 
2.25.1


* [PATCH 7/7] vhost-test: drop flush after vhost_dev_cleanup
  2021-12-07  2:45 [PATCH 0/7] vhost flush cleanups Mike Christie
                   ` (5 preceding siblings ...)
  2021-12-07  2:45 ` [PATCH 6/7] vhost-scsi: drop flush after vhost_dev_cleanup Mike Christie
@ 2021-12-07  2:45 ` Mike Christie
  2021-12-08  4:09   ` Jason Wang
  6 siblings, 1 reply; 18+ messages in thread
From: Mike Christie @ 2021-12-07  2:45 UTC (permalink / raw)
  To: stefanha, jasowang, mst, sgarzare, virtualization, arbn

The flush after vhost_dev_cleanup is not needed because:

1. It doesn't do anything. vhost_dev_cleanup will stop the worker thread,
so the flush call will just return since the device no longer has a worker.

2. It's not needed. The comment about jobs re-queueing themselves does
not look correct, because handle_vq does not requeue work.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/vhost/test.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index d4f63068d762..57e24eceff27 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -158,9 +158,6 @@ static int vhost_test_release(struct inode *inode, struct file *f)
 	vhost_test_flush(n);
 	vhost_dev_stop(&n->dev);
 	vhost_dev_cleanup(&n->dev);
-	/* We do an extra flush before freeing memory,
-	 * since jobs can re-queue themselves. */
-	vhost_test_flush(n);
 	kfree(n);
 	return 0;
 }
-- 
2.25.1


* Re: [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper
  2021-12-07  2:45 ` [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper Mike Christie
@ 2021-12-08  3:49   ` Jason Wang
  2021-12-08 16:41     ` Mike Christie
  0 siblings, 1 reply; 18+ messages in thread
From: Jason Wang @ 2021-12-08  3:49 UTC (permalink / raw)
  To: Mike Christie; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
<michael.christie@oracle.com> wrote:
>
> vhost_poll_flush() is a simple wrapper around vhost_work_dev_flush().
> It gives the wrong impression that we are doing some work on the vhost_poll,
> while in fact it flushes vhost_poll->dev.

This "problem" is a byproduct of 7235acdb1144 ("vhost: simplify work flushing").

Before that we indeed had a per-poll flush.

> It only complicates understanding of the code and leads to mistakes
> like flushing the same vhost_dev several times in a row.
>
> Just remove vhost_poll_flush() and call vhost_work_dev_flush() directly.

Not a native speaker, but since we don't have a per-work flush, is it
better to rename this simply to vhost_flush()?

Thanks

>
> Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
> [merge vhost_poll_flush removal from Stefano Garzarella]
> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> ---
>  drivers/vhost/net.c   |  4 ++--
>  drivers/vhost/test.c  |  2 +-
>  drivers/vhost/vhost.c | 12 ++----------
>  drivers/vhost/vhost.h |  1 -
>  drivers/vhost/vsock.c |  2 +-
>  5 files changed, 6 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index 28ef323882fb..11221f6d11b8 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -1375,8 +1375,8 @@ static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
>
>  static void vhost_net_flush_vq(struct vhost_net *n, int index)
>  {
> -       vhost_poll_flush(n->poll + index);
> -       vhost_poll_flush(&n->vqs[index].vq.poll);
> +       vhost_work_dev_flush(n->poll[index].dev);
> +       vhost_work_dev_flush(n->vqs[index].vq.poll.dev);
>  }
>
>  static void vhost_net_flush(struct vhost_net *n)
> diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
> index a09dedc79f68..1a8ab1d8cb1c 100644
> --- a/drivers/vhost/test.c
> +++ b/drivers/vhost/test.c
> @@ -146,7 +146,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)
>
>  static void vhost_test_flush_vq(struct vhost_test *n, int index)
>  {
> -       vhost_poll_flush(&n->vqs[index].poll);
> +       vhost_work_dev_flush(n->vqs[index].poll.dev);
>  }
>
>  static void vhost_test_flush(struct vhost_test *n)
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 8cf259d798c0..7346fa519eb6 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -244,14 +244,6 @@ void vhost_work_dev_flush(struct vhost_dev *dev)
>  }
>  EXPORT_SYMBOL_GPL(vhost_work_dev_flush);
>
> -/* Flush any work that has been scheduled. When calling this, don't hold any
> - * locks that are also used by the callback. */
> -void vhost_poll_flush(struct vhost_poll *poll)
> -{
> -       vhost_work_dev_flush(poll->dev);
> -}
> -EXPORT_SYMBOL_GPL(vhost_poll_flush);
> -
>  void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
>  {
>         if (!dev->worker)
> @@ -677,7 +669,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
>         for (i = 0; i < dev->nvqs; ++i) {
>                 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
>                         vhost_poll_stop(&dev->vqs[i]->poll);
> -                       vhost_poll_flush(&dev->vqs[i]->poll);
> +                       vhost_work_dev_flush(dev->vqs[i]->poll.dev);
>                 }
>         }
>  }
> @@ -1721,7 +1713,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
>         mutex_unlock(&vq->mutex);
>
>         if (pollstop && vq->handle_kick)
> -               vhost_poll_flush(&vq->poll);
> +               vhost_work_dev_flush(vq->poll.dev);
>         return r;
>  }
>  EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 09748694cb66..67b23e178812 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -56,7 +56,6 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
>                      __poll_t mask, struct vhost_dev *dev);
>  int vhost_poll_start(struct vhost_poll *poll, struct file *file);
>  void vhost_poll_stop(struct vhost_poll *poll);
> -void vhost_poll_flush(struct vhost_poll *poll);
>  void vhost_poll_queue(struct vhost_poll *poll);
>  void vhost_work_dev_flush(struct vhost_dev *dev);
>
> diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
> index d6ca1c7ad513..2339587bcd31 100644
> --- a/drivers/vhost/vsock.c
> +++ b/drivers/vhost/vsock.c
> @@ -707,7 +707,7 @@ static void vhost_vsock_flush(struct vhost_vsock *vsock)
>
>         for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
>                 if (vsock->vqs[i].handle_kick)
> -                       vhost_poll_flush(&vsock->vqs[i].poll);
> +                       vhost_work_dev_flush(vsock->vqs[i].poll.dev);
>         vhost_work_dev_flush(&vsock->dev);
>  }
>
> --
> 2.25.1
>


* Re: [PATCH 5/7] vhost_vsock: simplify vhost_vsock_flush()
  2021-12-07  2:45 ` [PATCH 5/7] vhost_vsock: simplify vhost_vsock_flush() Mike Christie
@ 2021-12-08  3:53   ` Jason Wang
  2021-12-08 17:35     ` michael.christie
  0 siblings, 1 reply; 18+ messages in thread
From: Jason Wang @ 2021-12-08  3:53 UTC (permalink / raw)
  To: Mike Christie; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
<michael.christie@oracle.com> wrote:
>
> From: Andrey Ryabinin <arbn@yandex-team.com>
>
> vhost_vsock_flush() calls vhost_work_dev_flush(vsock->vqs[i].poll.dev)
> before vhost_work_dev_flush(&vsock->dev). This seems pointless
> as vsock->vqs[i].poll.dev is the same as &vsock->dev and several flushes
> in a row don't do anything useful; one is enough.
>
> Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
> Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> ---
>  drivers/vhost/vsock.c | 5 -----
>  1 file changed, 5 deletions(-)
>
> diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
> index 2339587bcd31..1f38160b249d 100644
> --- a/drivers/vhost/vsock.c
> +++ b/drivers/vhost/vsock.c
> @@ -703,11 +703,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
>
>  static void vhost_vsock_flush(struct vhost_vsock *vsock)
>  {
> -       int i;
> -
> -       for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
> -               if (vsock->vqs[i].handle_kick)
> -                       vhost_work_dev_flush(vsock->vqs[i].poll.dev);
>         vhost_work_dev_flush(&vsock->dev);
>  }

Would it be better to be consistent with vhost-net, so that we can simply
remove the vhost_vsock_flush() wrapper here?

Thanks

>
> --
> 2.25.1
>


* Re: [PATCH 6/7] vhost-scsi: drop flush after vhost_dev_cleanup
  2021-12-07  2:45 ` [PATCH 6/7] vhost-scsi: drop flush after vhost_dev_cleanup Mike Christie
@ 2021-12-08  4:07   ` Jason Wang
  2021-12-08 16:45     ` Mike Christie
  0 siblings, 1 reply; 18+ messages in thread
From: Jason Wang @ 2021-12-08  4:07 UTC (permalink / raw)
  To: Mike Christie; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
<michael.christie@oracle.com> wrote:
>
> The flush after vhost_dev_cleanup is not needed because:
>
> 1. It doesn't do anything. vhost_dev_cleanup will stop the worker thread
> so the flush call will just return since the device no longer has a worker.
>
> 2. It's not needed for the re-queue case. vhost_scsi_evt_handle_kick grabs
> the mutex and if the backend is NULL will return without queueing a work.
> vhost_scsi_clear_endpoint will set the backend to NULL under the vq->mutex
> then drops the mutex and does a flush. So we know when
> vhost_scsi_clear_endpoint has dropped the mutex after clearing the backend
> no evt related work will be able to requeue. The flush would then make sure
> any queued evts are run and return.

What happens if there is a kick after vhost_scsi_clear_endpoint() but
before vhost_dev_stop()?

Is this safe or the kthread_stop() in vhost_dev_cleanup() makes us safe?

Thanks

>
> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> ---
>  drivers/vhost/scsi.c | 2 --
>  1 file changed, 2 deletions(-)
>
> diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
> index 532e204f2b1b..94535c813ef7 100644
> --- a/drivers/vhost/scsi.c
> +++ b/drivers/vhost/scsi.c
> @@ -1827,8 +1827,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
>         vhost_scsi_clear_endpoint(vs, &t);
>         vhost_dev_stop(&vs->dev);
>         vhost_dev_cleanup(&vs->dev);
> -       /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
> -       vhost_scsi_flush(vs);
>         kfree(vs->dev.vqs);
>         kvfree(vs);
>         return 0;
> --
> 2.25.1
>


* Re: [PATCH 7/7] vhost-test: drop flush after vhost_dev_cleanup
  2021-12-07  2:45 ` [PATCH 7/7] vhost-test: " Mike Christie
@ 2021-12-08  4:09   ` Jason Wang
  0 siblings, 0 replies; 18+ messages in thread
From: Jason Wang @ 2021-12-08  4:09 UTC (permalink / raw)
  To: Mike Christie; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
<michael.christie@oracle.com> wrote:
>
> The flush after vhost_dev_cleanup is not needed because:
>
> 1. It doesn't do anything. vhost_dev_cleanup will stop the worker thread
> so the flush call will just return since the device no longer has a worker.
>
> 2. It's not needed. The comment about jobs re-queueing themselves does
> not look correct because handle_vq does not requeue work.

Similar to the previous patch, I wonder if the credit here actually goes
to vhost_dev_cleanup().

Thanks

>
> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> ---
>  drivers/vhost/test.c | 3 ---
>  1 file changed, 3 deletions(-)
>
> diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
> index d4f63068d762..57e24eceff27 100644
> --- a/drivers/vhost/test.c
> +++ b/drivers/vhost/test.c
> @@ -158,9 +158,6 @@ static int vhost_test_release(struct inode *inode, struct file *f)
>         vhost_test_flush(n);
>         vhost_dev_stop(&n->dev);
>         vhost_dev_cleanup(&n->dev);
> -       /* We do an extra flush before freeing memory,
> -        * since jobs can re-queue themselves. */
> -       vhost_test_flush(n);
>         kfree(n);
>         return 0;
>  }
> --
> 2.25.1
>


* Re: [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper
  2021-12-08  3:49   ` Jason Wang
@ 2021-12-08 16:41     ` Mike Christie
  2021-12-09  1:32       ` Jason Wang
  0 siblings, 1 reply; 18+ messages in thread
From: Mike Christie @ 2021-12-08 16:41 UTC (permalink / raw)
  To: Jason Wang; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On 12/7/21 9:49 PM, Jason Wang wrote:
> On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
> <michael.christie@oracle.com> wrote:
>>
>> vhost_poll_flush() is a simple wrapper around vhost_work_dev_flush().
>> It gives the wrong impression that we are doing some work on the vhost_poll,
>> while in fact it flushes vhost_poll->dev.
> 
> This "problem" is a byproduct of 7235acdb1144 ("vhost: simplify work flushing").
> 
> Before that we indeed had a per-poll flush.
> 
>> It only complicates understanding of the code and leads to mistakes
>> like flushing the same vhost_dev several times in a row.
>>
>> Just remove vhost_poll_flush() and call vhost_work_dev_flush() directly.
> 
> Not a native speaker, but since we don't have a per-work flush, is it
> better to rename this simply to vhost_flush()?
> 

What about vhost_dev_flush?

For the existing naming, when we have an exported function we tend to have
"vhost_", then the object/struct it works on, then the action.

For work we have:

vhost_work_queue/init

(we also have vhost_has_work which doesn't follow that pattern but
would sound strange as vhost_work_has so ignore that one).

For dev operations we have:

vhost_dev_reset_owner/set_owner/has_owner/cleanup/init

For the flush operation I wanted the name to reflect that it flushes all
work on the device, so I mashed up the work and dev naming above, and
I agree it's a little strange.
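
i.e. the prototype would simply become (hypothetical rename, not part of
this series):

	void vhost_dev_flush(struct vhost_dev *dev);

and callers would read like vhost_dev_flush(&n->dev).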

* Re: [PATCH 6/7] vhost-scsi: drop flush after vhost_dev_cleanup
  2021-12-08  4:07   ` Jason Wang
@ 2021-12-08 16:45     ` Mike Christie
  2021-12-09  1:35       ` Jason Wang
  0 siblings, 1 reply; 18+ messages in thread
From: Mike Christie @ 2021-12-08 16:45 UTC (permalink / raw)
  To: Jason Wang; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On 12/7/21 10:07 PM, Jason Wang wrote:
> On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
> <michael.christie@oracle.com> wrote:
>>
>> The flush after vhost_dev_cleanup is not needed because:
>>
>> 1. It doesn't do anything. vhost_dev_cleanup will stop the worker thread
>> so the flush call will just return since the device no longer has a worker.
>>
>> 2. It's not needed for the re-queue case. vhost_scsi_evt_handle_kick grabs
>> the mutex and if the backend is NULL will return without queueing a work.
>> vhost_scsi_clear_endpoint will set the backend to NULL under the vq->mutex
>> then drops the mutex and does a flush. So we know when
>> vhost_scsi_clear_endpoint has dropped the mutex after clearing the backend
>> no evt related work will be able to requeue. The flush would then make sure
>> any queued evts are run and return.
> 
> What happens if there is a kick after vhost_scsi_clear_endpoint() but
> before vhost_dev_stop()?

vhost_dev_stop also does a flush, so:

1. The kick handler would see the backend is null and return immediately.
2. The flush in vhost_dev_stop would wait for those kicks in #1 to complete.
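
To make that concrete, the backend check in the kick handler is roughly
(a simplified sketch of vhost_scsi_evt_handle_kick(), not the exact code):

	static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
	{
		struct vhost_virtqueue *vq = container_of(work,
					struct vhost_virtqueue, poll.work);

		mutex_lock(&vq->mutex);
		if (!vhost_vq_get_backend(vq))
			goto out;   /* backend already cleared, queue nothing */
		...                 /* evt work is only queued while a backend is set */
	out:
		mutex_unlock(&vq->mutex);
	}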


> 
> Is this safe or the kthread_stop() in vhost_dev_cleanup() makes us safe?
> 
> Thanks
> 
>>
>> Signed-off-by: Mike Christie <michael.christie@oracle.com>
>> ---
>>  drivers/vhost/scsi.c | 2 --
>>  1 file changed, 2 deletions(-)
>>
>> diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
>> index 532e204f2b1b..94535c813ef7 100644
>> --- a/drivers/vhost/scsi.c
>> +++ b/drivers/vhost/scsi.c
>> @@ -1827,8 +1827,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
>>         vhost_scsi_clear_endpoint(vs, &t);
>>         vhost_dev_stop(&vs->dev);
>>         vhost_dev_cleanup(&vs->dev);
>> -       /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
>> -       vhost_scsi_flush(vs);
>>         kfree(vs->dev.vqs);
>>         kvfree(vs);
>>         return 0;
>> --
>> 2.25.1
>>
> 


* Re: [PATCH 5/7] vhost_vsock: simplify vhost_vsock_flush()
  2021-12-08  3:53   ` Jason Wang
@ 2021-12-08 17:35     ` michael.christie
  2021-12-09  1:35       ` Jason Wang
  0 siblings, 1 reply; 18+ messages in thread
From: michael.christie @ 2021-12-08 17:35 UTC (permalink / raw)
  To: Jason Wang; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On 12/7/21 9:53 PM, Jason Wang wrote:
> On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
> <michael.christie@oracle.com> wrote:
>>
>> From: Andrey Ryabinin <arbn@yandex-team.com>
>>
>> vhost_vsock_flush() calls vhost_work_dev_flush(vsock->vqs[i].poll.dev)
>> before vhost_work_dev_flush(&vsock->dev). This seems pointless
>> as vsock->vqs[i].poll.dev is the same as &vsock->dev and several flushes
>> in a row don't do anything useful; one is enough.
>>
>> Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
>> Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
>> Signed-off-by: Mike Christie <michael.christie@oracle.com>
>> ---
>>  drivers/vhost/vsock.c | 5 -----
>>  1 file changed, 5 deletions(-)
>>
>> diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
>> index 2339587bcd31..1f38160b249d 100644
>> --- a/drivers/vhost/vsock.c
>> +++ b/drivers/vhost/vsock.c
>> @@ -703,11 +703,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
>>
>>  static void vhost_vsock_flush(struct vhost_vsock *vsock)
>>  {
>> -       int i;
>> -
>> -       for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
>> -               if (vsock->vqs[i].handle_kick)
>> -                       vhost_work_dev_flush(vsock->vqs[i].poll.dev);
>>         vhost_work_dev_flush(&vsock->dev);
>>  }
> 
> Would it be better to be consistent with vhost-net, so that we can simply
> remove the vhost_vsock_flush() wrapper here?
> 

I didn't understand that comment.

Did you mean consistent in that they both have vhost_vsock/net_flush
functions, or in that they prefer not to have one-line wrappers?

Before and after this patchset, both net and vsock have a vhost_vsock/net_flush
function, so maybe you didn't mean that.

I think the wrapper is not very useful and could be removed. However,
I thought vsock preferred wrappers because we have vhost_vsock_free
which is just a wrapper around kfree. I also noticed test.c is a
fan of one line wrappers, but I see net and scsi do not do that.

* Re: [PATCH 1/7] vhost: get rid of vhost_poll_flush() wrapper
  2021-12-08 16:41     ` Mike Christie
@ 2021-12-09  1:32       ` Jason Wang
  0 siblings, 0 replies; 18+ messages in thread
From: Jason Wang @ 2021-12-09  1:32 UTC (permalink / raw)
  To: Mike Christie; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On Thu, Dec 9, 2021 at 12:41 AM Mike Christie
<michael.christie@oracle.com> wrote:
>
> On 12/7/21 9:49 PM, Jason Wang wrote:
> > On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
> > <michael.christie@oracle.com> wrote:
> >>
> >> vhost_poll_flush() is a simple wrapper around vhost_work_dev_flush().
> >> It gives the wrong impression that we are doing some work on the vhost_poll,
> >> while in fact it flushes vhost_poll->dev.
> >
> > This "problem" is a byproduct of 7235acdb1144 ("vhost: simplify work flushing").
> >
> > Before that we indeed had a per-poll flush.
> >
> >> It only complicates understanding of the code and leads to mistakes
> >> like flushing the same vhost_dev several times in a row.
> >>
> >> Just remove vhost_poll_flush() and call vhost_work_dev_flush() directly.
> >
> > Not a native speaker, but since we don't have a per-work flush, is it
> > better to rename this simply to vhost_flush()?
> >
>
> What about vhost_dev_flush?
>
> For the existing naming when we have a function exported we tend to have
> "vhost_" then the object/struct it works on then the action.
>
> For work we have:
>
> vhost_work_queue/init
>
> (we also have vhost_has_work which doesn't follow that pattern but
> would sound strange as vhost_work_has so ignore that one).
>
> For dev operations we have:
>
> vhost_dev_reset_owner/set_owner/has_owner/cleanup/init
>
> For the flush operation I wanted it to reflect it flushed all work
> on the device, so I mashed up the work and dev naming above and
> I agree it's a little strange.

It looks fine to me.

Thanks

>


* Re: [PATCH 6/7] vhost-scsi: drop flush after vhost_dev_cleanup
  2021-12-08 16:45     ` Mike Christie
@ 2021-12-09  1:35       ` Jason Wang
  0 siblings, 0 replies; 18+ messages in thread
From: Jason Wang @ 2021-12-09  1:35 UTC (permalink / raw)
  To: Mike Christie; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On Thu, Dec 9, 2021 at 12:45 AM Mike Christie
<michael.christie@oracle.com> wrote:
>
> On 12/7/21 10:07 PM, Jason Wang wrote:
> > On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
> > <michael.christie@oracle.com> wrote:
> >>
> >> The flush after vhost_dev_cleanup is not needed because:
> >>
> >> 1. It doesn't do anything. vhost_dev_cleanup will stop the worker thread
> >> so the flush call will just return since the device no longer has a worker.
> >>
> >> 2. It's not needed for the re-queue case. vhost_scsi_evt_handle_kick grabs
> >> the mutex and if the backend is NULL will return without queueing a work.
> >> vhost_scsi_clear_endpoint will set the backend to NULL under the vq->mutex
> >> then drops the mutex and does a flush. So we know when
> >> vhost_scsi_clear_endpoint has dropped the mutex after clearing the backend
> >> no evt related work will be able to requeue. The flush would then make sure
> >> any queued evts are run and return.
> >
> > What happens if there is a kick after vhost_scsi_clear_endpoint() but
> > before vhost_dev_stop()?
>
> vhost_dev_stop also does a flush, so:
>
> 1. The kick handler would see the backend is null and return immediately.
> 2. The flush in vhost_dev_stop would wait for those kicks in #1 to complete.

You are right.

So

Acked-by: Jason Wang <jasowang@redhat.com>

>
>
> >
> > Is this safe or the kthread_stop() in vhost_dev_cleanup() makes us safe?
> >
> > Thanks
> >
> >>
> >> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> >> ---
> >>  drivers/vhost/scsi.c | 2 --
> >>  1 file changed, 2 deletions(-)
> >>
> >> diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
> >> index 532e204f2b1b..94535c813ef7 100644
> >> --- a/drivers/vhost/scsi.c
> >> +++ b/drivers/vhost/scsi.c
> >> @@ -1827,8 +1827,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
> >>         vhost_scsi_clear_endpoint(vs, &t);
> >>         vhost_dev_stop(&vs->dev);
> >>         vhost_dev_cleanup(&vs->dev);
> >> -       /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
> >> -       vhost_scsi_flush(vs);
> >>         kfree(vs->dev.vqs);
> >>         kvfree(vs);
> >>         return 0;
> >> --
> >> 2.25.1
> >>
> >
>


* Re: [PATCH 5/7] vhost_vsock: simplify vhost_vsock_flush()
  2021-12-08 17:35     ` michael.christie
@ 2021-12-09  1:35       ` Jason Wang
  0 siblings, 0 replies; 18+ messages in thread
From: Jason Wang @ 2021-12-09  1:35 UTC (permalink / raw)
  To: Mike Christie; +Cc: Andrey Ryabinin, virtualization, Stefan Hajnoczi, mst

On Thu, Dec 9, 2021 at 1:35 AM <michael.christie@oracle.com> wrote:
>
> On 12/7/21 9:53 PM, Jason Wang wrote:
> > On Tue, Dec 7, 2021 at 10:45 AM Mike Christie
> > <michael.christie@oracle.com> wrote:
> >>
> >> From: Andrey Ryabinin <arbn@yandex-team.com>
> >>
> >> vhost_vsock_flush() calls vhost_work_dev_flush(vsock->vqs[i].poll.dev)
> >> before vhost_work_dev_flush(&vsock->dev). This seems pointless
> >> as vsock->vqs[i].poll.dev is the same as &vsock->dev and several flushes
> >> in a row don't do anything useful; one is enough.
> >>
> >> Signed-off-by: Andrey Ryabinin <arbn@yandex-team.com>
> >> Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
> >> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> >> ---
> >>  drivers/vhost/vsock.c | 5 -----
> >>  1 file changed, 5 deletions(-)
> >>
> >> diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
> >> index 2339587bcd31..1f38160b249d 100644
> >> --- a/drivers/vhost/vsock.c
> >> +++ b/drivers/vhost/vsock.c
> >> @@ -703,11 +703,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
> >>
> >>  static void vhost_vsock_flush(struct vhost_vsock *vsock)
> >>  {
> >> -       int i;
> >> -
> >> -       for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
> >> -               if (vsock->vqs[i].handle_kick)
> >> -                       vhost_work_dev_flush(vsock->vqs[i].poll.dev);
> >>         vhost_work_dev_flush(&vsock->dev);
> >>  }
> >
> > Would it be better to be consistent with vhost-net, so that we can simply
> > remove the vhost_vsock_flush() wrapper here?
> >
>
> I didn't understand that comment.
>
> Did you mean consistent as they both have vhost_vsock/net_flush functions
> or as in they prefer to not have one line wrappers?
>
> Before and after this patchset, both net and vsock have a vhost_vsock/net_flush
> function, so maybe you didn't mean that.
>
> I think the wrapper is not very useful and could be removed. However,
> I thought vsock preferred wrappers because we have vhost_vsock_free
> which is just a wrapper around kfree. I also noticed test.c is a
> fan of one line wrappers, but I see net and scsi do not do that.

Ok, then I'm fine with this.

Thanks

>

