From: Greg Kurz <groug@kaod.org>
To: qemu-devel@nongnu.org
Cc: "Daniel P . Berrangé" <berrange@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
	"Greg Kurz" <groug@kaod.org>,
	virtio-fs@redhat.com, "Stefan Hajnoczi" <stefanha@redhat.com>,
	"Vivek Goyal" <vgoyal@redhat.com>
Subject: [PATCH v2 6/7] vhost-user: Monitor slave channel in vhost_user_read()
Date: Fri, 12 Mar 2021 10:22:11 +0100	[thread overview]
Message-ID: <20210312092212.782255-7-groug@kaod.org> (raw)
In-Reply-To: <20210312092212.782255-1-groug@kaod.org>

Now that everything is in place, have the nested event loop monitor
the slave channel. The source in the main event loop is destroyed and
recreated to ensure that any pending event for the slave channel that
was previously detected is purged. This guarantees that the main loop
won't invoke slave_read() based on an event that was already handled
by the nested loop.
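
The purge works because of how GLib sources behave: g_source_destroy()
detaches a source from its context, so a dispatch still pending there is
simply dropped, and a freshly created source polls the fd again in whichever
context it is attached to. A minimal plain-GLib sketch of that
destroy-and-recreate pattern (hypothetical names, not the QEMU QIOChannel
wrappers):

/*
 * Illustrative sketch only: move a watch on a GIOChannel to another
 * GMainContext by destroying the old GSource and attaching a new one.
 * A dispatch still pending for the old source dies with it; the new
 * source re-detects the condition, so only the new context's loop
 * ends up calling the handler.
 */
#include <glib.h>

static gboolean on_ready(GIOChannel *ch, GIOCondition cond, gpointer data)
{
    /* read and handle the message here */
    return G_SOURCE_CONTINUE;
}

static GSource *rearm_watch(GIOChannel *ch, GSource *old, GMainContext *ctxt)
{
    GSource *src;

    if (old) {
        g_source_destroy(old);  /* detach from whatever context it was in */
        g_source_unref(old);
    }
    src = g_io_create_watch(ch, G_IO_IN | G_IO_HUP);
    g_source_set_callback(src, (GSourceFunc)on_ready, NULL, NULL);
    g_source_attach(src, ctxt); /* NULL attaches to the default context */
    return src;
}

vhost_user_read() performs this handoff twice: into the nested context before
spinning the nested loop, and back to the default main context once the loop
returns, which is the slave_update_read_handler(dev, ctxt) /
slave_update_read_handler(dev, NULL) pair in the hunks below.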

Signed-off-by: Greg Kurz <groug@kaod.org>
---
v2: - also monitor G_IO_HUP (Stefan)
    - refactored the code a bit
---
 hw/virtio/vhost-user.c | 35 ++++++++++++++++++++++++++++++++---
 1 file changed, 32 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 00256fa318a6..ded0c1045309 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -345,6 +345,35 @@ end:
     return G_SOURCE_REMOVE;
 }
 
+static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
+                           gpointer opaque);
+
+/*
+ * This updates the read handler to use a new event loop context.
+ * Event sources are removed from the previous context: this ensures
+ * that events detected in the previous context are purged. They will
+ * be re-detected and processed in the new context.
+ */
+static void slave_update_read_handler(struct vhost_dev *dev,
+                                      GMainContext *ctxt)
+{
+    struct vhost_user *u = dev->opaque;
+
+    if (!u->slave_ioc) {
+        return;
+    }
+
+    if (u->slave_src) {
+        g_source_destroy(u->slave_src);
+        g_source_unref(u->slave_src);
+    }
+
+    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
+                                                G_IO_IN | G_IO_HUP,
+                                                slave_read, dev, NULL,
+                                                ctxt);
+}
+
 static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
 {
     struct vhost_user *u = dev->opaque;
@@ -366,6 +395,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
      * be prepared for re-entrancy. So we create a new one and switch chr
      * to use it.
      */
+    slave_update_read_handler(dev, ctxt);
     qemu_chr_be_update_read_handlers(chr->chr, ctxt);
     qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
 
@@ -377,6 +407,7 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
      * context that have been processed by the nested loop are purged.
      */
     qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
+    slave_update_read_handler(dev, NULL);
 
     g_main_loop_unref(loop);
     g_main_context_unref(ctxt);
@@ -1580,9 +1611,7 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev)
         return -1;
     }
     u->slave_ioc = ioc;
-    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
-                                                G_IO_IN | G_IO_HUP,
-                                                slave_read, dev, NULL, NULL);
+    slave_update_read_handler(dev, NULL);
 
     if (reply_supported) {
         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
-- 
2.26.2
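
For readers who have not used nested GLib loops before, here is a
self-contained demo (hypothetical code, plain GLib rather than QEMU's
QIOChannel layer, repeating the rearm_watch() shape from the sketch above so
it stands alone). It shows the complete handoff this patch applies to the
slave channel: a pipe is watched in the default context, the watch moves into
a private context driven by a nested g_main_loop_run() for the duration of a
synchronous wait, and then moves back.

#include <glib.h>
#include <stdio.h>
#include <unistd.h>

static GMainLoop *nested_loop; /* non-NULL while the nested loop is running */

static gboolean on_ready(GIOChannel *ch, GIOCondition cond, gpointer data)
{
    char buf[64];
    ssize_t n = read(g_io_channel_unix_get_fd(ch), buf, sizeof(buf) - 1);

    if (n > 0) {
        buf[n] = '\0';
        printf("handled in %s loop: %s\n",
               nested_loop ? "nested" : "main", buf);
    }
    if (nested_loop) {
        g_main_loop_quit(nested_loop); /* got the reply, leave the nested loop */
    }
    return G_SOURCE_CONTINUE;
}

static GSource *rearm_watch(GIOChannel *ch, GSource *old, GMainContext *ctxt)
{
    GSource *src;

    if (old) {
        g_source_destroy(old);  /* purges anything pending in the old context */
        g_source_unref(old);
    }
    src = g_io_create_watch(ch, G_IO_IN | G_IO_HUP);
    g_source_set_callback(src, (GSourceFunc)on_ready, NULL, NULL);
    g_source_attach(src, ctxt); /* NULL means the default (main) context */
    return src;
}

int main(void)
{
    int fds[2];
    GIOChannel *ch;
    GSource *src;
    GMainContext *ctxt;

    if (pipe(fds) != 0) {
        return 1;
    }
    ch = g_io_channel_unix_new(fds[0]);

    /* Normally the channel is serviced by the default (main) context. */
    src = rearm_watch(ch, NULL, NULL);

    /*
     * Synchronous wait: move the watch into a private context and spin
     * a nested loop until one message arrives.
     */
    ctxt = g_main_context_new();
    nested_loop = g_main_loop_new(ctxt, FALSE);
    src = rearm_watch(ch, src, ctxt);

    if (write(fds[1], "ping", 4) != 4) {
        return 1;
    }
    g_main_loop_run(nested_loop);

    /* Hand the channel back to the default context. */
    g_main_loop_unref(nested_loop);
    nested_loop = NULL;
    src = rearm_watch(ch, src, NULL);

    g_source_destroy(src);
    g_source_unref(src);
    g_main_context_unref(ctxt);
    g_io_channel_unref(ch);
    close(fds[0]);
    close(fds[1]);
    return 0;
}

Destroying the old source at each handoff is what guarantees that a condition
noticed in one context cannot also be dispatched by the other.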



Thread overview: 30+ messages
2021-03-12  9:22 [PATCH v2 0/7] virtiofsd: Avoid potential deadlocks Greg Kurz
2021-03-12  9:22 ` [PATCH v2 1/7] vhost-user: Drop misleading EAGAIN checks in slave_read() Greg Kurz
2021-03-15 10:34   ` Stefan Hajnoczi
2021-03-12  9:22 ` [PATCH v2 2/7] vhost-user: Fix double-close on slave_read() error path Greg Kurz
2021-03-15 10:36   ` Stefan Hajnoczi
2021-03-12  9:22 ` [PATCH v2 3/7] vhost-user: Factor out duplicated slave_fd teardown code Greg Kurz
2021-03-15 10:36   ` Stefan Hajnoczi
2021-03-12  9:22 ` [PATCH v2 4/7] vhost-user: Convert slave channel to QIOChannelSocket Greg Kurz
2021-03-15 10:37   ` Stefan Hajnoczi
2021-03-12  9:22 ` [PATCH v2 5/7] vhost-user: Introduce nested event loop in vhost_user_read() Greg Kurz
2021-03-15 10:38   ` Stefan Hajnoczi
2021-03-12  9:22 ` [PATCH v2 6/7] vhost-user: Monitor slave channel in vhost_user_read() Greg Kurz [this message]
2021-03-15 12:20   ` Stefan Hajnoczi
2021-03-12  9:22 ` [PATCH v2 7/7] virtiofsd: Release vu_dispatch_lock when stopping queue Greg Kurz
2021-03-15 14:57   ` Dr. David Alan Gilbert
