qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3 1/2] Revert "vhost-user: fix lost reconnect"
       [not found] <20240514061239.86461-1-fengli@smartx.com>
@ 2024-05-14  6:12 ` Li Feng
  2024-05-14 13:58   ` Raphael Norwitz
  2024-05-14  6:12 ` [PATCH v3 2/2] vhost-user: fix lost reconnect again Li Feng
  1 sibling, 1 reply; 10+ messages in thread
From: Li Feng @ 2024-05-14  6:12 UTC (permalink / raw)
  To: Michael S. Tsirkin, Raphael Norwitz, Kevin Wolf, Hanna Reitz,
	Paolo Bonzini, Fam Zheng, Alex Bennée,
	open list:Block layer core, open list:All patches CC here
  Cc: Yajun Wu, Stefano Garzarella

This reverts commit f02a4b8e6431598612466f76aac64ab492849abf.

Since the current patch cannot completely fix the lost reconnect
problem, there is a scenario that is not considered:
- When the virtio-blk driver is removed from the guest OS,
  s->connected has no chance to be set to false, resulting in
  the subsequent reconnection not being executed.

The next patch will completely fix this issue with a better approach.

Signed-off-by: Li Feng <fengli@smartx.com>
---
 hw/block/vhost-user-blk.c      |  2 +-
 hw/scsi/vhost-user-scsi.c      |  3 +--
 hw/virtio/vhost-user-base.c    |  2 +-
 hw/virtio/vhost-user.c         | 10 ++--------
 include/hw/virtio/vhost-user.h |  3 +--
 5 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 9e6bbc6950..41d1ac3a5a 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -384,7 +384,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
     case CHR_EVENT_CLOSED:
         /* defer close until later to avoid circular close */
         vhost_user_async_close(dev, &s->chardev, &s->dev,
-                               vhost_user_blk_disconnect, vhost_user_blk_event);
+                               vhost_user_blk_disconnect);
         break;
     case CHR_EVENT_BREAK:
     case CHR_EVENT_MUX_IN:
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index a63b1f4948..48a59e020e 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -214,8 +214,7 @@ static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
     case CHR_EVENT_CLOSED:
         /* defer close until later to avoid circular close */
         vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
-                               vhost_user_scsi_disconnect,
-                               vhost_user_scsi_event);
+                               vhost_user_scsi_disconnect);
         break;
     case CHR_EVENT_BREAK:
     case CHR_EVENT_MUX_IN:
diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
index a83167191e..4b54255682 100644
--- a/hw/virtio/vhost-user-base.c
+++ b/hw/virtio/vhost-user-base.c
@@ -254,7 +254,7 @@ static void vub_event(void *opaque, QEMUChrEvent event)
     case CHR_EVENT_CLOSED:
         /* defer close until later to avoid circular close */
         vhost_user_async_close(dev, &vub->chardev, &vub->vhost_dev,
-                               vub_disconnect, vub_event);
+                               vub_disconnect);
         break;
     case CHR_EVENT_BREAK:
     case CHR_EVENT_MUX_IN:
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index cdf9af4a4b..c929097e87 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -2776,7 +2776,6 @@ typedef struct {
     DeviceState *dev;
     CharBackend *cd;
     struct vhost_dev *vhost;
-    IOEventHandler *event_cb;
 } VhostAsyncCallback;
 
 static void vhost_user_async_close_bh(void *opaque)
@@ -2791,10 +2790,7 @@ static void vhost_user_async_close_bh(void *opaque)
      */
     if (vhost->vdev) {
         data->cb(data->dev);
-    } else if (data->event_cb) {
-        qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
-                                 NULL, data->dev, NULL, true);
-   }
+    }
 
     g_free(data);
 }
@@ -2806,8 +2802,7 @@ static void vhost_user_async_close_bh(void *opaque)
  */
 void vhost_user_async_close(DeviceState *d,
                             CharBackend *chardev, struct vhost_dev *vhost,
-                            vu_async_close_fn cb,
-                            IOEventHandler *event_cb)
+                            vu_async_close_fn cb)
 {
     if (!runstate_check(RUN_STATE_SHUTDOWN)) {
         /*
@@ -2823,7 +2818,6 @@ void vhost_user_async_close(DeviceState *d,
         data->dev = d;
         data->cd = chardev;
         data->vhost = vhost;
-        data->event_cb = event_cb;
 
         /* Disable any further notifications on the chardev */
         qemu_chr_fe_set_handlers(chardev,
diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
index d7c09ffd34..324cd8663a 100644
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -108,7 +108,6 @@ typedef void (*vu_async_close_fn)(DeviceState *cb);
 
 void vhost_user_async_close(DeviceState *d,
                             CharBackend *chardev, struct vhost_dev *vhost,
-                            vu_async_close_fn cb,
-                            IOEventHandler *event_cb);
+                            vu_async_close_fn cb);
 
 #endif
-- 
2.45.0



^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v3 2/2] vhost-user: fix lost reconnect again
       [not found] <20240514061239.86461-1-fengli@smartx.com>
  2024-05-14  6:12 ` [PATCH v3 1/2] Revert "vhost-user: fix lost reconnect" Li Feng
@ 2024-05-14  6:12 ` Li Feng
  2024-05-14 13:58   ` Raphael Norwitz
  1 sibling, 1 reply; 10+ messages in thread
From: Li Feng @ 2024-05-14  6:12 UTC (permalink / raw)
  To: Raphael Norwitz, Michael S. Tsirkin, Kevin Wolf, Hanna Reitz,
	Paolo Bonzini, Fam Zheng, Alex Bennée,
	open list:Block layer core, open list:All patches CC here
  Cc: Yajun Wu, Stefano Garzarella

When the vhost-user is reconnecting to the backend, and if the vhost-user fails
at the get_features in vhost_dev_init(), then the reconnect will fail
and it will never be retriggered.

The reason is:
When the vhost-user fails at get_features, the vhost_dev_cleanup will be called
immediately.

vhost_dev_cleanup calls 'memset(hdev, 0, sizeof(struct vhost_dev))'.

The reconnect path is:
vhost_user_blk_event
   vhost_user_async_close(.. vhost_user_blk_disconnect ..)
     qemu_chr_fe_set_handlers <----- clear the notifier callback
       schedule vhost_user_async_close_bh

The vhost->vdev is null, so the vhost_user_blk_disconnect will not be
called, then the event fd callback will not be reinstalled.

With this patch, the vhost_user_blk_disconnect will call the
vhost_dev_cleanup() again, it's safe.

In addition, the CLOSE event may occur in a scenario where connected is false.
At this time, the event handler will be cleared. We need to ensure that the
event handler can remain installed.

All vhost-user devices have this issue, including vhost-user-blk/scsi.

Fixes: 71e076a07d ("hw/virtio: generalise CHR_EVENT_CLOSED handling")

Signed-off-by: Li Feng <fengli@smartx.com>
---
 hw/block/vhost-user-blk.c   |  3 ++-
 hw/scsi/vhost-user-scsi.c   |  3 ++-
 hw/virtio/vhost-user-base.c |  3 ++-
 hw/virtio/vhost-user.c      | 10 +---------
 4 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
index 41d1ac3a5a..c6842ced48 100644
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -353,7 +353,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
     VHostUserBlk *s = VHOST_USER_BLK(vdev);
 
     if (!s->connected) {
-        return;
+        goto done;
     }
     s->connected = false;
 
@@ -361,6 +361,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
 
     vhost_dev_cleanup(&s->dev);
 
+done:
     /* Re-instate the event handler for new connections */
     qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
                              NULL, dev, NULL, true);
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index 48a59e020e..b49a11d23b 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -181,7 +181,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
 
     if (!s->connected) {
-        return;
+        goto done;
     }
     s->connected = false;
 
@@ -189,6 +189,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
 
     vhost_dev_cleanup(&vsc->dev);
 
+done:
     /* Re-instate the event handler for new connections */
     qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
                              vhost_user_scsi_event, NULL, dev, NULL, true);
diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
index 4b54255682..11e72b1e3b 100644
--- a/hw/virtio/vhost-user-base.c
+++ b/hw/virtio/vhost-user-base.c
@@ -225,13 +225,14 @@ static void vub_disconnect(DeviceState *dev)
     VHostUserBase *vub = VHOST_USER_BASE(vdev);
 
     if (!vub->connected) {
-        return;
+        goto done;
     }
     vub->connected = false;
 
     vub_stop(vdev);
     vhost_dev_cleanup(&vub->vhost_dev);
 
+done:
     /* Re-instate the event handler for new connections */
     qemu_chr_fe_set_handlers(&vub->chardev,
                              NULL, NULL, vub_event,
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index c929097e87..c407ea8939 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -2781,16 +2781,8 @@ typedef struct {
 static void vhost_user_async_close_bh(void *opaque)
 {
     VhostAsyncCallback *data = opaque;
-    struct vhost_dev *vhost = data->vhost;
 
-    /*
-     * If the vhost_dev has been cleared in the meantime there is
-     * nothing left to do as some other path has completed the
-     * cleanup.
-     */
-    if (vhost->vdev) {
-        data->cb(data->dev);
-    }
+    data->cb(data->dev);
 
     g_free(data);
 }
-- 
2.45.0



^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 1/2] Revert "vhost-user: fix lost reconnect"
  2024-05-14  6:12 ` [PATCH v3 1/2] Revert "vhost-user: fix lost reconnect" Li Feng
@ 2024-05-14 13:58   ` Raphael Norwitz
  2024-05-15  5:46     ` Li Feng
  0 siblings, 1 reply; 10+ messages in thread
From: Raphael Norwitz @ 2024-05-14 13:58 UTC (permalink / raw)
  To: Li Feng
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella

The code for these two patches looks fine. Just some questions on the
failure case you're trying to fix.


On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
>
> This reverts commit f02a4b8e6431598612466f76aac64ab492849abf.
>
> Since the current patch cannot completely fix the lost reconnect
> problem, there is a scenario that is not considered:
> - When the virtio-blk driver is removed from the guest os,
>   s->connected has no chance to be set to false, resulting in

Why would the virtio-blk driver being removed (unloaded?) in the guest
effect s->connected? Isn't this variable just tracking whether Qemu is
connected to the backend process? What does it have to do with the
guest driver state?

>   subsequent reconnection not being executed.
>
> The next patch will completely fix this issue with a better approach.
>
> Signed-off-by: Li Feng <fengli@smartx.com>
> ---
>  hw/block/vhost-user-blk.c      |  2 +-
>  hw/scsi/vhost-user-scsi.c      |  3 +--
>  hw/virtio/vhost-user-base.c    |  2 +-
>  hw/virtio/vhost-user.c         | 10 ++--------
>  include/hw/virtio/vhost-user.h |  3 +--
>  5 files changed, 6 insertions(+), 14 deletions(-)
>
> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> index 9e6bbc6950..41d1ac3a5a 100644
> --- a/hw/block/vhost-user-blk.c
> +++ b/hw/block/vhost-user-blk.c
> @@ -384,7 +384,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
>      case CHR_EVENT_CLOSED:
>          /* defer close until later to avoid circular close */
>          vhost_user_async_close(dev, &s->chardev, &s->dev,
> -                               vhost_user_blk_disconnect, vhost_user_blk_event);
> +                               vhost_user_blk_disconnect);
>          break;
>      case CHR_EVENT_BREAK:
>      case CHR_EVENT_MUX_IN:
> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
> index a63b1f4948..48a59e020e 100644
> --- a/hw/scsi/vhost-user-scsi.c
> +++ b/hw/scsi/vhost-user-scsi.c
> @@ -214,8 +214,7 @@ static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
>      case CHR_EVENT_CLOSED:
>          /* defer close until later to avoid circular close */
>          vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
> -                               vhost_user_scsi_disconnect,
> -                               vhost_user_scsi_event);
> +                               vhost_user_scsi_disconnect);
>          break;
>      case CHR_EVENT_BREAK:
>      case CHR_EVENT_MUX_IN:
> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
> index a83167191e..4b54255682 100644
> --- a/hw/virtio/vhost-user-base.c
> +++ b/hw/virtio/vhost-user-base.c
> @@ -254,7 +254,7 @@ static void vub_event(void *opaque, QEMUChrEvent event)
>      case CHR_EVENT_CLOSED:
>          /* defer close until later to avoid circular close */
>          vhost_user_async_close(dev, &vub->chardev, &vub->vhost_dev,
> -                               vub_disconnect, vub_event);
> +                               vub_disconnect);
>          break;
>      case CHR_EVENT_BREAK:
>      case CHR_EVENT_MUX_IN:
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index cdf9af4a4b..c929097e87 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -2776,7 +2776,6 @@ typedef struct {
>      DeviceState *dev;
>      CharBackend *cd;
>      struct vhost_dev *vhost;
> -    IOEventHandler *event_cb;
>  } VhostAsyncCallback;
>
>  static void vhost_user_async_close_bh(void *opaque)
> @@ -2791,10 +2790,7 @@ static void vhost_user_async_close_bh(void *opaque)
>       */
>      if (vhost->vdev) {
>          data->cb(data->dev);
> -    } else if (data->event_cb) {
> -        qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
> -                                 NULL, data->dev, NULL, true);
> -   }
> +    }
>
>      g_free(data);
>  }
> @@ -2806,8 +2802,7 @@ static void vhost_user_async_close_bh(void *opaque)
>   */
>  void vhost_user_async_close(DeviceState *d,
>                              CharBackend *chardev, struct vhost_dev *vhost,
> -                            vu_async_close_fn cb,
> -                            IOEventHandler *event_cb)
> +                            vu_async_close_fn cb)
>  {
>      if (!runstate_check(RUN_STATE_SHUTDOWN)) {
>          /*
> @@ -2823,7 +2818,6 @@ void vhost_user_async_close(DeviceState *d,
>          data->dev = d;
>          data->cd = chardev;
>          data->vhost = vhost;
> -        data->event_cb = event_cb;
>
>          /* Disable any further notifications on the chardev */
>          qemu_chr_fe_set_handlers(chardev,
> diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
> index d7c09ffd34..324cd8663a 100644
> --- a/include/hw/virtio/vhost-user.h
> +++ b/include/hw/virtio/vhost-user.h
> @@ -108,7 +108,6 @@ typedef void (*vu_async_close_fn)(DeviceState *cb);
>
>  void vhost_user_async_close(DeviceState *d,
>                              CharBackend *chardev, struct vhost_dev *vhost,
> -                            vu_async_close_fn cb,
> -                            IOEventHandler *event_cb);
> +                            vu_async_close_fn cb);
>
>  #endif
> --
> 2.45.0
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] vhost-user: fix lost reconnect again
  2024-05-14  6:12 ` [PATCH v3 2/2] vhost-user: fix lost reconnect again Li Feng
@ 2024-05-14 13:58   ` Raphael Norwitz
  2024-05-15  5:47     ` Li Feng
  0 siblings, 1 reply; 10+ messages in thread
From: Raphael Norwitz @ 2024-05-14 13:58 UTC (permalink / raw)
  To: Li Feng
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella

Code looks good. Just a question on the error case you're trying to fix.

On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
>
> When the vhost-user is reconnecting to the backend, and if the vhost-user fails
> at the get_features in vhost_dev_init(), then the reconnect will fail
> and it will not be retriggered forever.
>
> The reason is:
> When the vhost-user fail at get_features, the vhost_dev_cleanup will be called
> immediately.
>
> vhost_dev_cleanup calls 'memset(hdev, 0, sizeof(struct vhost_dev))'.
>
> The reconnect path is:
> vhost_user_blk_event
>    vhost_user_async_close(.. vhost_user_blk_disconnect ..)
>      qemu_chr_fe_set_handlers <----- clear the notifier callback
>        schedule vhost_user_async_close_bh
>
> The vhost->vdev is null, so the vhost_user_blk_disconnect will not be
> called, then the event fd callback will not be reinstalled.
>
> With this patch, the vhost_user_blk_disconnect will call the
> vhost_dev_cleanup() again, it's safe.
>
> In addition, the CLOSE event may occur in a scenario where connected is false.
> At this time, the event handler will be cleared. We need to ensure that the
> event handler can remain installed.

Following on from the prior patch, why would "connected" be false when
a CLOSE event happens?

>
> All vhost-user devices have this issue, including vhost-user-blk/scsi.
>
> Fixes: 71e076a07d ("hw/virtio: generalise CHR_EVENT_CLOSED handling")
>
> Signed-off-by: Li Feng <fengli@smartx.com>
> ---
>  hw/block/vhost-user-blk.c   |  3 ++-
>  hw/scsi/vhost-user-scsi.c   |  3 ++-
>  hw/virtio/vhost-user-base.c |  3 ++-
>  hw/virtio/vhost-user.c      | 10 +---------
>  4 files changed, 7 insertions(+), 12 deletions(-)
>
> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> index 41d1ac3a5a..c6842ced48 100644
> --- a/hw/block/vhost-user-blk.c
> +++ b/hw/block/vhost-user-blk.c
> @@ -353,7 +353,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>      VHostUserBlk *s = VHOST_USER_BLK(vdev);
>
>      if (!s->connected) {
> -        return;
> +        goto done;
>      }
>      s->connected = false;
>
> @@ -361,6 +361,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>
>      vhost_dev_cleanup(&s->dev);
>
> +done:
>      /* Re-instate the event handler for new connections */
>      qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
>                               NULL, dev, NULL, true);
> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
> index 48a59e020e..b49a11d23b 100644
> --- a/hw/scsi/vhost-user-scsi.c
> +++ b/hw/scsi/vhost-user-scsi.c
> @@ -181,7 +181,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>      VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
>
>      if (!s->connected) {
> -        return;
> +        goto done;
>      }
>      s->connected = false;
>
> @@ -189,6 +189,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>
>      vhost_dev_cleanup(&vsc->dev);
>
> +done:
>      /* Re-instate the event handler for new connections */
>      qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
>                               vhost_user_scsi_event, NULL, dev, NULL, true);
> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
> index 4b54255682..11e72b1e3b 100644
> --- a/hw/virtio/vhost-user-base.c
> +++ b/hw/virtio/vhost-user-base.c
> @@ -225,13 +225,14 @@ static void vub_disconnect(DeviceState *dev)
>      VHostUserBase *vub = VHOST_USER_BASE(vdev);
>
>      if (!vub->connected) {
> -        return;
> +        goto done;
>      }
>      vub->connected = false;
>
>      vub_stop(vdev);
>      vhost_dev_cleanup(&vub->vhost_dev);
>
> +done:
>      /* Re-instate the event handler for new connections */
>      qemu_chr_fe_set_handlers(&vub->chardev,
>                               NULL, NULL, vub_event,
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index c929097e87..c407ea8939 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -2781,16 +2781,8 @@ typedef struct {
>  static void vhost_user_async_close_bh(void *opaque)
>  {
>      VhostAsyncCallback *data = opaque;
> -    struct vhost_dev *vhost = data->vhost;
>
> -    /*
> -     * If the vhost_dev has been cleared in the meantime there is
> -     * nothing left to do as some other path has completed the
> -     * cleanup.
> -     */
> -    if (vhost->vdev) {
> -        data->cb(data->dev);
> -    }
> +    data->cb(data->dev);
>
>      g_free(data);
>  }
> --
> 2.45.0
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 1/2] Revert "vhost-user: fix lost reconnect"
  2024-05-14 13:58   ` Raphael Norwitz
@ 2024-05-15  5:46     ` Li Feng
  2024-05-15 15:47       ` Raphael Norwitz
  0 siblings, 1 reply; 10+ messages in thread
From: Li Feng @ 2024-05-15  5:46 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella



> 2024年5月14日 21:58,Raphael Norwitz <raphael@enfabrica.net> 写道:
> 
> The code for these two patches looks fine. Just some questions on the
> failure case you're trying to fix.
> 
> 
> On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
>> 
>> This reverts commit f02a4b8e6431598612466f76aac64ab492849abf.
>> 
>> Since the current patch cannot completely fix the lost reconnect
>> problem, there is a scenario that is not considered:
>> - When the virtio-blk driver is removed from the guest os,
>>  s->connected has no chance to be set to false, resulting in
> 
> Why would the virtio-blk driver being removed (unloaded?) in the guest
> effect s->connected? Isn't this variable just tracking whether Qemu is
> connected to the backend process? What does it have to do with the
> guest driver state?

Unloading the virtio-blk driver triggers ‘vhost_user_blk_stop’, and in `vhost_dev_stop`
it will set `hdev->vdev = NULL;`.

Next, if the backend is killed, the CLOSE event will be triggered. Since `vhost->vdev`
was already set to null, `vhost_user_blk_disconnect` will not have a chance to
execute, so s->connected is still true.

static void vhost_user_async_close_bh(void *opaque)
{
    VhostAsyncCallback *data = opaque;
    struct vhost_dev *vhost = data->vhost;

    /*
     * If the vhost_dev has been cleared in the meantime there is
     * nothing left to do as some other path has completed the
     * cleanup.
     */
    if (vhost->vdev) {  <============================ HERE vdev is null.
        data->cb(data->dev);
    } else if (data->event_cb) {
        qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
                                 NULL, data->dev, NULL, true);
   }

    g_free(data);
}

Thanks,
Li

> 
>>  subsequent reconnection not being executed.
>> 
>> The next patch will completely fix this issue with a better approach.
>> 
>> Signed-off-by: Li Feng <fengli@smartx.com>
>> ---
>> hw/block/vhost-user-blk.c      |  2 +-
>> hw/scsi/vhost-user-scsi.c      |  3 +--
>> hw/virtio/vhost-user-base.c    |  2 +-
>> hw/virtio/vhost-user.c         | 10 ++--------
>> include/hw/virtio/vhost-user.h |  3 +--
>> 5 files changed, 6 insertions(+), 14 deletions(-)
>> 
>> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
>> index 9e6bbc6950..41d1ac3a5a 100644
>> --- a/hw/block/vhost-user-blk.c
>> +++ b/hw/block/vhost-user-blk.c
>> @@ -384,7 +384,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
>>     case CHR_EVENT_CLOSED:
>>         /* defer close until later to avoid circular close */
>>         vhost_user_async_close(dev, &s->chardev, &s->dev,
>> -                               vhost_user_blk_disconnect, vhost_user_blk_event);
>> +                               vhost_user_blk_disconnect);
>>         break;
>>     case CHR_EVENT_BREAK:
>>     case CHR_EVENT_MUX_IN:
>> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
>> index a63b1f4948..48a59e020e 100644
>> --- a/hw/scsi/vhost-user-scsi.c
>> +++ b/hw/scsi/vhost-user-scsi.c
>> @@ -214,8 +214,7 @@ static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
>>     case CHR_EVENT_CLOSED:
>>         /* defer close until later to avoid circular close */
>>         vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
>> -                               vhost_user_scsi_disconnect,
>> -                               vhost_user_scsi_event);
>> +                               vhost_user_scsi_disconnect);
>>         break;
>>     case CHR_EVENT_BREAK:
>>     case CHR_EVENT_MUX_IN:
>> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
>> index a83167191e..4b54255682 100644
>> --- a/hw/virtio/vhost-user-base.c
>> +++ b/hw/virtio/vhost-user-base.c
>> @@ -254,7 +254,7 @@ static void vub_event(void *opaque, QEMUChrEvent event)
>>     case CHR_EVENT_CLOSED:
>>         /* defer close until later to avoid circular close */
>>         vhost_user_async_close(dev, &vub->chardev, &vub->vhost_dev,
>> -                               vub_disconnect, vub_event);
>> +                               vub_disconnect);
>>         break;
>>     case CHR_EVENT_BREAK:
>>     case CHR_EVENT_MUX_IN:
>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>> index cdf9af4a4b..c929097e87 100644
>> --- a/hw/virtio/vhost-user.c
>> +++ b/hw/virtio/vhost-user.c
>> @@ -2776,7 +2776,6 @@ typedef struct {
>>     DeviceState *dev;
>>     CharBackend *cd;
>>     struct vhost_dev *vhost;
>> -    IOEventHandler *event_cb;
>> } VhostAsyncCallback;
>> 
>> static void vhost_user_async_close_bh(void *opaque)
>> @@ -2791,10 +2790,7 @@ static void vhost_user_async_close_bh(void *opaque)
>>      */
>>     if (vhost->vdev) {
>>         data->cb(data->dev);
>> -    } else if (data->event_cb) {
>> -        qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
>> -                                 NULL, data->dev, NULL, true);
>> -   }
>> +    }
>> 
>>     g_free(data);
>> }
>> @@ -2806,8 +2802,7 @@ static void vhost_user_async_close_bh(void *opaque)
>>  */
>> void vhost_user_async_close(DeviceState *d,
>>                             CharBackend *chardev, struct vhost_dev *vhost,
>> -                            vu_async_close_fn cb,
>> -                            IOEventHandler *event_cb)
>> +                            vu_async_close_fn cb)
>> {
>>     if (!runstate_check(RUN_STATE_SHUTDOWN)) {
>>         /*
>> @@ -2823,7 +2818,6 @@ void vhost_user_async_close(DeviceState *d,
>>         data->dev = d;
>>         data->cd = chardev;
>>         data->vhost = vhost;
>> -        data->event_cb = event_cb;
>> 
>>         /* Disable any further notifications on the chardev */
>>         qemu_chr_fe_set_handlers(chardev,
>> diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
>> index d7c09ffd34..324cd8663a 100644
>> --- a/include/hw/virtio/vhost-user.h
>> +++ b/include/hw/virtio/vhost-user.h
>> @@ -108,7 +108,6 @@ typedef void (*vu_async_close_fn)(DeviceState *cb);
>> 
>> void vhost_user_async_close(DeviceState *d,
>>                             CharBackend *chardev, struct vhost_dev *vhost,
>> -                            vu_async_close_fn cb,
>> -                            IOEventHandler *event_cb);
>> +                            vu_async_close_fn cb);
>> 
>> #endif
>> --
>> 2.45.0
>> 



^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] vhost-user: fix lost reconnect again
  2024-05-14 13:58   ` Raphael Norwitz
@ 2024-05-15  5:47     ` Li Feng
  2024-05-15 15:47       ` Raphael Norwitz
  0 siblings, 1 reply; 10+ messages in thread
From: Li Feng @ 2024-05-15  5:47 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella



> 2024年5月14日 21:58,Raphael Norwitz <raphael@enfabrica.net> 写道:
> 
> Code looks good. Just a question on the error case you're trying to fix.
> 
> On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
>> 
>> When the vhost-user is reconnecting to the backend, and if the vhost-user fails
>> at the get_features in vhost_dev_init(), then the reconnect will fail
>> and it will not be retriggered forever.
>> 
>> The reason is:
>> When the vhost-user fail at get_features, the vhost_dev_cleanup will be called
>> immediately.
>> 
>> vhost_dev_cleanup calls 'memset(hdev, 0, sizeof(struct vhost_dev))'.
>> 
>> The reconnect path is:
>> vhost_user_blk_event
>>   vhost_user_async_close(.. vhost_user_blk_disconnect ..)
>>     qemu_chr_fe_set_handlers <----- clear the notifier callback
>>       schedule vhost_user_async_close_bh
>> 
>> The vhost->vdev is null, so the vhost_user_blk_disconnect will not be
>> called, then the event fd callback will not be reinstalled.
>> 
>> With this patch, the vhost_user_blk_disconnect will call the
>> vhost_dev_cleanup() again, it's safe.
>> 
>> In addition, the CLOSE event may occur in a scenario where connected is false.
>> At this time, the event handler will be cleared. We need to ensure that the
>> event handler can remain installed.
> 
> Following on from the prior patch, why would "connected" be false when
> a CLOSE event happens?

In OPEN event handling, vhost_user_blk_connect calls vhost_dev_init and encounters
an error, so s->connected remains false.
Next, when the CLOSE event arrives, s->connected is found to be false, so nothing
is done; however, the event handler has already been cleared in `vhost_user_async_close`
before the CLOSE event is executed.

Thanks,
Li

> 
>> 
>> All vhost-user devices have this issue, including vhost-user-blk/scsi.
>> 
>> Fixes: 71e076a07d ("hw/virtio: generalise CHR_EVENT_CLOSED handling")
>> 
>> Signed-off-by: Li Feng <fengli@smartx.com>
>> ---
>> hw/block/vhost-user-blk.c   |  3 ++-
>> hw/scsi/vhost-user-scsi.c   |  3 ++-
>> hw/virtio/vhost-user-base.c |  3 ++-
>> hw/virtio/vhost-user.c      | 10 +---------
>> 4 files changed, 7 insertions(+), 12 deletions(-)
>> 
>> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
>> index 41d1ac3a5a..c6842ced48 100644
>> --- a/hw/block/vhost-user-blk.c
>> +++ b/hw/block/vhost-user-blk.c
>> @@ -353,7 +353,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>>     VHostUserBlk *s = VHOST_USER_BLK(vdev);
>> 
>>     if (!s->connected) {
>> -        return;
>> +        goto done;
>>     }
>>     s->connected = false;
>> 
>> @@ -361,6 +361,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>> 
>>     vhost_dev_cleanup(&s->dev);
>> 
>> +done:
>>     /* Re-instate the event handler for new connections */
>>     qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
>>                              NULL, dev, NULL, true);
>> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
>> index 48a59e020e..b49a11d23b 100644
>> --- a/hw/scsi/vhost-user-scsi.c
>> +++ b/hw/scsi/vhost-user-scsi.c
>> @@ -181,7 +181,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>>     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
>> 
>>     if (!s->connected) {
>> -        return;
>> +        goto done;
>>     }
>>     s->connected = false;
>> 
>> @@ -189,6 +189,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>> 
>>     vhost_dev_cleanup(&vsc->dev);
>> 
>> +done:
>>     /* Re-instate the event handler for new connections */
>>     qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
>>                              vhost_user_scsi_event, NULL, dev, NULL, true);
>> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
>> index 4b54255682..11e72b1e3b 100644
>> --- a/hw/virtio/vhost-user-base.c
>> +++ b/hw/virtio/vhost-user-base.c
>> @@ -225,13 +225,14 @@ static void vub_disconnect(DeviceState *dev)
>>     VHostUserBase *vub = VHOST_USER_BASE(vdev);
>> 
>>     if (!vub->connected) {
>> -        return;
>> +        goto done;
>>     }
>>     vub->connected = false;
>> 
>>     vub_stop(vdev);
>>     vhost_dev_cleanup(&vub->vhost_dev);
>> 
>> +done:
>>     /* Re-instate the event handler for new connections */
>>     qemu_chr_fe_set_handlers(&vub->chardev,
>>                              NULL, NULL, vub_event,
>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>> index c929097e87..c407ea8939 100644
>> --- a/hw/virtio/vhost-user.c
>> +++ b/hw/virtio/vhost-user.c
>> @@ -2781,16 +2781,8 @@ typedef struct {
>> static void vhost_user_async_close_bh(void *opaque)
>> {
>>     VhostAsyncCallback *data = opaque;
>> -    struct vhost_dev *vhost = data->vhost;
>> 
>> -    /*
>> -     * If the vhost_dev has been cleared in the meantime there is
>> -     * nothing left to do as some other path has completed the
>> -     * cleanup.
>> -     */
>> -    if (vhost->vdev) {
>> -        data->cb(data->dev);
>> -    }
>> +    data->cb(data->dev);
>> 
>>     g_free(data);
>> }
>> --
>> 2.45.0
>> 



^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 1/2] Revert "vhost-user: fix lost reconnect"
  2024-05-15  5:46     ` Li Feng
@ 2024-05-15 15:47       ` Raphael Norwitz
  0 siblings, 0 replies; 10+ messages in thread
From: Raphael Norwitz @ 2024-05-15 15:47 UTC (permalink / raw)
  To: Li Feng
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella

On Wed, May 15, 2024 at 1:47 AM Li Feng <fengli@smartx.com> wrote:
>
>
>
> > 2024年5月14日 21:58,Raphael Norwitz <raphael@enfabrica.net> 写道:
> >
> > The code for these two patches looks fine. Just some questions on the
> > failure case you're trying to fix.
> >
> >
> > On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
> >>
> >> This reverts commit f02a4b8e6431598612466f76aac64ab492849abf.
> >>
> >> Since the current patch cannot completely fix the lost reconnect
> >> problem, there is a scenario that is not considered:
> >> - When the virtio-blk driver is removed from the guest os,
> >>  s->connected has no chance to be set to false, resulting in
> >
> > Why would the virtio-blk driver being removed (unloaded?) in the guest
> > effect s->connected? Isn't this variable just tracking whether Qemu is
> > connected to the backend process? What does it have to do with the
> > guest driver state?
>
> Unloading the virtio-blk driver will trigger ‘vhost_user_blk_stop’, and in `vhost_dev_stop`
> it will set the `hdev->vdev = NULL;`.
>
> Next if kill the backend, the CLOSE event will be triggered, and the `vhost->vdev`
> has been set to null before, then the `vhost_user_blk_disconnect` will not have a
> chance to execute. So that the s->connected is still true.

Makes sense - basically if the driver is unloaded and then the device
is brought down s->connected will remain true when it should be false,
which will mess up a subsequent reconnect.

See my comments on the following patch though.

>
> static void vhost_user_async_close_bh(void *opaque)
> {
>     VhostAsyncCallback *data = opaque;
>     struct vhost_dev *vhost = data->vhost;
>
>     /*
>      * If the vhost_dev has been cleared in the meantime there is
>      * nothing left to do as some other path has completed the
>      * cleanup.
>      */
>     if (vhost->vdev) {  <============================ HERE vdev is null.
>         data->cb(data->dev);
>     } else if (data->event_cb) {
>         qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
>                                  NULL, data->dev, NULL, true);
>    }
>
>     g_free(data);
> }
>
> Thanks,
> Li
>
> >
> >>  subsequent reconnection not being executed.
> >>
> >> The next patch will completely fix this issue with a better approach.
> >>
> >> Signed-off-by: Li Feng <fengli@smartx.com>
> >> ---
> >> hw/block/vhost-user-blk.c      |  2 +-
> >> hw/scsi/vhost-user-scsi.c      |  3 +--
> >> hw/virtio/vhost-user-base.c    |  2 +-
> >> hw/virtio/vhost-user.c         | 10 ++--------
> >> include/hw/virtio/vhost-user.h |  3 +--
> >> 5 files changed, 6 insertions(+), 14 deletions(-)
> >>
> >> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> >> index 9e6bbc6950..41d1ac3a5a 100644
> >> --- a/hw/block/vhost-user-blk.c
> >> +++ b/hw/block/vhost-user-blk.c
> >> @@ -384,7 +384,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
> >>     case CHR_EVENT_CLOSED:
> >>         /* defer close until later to avoid circular close */
> >>         vhost_user_async_close(dev, &s->chardev, &s->dev,
> >> -                               vhost_user_blk_disconnect, vhost_user_blk_event);
> >> +                               vhost_user_blk_disconnect);
> >>         break;
> >>     case CHR_EVENT_BREAK:
> >>     case CHR_EVENT_MUX_IN:
> >> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
> >> index a63b1f4948..48a59e020e 100644
> >> --- a/hw/scsi/vhost-user-scsi.c
> >> +++ b/hw/scsi/vhost-user-scsi.c
> >> @@ -214,8 +214,7 @@ static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
> >>     case CHR_EVENT_CLOSED:
> >>         /* defer close until later to avoid circular close */
> >>         vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
> >> -                               vhost_user_scsi_disconnect,
> >> -                               vhost_user_scsi_event);
> >> +                               vhost_user_scsi_disconnect);
> >>         break;
> >>     case CHR_EVENT_BREAK:
> >>     case CHR_EVENT_MUX_IN:
> >> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
> >> index a83167191e..4b54255682 100644
> >> --- a/hw/virtio/vhost-user-base.c
> >> +++ b/hw/virtio/vhost-user-base.c
> >> @@ -254,7 +254,7 @@ static void vub_event(void *opaque, QEMUChrEvent event)
> >>     case CHR_EVENT_CLOSED:
> >>         /* defer close until later to avoid circular close */
> >>         vhost_user_async_close(dev, &vub->chardev, &vub->vhost_dev,
> >> -                               vub_disconnect, vub_event);
> >> +                               vub_disconnect);
> >>         break;
> >>     case CHR_EVENT_BREAK:
> >>     case CHR_EVENT_MUX_IN:
> >> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> >> index cdf9af4a4b..c929097e87 100644
> >> --- a/hw/virtio/vhost-user.c
> >> +++ b/hw/virtio/vhost-user.c
> >> @@ -2776,7 +2776,6 @@ typedef struct {
> >>     DeviceState *dev;
> >>     CharBackend *cd;
> >>     struct vhost_dev *vhost;
> >> -    IOEventHandler *event_cb;
> >> } VhostAsyncCallback;
> >>
> >> static void vhost_user_async_close_bh(void *opaque)
> >> @@ -2791,10 +2790,7 @@ static void vhost_user_async_close_bh(void *opaque)
> >>      */
> >>     if (vhost->vdev) {
> >>         data->cb(data->dev);
> >> -    } else if (data->event_cb) {
> >> -        qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
> >> -                                 NULL, data->dev, NULL, true);
> >> -   }
> >> +    }
> >>
> >>     g_free(data);
> >> }
> >> @@ -2806,8 +2802,7 @@ static void vhost_user_async_close_bh(void *opaque)
> >>  */
> >> void vhost_user_async_close(DeviceState *d,
> >>                             CharBackend *chardev, struct vhost_dev *vhost,
> >> -                            vu_async_close_fn cb,
> >> -                            IOEventHandler *event_cb)
> >> +                            vu_async_close_fn cb)
> >> {
> >>     if (!runstate_check(RUN_STATE_SHUTDOWN)) {
> >>         /*
> >> @@ -2823,7 +2818,6 @@ void vhost_user_async_close(DeviceState *d,
> >>         data->dev = d;
> >>         data->cd = chardev;
> >>         data->vhost = vhost;
> >> -        data->event_cb = event_cb;
> >>
> >>         /* Disable any further notifications on the chardev */
> >>         qemu_chr_fe_set_handlers(chardev,
> >> diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
> >> index d7c09ffd34..324cd8663a 100644
> >> --- a/include/hw/virtio/vhost-user.h
> >> +++ b/include/hw/virtio/vhost-user.h
> >> @@ -108,7 +108,6 @@ typedef void (*vu_async_close_fn)(DeviceState *cb);
> >>
> >> void vhost_user_async_close(DeviceState *d,
> >>                             CharBackend *chardev, struct vhost_dev *vhost,
> >> -                            vu_async_close_fn cb,
> >> -                            IOEventHandler *event_cb);
> >> +                            vu_async_close_fn cb);
> >>
> >> #endif
> >> --
> >> 2.45.0
> >>
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] vhost-user: fix lost reconnect again
  2024-05-15  5:47     ` Li Feng
@ 2024-05-15 15:47       ` Raphael Norwitz
  2024-05-16  2:48         ` Li Feng
  0 siblings, 1 reply; 10+ messages in thread
From: Raphael Norwitz @ 2024-05-15 15:47 UTC (permalink / raw)
  To: Li Feng
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella

The case you're describing makes sense but now I have some concerns on
the vhost_dev_cleanup bit.

On Wed, May 15, 2024 at 1:47 AM Li Feng <fengli@smartx.com> wrote:
>
>
>
> > 2024年5月14日 21:58,Raphael Norwitz <raphael@enfabrica.net> 写道:
> >
> > Code looks good. Just a question on the error case you're trying to fix.
> >
> > On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
> >>
> >> When the vhost-user is reconnecting to the backend, and if the vhost-user fails
> >> at the get_features in vhost_dev_init(), then the reconnect will fail
> >> and it will not be retriggered forever.
> >>
> >> The reason is:
> >> When the vhost-user fail at get_features, the vhost_dev_cleanup will be called
> >> immediately.
> >>
> >> vhost_dev_cleanup calls 'memset(hdev, 0, sizeof(struct vhost_dev))'.
> >>
> >> The reconnect path is:
> >> vhost_user_blk_event
> >>   vhost_user_async_close(.. vhost_user_blk_disconnect ..)
> >>     qemu_chr_fe_set_handlers <----- clear the notifier callback
> >>       schedule vhost_user_async_close_bh
> >>
> >> The vhost->vdev is null, so the vhost_user_blk_disconnect will not be
> >> called, then the event fd callback will not be reinstalled.
> >>
> >> With this patch, the vhost_user_blk_disconnect will call the
> >> vhost_dev_cleanup() again, it's safe.
> >>
> >> In addition, the CLOSE event may occur in a scenario where connected is false.
> >> At this time, the event handler will be cleared. We need to ensure that the
> >> event handler can remain installed.
> >
> > Following on from the prior patch, why would "connected" be false when
> > a CLOSE event happens?
>
> In OPEN event handling, vhost_user_blk_connect calls vhost_dev_init and encounters
> an error such that s->connected remains false.
> Next, after the CLOSE event arrives, it is found that s->connected is false, so nothing
> is done, but the event handler will be cleaned up in `vhost_user_async_close`
> before the CLOSE event is executed.
>

Got it - I see why the event handler is never re-installed in the code
as it was before if we fail at get_features. That said, how do you
explain your comment:

> >> With this patch, the vhost_user_blk_disconnect will call the
> >> vhost_dev_cleanup() again, it's safe.

I see vhost_dev_cleanup() accessing hdev without even a NULL check. In
the case we're talking about here I don't think it's a problem because
if vhost_dev_init() fails, connected will be false and hit the goto
but I am concerned that there could be double-frees or use-after-frees
in other cases.

> Thanks,
> Li
>
> >
> >>
> >> All vhost-user devices have this issue, including vhost-user-blk/scsi.
> >>
> >> Fixes: 71e076a07d ("hw/virtio: generalise CHR_EVENT_CLOSED handling")
> >>
> >> Signed-off-by: Li Feng <fengli@smartx.com>
> >> ---
> >> hw/block/vhost-user-blk.c   |  3 ++-
> >> hw/scsi/vhost-user-scsi.c   |  3 ++-
> >> hw/virtio/vhost-user-base.c |  3 ++-
> >> hw/virtio/vhost-user.c      | 10 +---------
> >> 4 files changed, 7 insertions(+), 12 deletions(-)
> >>
> >> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> >> index 41d1ac3a5a..c6842ced48 100644
> >> --- a/hw/block/vhost-user-blk.c
> >> +++ b/hw/block/vhost-user-blk.c
> >> @@ -353,7 +353,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
> >>     VHostUserBlk *s = VHOST_USER_BLK(vdev);
> >>
> >>     if (!s->connected) {
> >> -        return;
> >> +        goto done;
> >>     }
> >>     s->connected = false;
> >>
> >> @@ -361,6 +361,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
> >>
> >>     vhost_dev_cleanup(&s->dev);
> >>
> >> +done:
> >>     /* Re-instate the event handler for new connections */
> >>     qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
> >>                              NULL, dev, NULL, true);
> >> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
> >> index 48a59e020e..b49a11d23b 100644
> >> --- a/hw/scsi/vhost-user-scsi.c
> >> +++ b/hw/scsi/vhost-user-scsi.c
> >> @@ -181,7 +181,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
> >>     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
> >>
> >>     if (!s->connected) {
> >> -        return;
> >> +        goto done;
> >>     }
> >>     s->connected = false;
> >>
> >> @@ -189,6 +189,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
> >>
> >>     vhost_dev_cleanup(&vsc->dev);
> >>
> >> +done:
> >>     /* Re-instate the event handler for new connections */
> >>     qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
> >>                              vhost_user_scsi_event, NULL, dev, NULL, true);
> >> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
> >> index 4b54255682..11e72b1e3b 100644
> >> --- a/hw/virtio/vhost-user-base.c
> >> +++ b/hw/virtio/vhost-user-base.c
> >> @@ -225,13 +225,14 @@ static void vub_disconnect(DeviceState *dev)
> >>     VHostUserBase *vub = VHOST_USER_BASE(vdev);
> >>
> >>     if (!vub->connected) {
> >> -        return;
> >> +        goto done;
> >>     }
> >>     vub->connected = false;
> >>
> >>     vub_stop(vdev);
> >>     vhost_dev_cleanup(&vub->vhost_dev);
> >>
> >> +done:
> >>     /* Re-instate the event handler for new connections */
> >>     qemu_chr_fe_set_handlers(&vub->chardev,
> >>                              NULL, NULL, vub_event,
> >> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> >> index c929097e87..c407ea8939 100644
> >> --- a/hw/virtio/vhost-user.c
> >> +++ b/hw/virtio/vhost-user.c
> >> @@ -2781,16 +2781,8 @@ typedef struct {
> >> static void vhost_user_async_close_bh(void *opaque)
> >> {
> >>     VhostAsyncCallback *data = opaque;
> >> -    struct vhost_dev *vhost = data->vhost;
> >>
> >> -    /*
> >> -     * If the vhost_dev has been cleared in the meantime there is
> >> -     * nothing left to do as some other path has completed the
> >> -     * cleanup.
> >> -     */
> >> -    if (vhost->vdev) {
> >> -        data->cb(data->dev);
> >> -    }
> >> +    data->cb(data->dev);
> >>
> >>     g_free(data);
> >> }
> >> --
> >> 2.45.0
> >>
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] vhost-user: fix lost reconnect again
  2024-05-15 15:47       ` Raphael Norwitz
@ 2024-05-16  2:48         ` Li Feng
  2024-05-16  4:19           ` Raphael Norwitz
  0 siblings, 1 reply; 10+ messages in thread
From: Li Feng @ 2024-05-16  2:48 UTC (permalink / raw)
  To: Raphael Norwitz
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella

[-- Attachment #1: Type: text/plain, Size: 7494 bytes --]



> 2024年5月15日 23:47,Raphael Norwitz <raphael@enfabrica.net> 写道:
> 
> The case you're describing makes sense but now I have some concerns on
> the vhost_dev_cleanup bit.
> 
> On Wed, May 15, 2024 at 1:47 AM Li Feng <fengli@smartx.com <mailto:fengli@smartx.com>> wrote:
>> 
>> 
>> 
>>> 2024年5月14日 21:58,Raphael Norwitz <raphael@enfabrica.net> 写道:
>>> 
>>> Code looks good. Just a question on the error case you're trying to fix.
>>> 
>>> On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
>>>> 
>>>> When the vhost-user is reconnecting to the backend, and if the vhost-user fails
>>>> at the get_features in vhost_dev_init(), then the reconnect will fail
>>>> and it will not be retriggered forever.
>>>> 
>>>> The reason is:
>>>> When the vhost-user fail at get_features, the vhost_dev_cleanup will be called
>>>> immediately.
>>>> 
>>>> vhost_dev_cleanup calls 'memset(hdev, 0, sizeof(struct vhost_dev))'.
>>>> 
>>>> The reconnect path is:
>>>> vhost_user_blk_event
>>>>  vhost_user_async_close(.. vhost_user_blk_disconnect ..)
>>>>    qemu_chr_fe_set_handlers <----- clear the notifier callback
>>>>      schedule vhost_user_async_close_bh
>>>> 
>>>> The vhost->vdev is null, so the vhost_user_blk_disconnect will not be
>>>> called, then the event fd callback will not be reinstalled.
>>>> 
>>>> With this patch, the vhost_user_blk_disconnect will call the
>>>> vhost_dev_cleanup() again, it's safe.
>>>> 
>>>> In addition, the CLOSE event may occur in a scenario where connected is false.
>>>> At this time, the event handler will be cleared. We need to ensure that the
>>>> event handler can remain installed.
>>> 
>>> Following on from the prior patch, why would "connected" be false when
>>> a CLOSE event happens?
>> 
>> In OPEN event handling, vhost_user_blk_connect calls vhost_dev_init and encounters
>> an error such that s->connected remains false.
>> Next, after the CLOSE event arrives, it is found that s->connected is false, so nothing
>> is done, but the event handler will be cleaned up in `vhost_user_async_close`
>> before the CLOSE event is executed.
>> 
> 
> Got it - I see why the event handler is never re-installed in the code
> as it was before if we fail at get_features. That said, how do you
> explain your comment:

OK, I will update the commit message because this code has changed some months ago.

> 
>>>> With this patch, the vhost_user_blk_disconnect will call the
>>>> vhost_dev_cleanup() again, it's safe.
> 
> I see vhost_dev_cleanup() accessing hdev without even a NULL check. In
> the case we're talking about here I don't think it's a problem because
> if vhost_dev_init() fails, connected will be false and hit the goto
> but I am concerned that there could be double-frees or use-after-frees
> in other cases.

OK, you are right, with this patch, the vhost_dev_cleanup will not be
called multiple times now.

I think there is no need to worry about calling vhost_dev_cleanup multiple times,
because historically vhost_dev_cleanup has been allowed to be called multiple
times, and looking at the code, it can be found that calling vhost_dev_cleanup
multiple times is indeed safe.

Look this patch:

commit e0547b59dc0ead4c605d3f02d1c8829630a1311b
Author: Marc-André Lureau <marcandre.lureau@redhat.com>
Date:   Wed Jul 27 01:15:02 2016 +0400

    vhost: make vhost_dev_cleanup() idempotent

    It is called on multiple code path, so make it safe to call several
    times (note: I don't remember a reproducer here, but a function called
    'cleanup' should probably be idempotent in my book)

    Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
    Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
    Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

Thanks,
Li

> 
>> Thanks,
>> Li
>> 
>>> 
>>>> 
>>>> All vhost-user devices have this issue, including vhost-user-blk/scsi.
>>>> 
>>>> Fixes: 71e076a07d ("hw/virtio: generalise CHR_EVENT_CLOSED handling")
>>>> 
>>>> Signed-off-by: Li Feng <fengli@smartx.com>
>>>> ---
>>>> hw/block/vhost-user-blk.c   |  3 ++-
>>>> hw/scsi/vhost-user-scsi.c   |  3 ++-
>>>> hw/virtio/vhost-user-base.c |  3 ++-
>>>> hw/virtio/vhost-user.c      | 10 +---------
>>>> 4 files changed, 7 insertions(+), 12 deletions(-)
>>>> 
>>>> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
>>>> index 41d1ac3a5a..c6842ced48 100644
>>>> --- a/hw/block/vhost-user-blk.c
>>>> +++ b/hw/block/vhost-user-blk.c
>>>> @@ -353,7 +353,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>>>>    VHostUserBlk *s = VHOST_USER_BLK(vdev);
>>>> 
>>>>    if (!s->connected) {
>>>> -        return;
>>>> +        goto done;
>>>>    }
>>>>    s->connected = false;
>>>> 
>>>> @@ -361,6 +361,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>>>> 
>>>>    vhost_dev_cleanup(&s->dev);
>>>> 
>>>> +done:
>>>>    /* Re-instate the event handler for new connections */
>>>>    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
>>>>                             NULL, dev, NULL, true);
>>>> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
>>>> index 48a59e020e..b49a11d23b 100644
>>>> --- a/hw/scsi/vhost-user-scsi.c
>>>> +++ b/hw/scsi/vhost-user-scsi.c
>>>> @@ -181,7 +181,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>>>>    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
>>>> 
>>>>    if (!s->connected) {
>>>> -        return;
>>>> +        goto done;
>>>>    }
>>>>    s->connected = false;
>>>> 
>>>> @@ -189,6 +189,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>>>> 
>>>>    vhost_dev_cleanup(&vsc->dev);
>>>> 
>>>> +done:
>>>>    /* Re-instate the event handler for new connections */
>>>>    qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
>>>>                             vhost_user_scsi_event, NULL, dev, NULL, true);
>>>> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
>>>> index 4b54255682..11e72b1e3b 100644
>>>> --- a/hw/virtio/vhost-user-base.c
>>>> +++ b/hw/virtio/vhost-user-base.c
>>>> @@ -225,13 +225,14 @@ static void vub_disconnect(DeviceState *dev)
>>>>    VHostUserBase *vub = VHOST_USER_BASE(vdev);
>>>> 
>>>>    if (!vub->connected) {
>>>> -        return;
>>>> +        goto done;
>>>>    }
>>>>    vub->connected = false;
>>>> 
>>>>    vub_stop(vdev);
>>>>    vhost_dev_cleanup(&vub->vhost_dev);
>>>> 
>>>> +done:
>>>>    /* Re-instate the event handler for new connections */
>>>>    qemu_chr_fe_set_handlers(&vub->chardev,
>>>>                             NULL, NULL, vub_event,
>>>> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
>>>> index c929097e87..c407ea8939 100644
>>>> --- a/hw/virtio/vhost-user.c
>>>> +++ b/hw/virtio/vhost-user.c
>>>> @@ -2781,16 +2781,8 @@ typedef struct {
>>>> static void vhost_user_async_close_bh(void *opaque)
>>>> {
>>>>    VhostAsyncCallback *data = opaque;
>>>> -    struct vhost_dev *vhost = data->vhost;
>>>> 
>>>> -    /*
>>>> -     * If the vhost_dev has been cleared in the meantime there is
>>>> -     * nothing left to do as some other path has completed the
>>>> -     * cleanup.
>>>> -     */
>>>> -    if (vhost->vdev) {
>>>> -        data->cb(data->dev);
>>>> -    }
>>>> +    data->cb(data->dev);
>>>> 
>>>>    g_free(data);
>>>> }
>>>> --
>>>> 2.45.0


[-- Attachment #2: Type: text/html, Size: 20619 bytes --]

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v3 2/2] vhost-user: fix lost reconnect again
  2024-05-16  2:48         ` Li Feng
@ 2024-05-16  4:19           ` Raphael Norwitz
  0 siblings, 0 replies; 10+ messages in thread
From: Raphael Norwitz @ 2024-05-16  4:19 UTC (permalink / raw)
  To: Li Feng
  Cc: Michael S. Tsirkin, Kevin Wolf, Hanna Reitz, Paolo Bonzini,
	Fam Zheng, Alex Bennée, open list:Block layer core,
	open list:All patches CC here, Yajun Wu, Stefano Garzarella

OK - I'm happy with this approach then.

On Wed, May 15, 2024 at 10:48 PM Li Feng <fengli@smartx.com> wrote:
>
>
>
> 2024年5月15日 23:47,Raphael Norwitz <raphael@enfabrica.net> 写道:
>
> The case you're describing makes sense but now I have some concerns on
> the vhost_dev_cleanup bit.
>
> On Wed, May 15, 2024 at 1:47 AM Li Feng <fengli@smartx.com> wrote:
>
>
>
>
> 2024年5月14日 21:58,Raphael Norwitz <raphael@enfabrica.net> 写道:
>
> Code looks good. Just a question on the error case you're trying to fix.
>
> On Tue, May 14, 2024 at 2:12 AM Li Feng <fengli@smartx.com> wrote:
>
>
> When the vhost-user is reconnecting to the backend, and if the vhost-user fails
> at the get_features in vhost_dev_init(), then the reconnect will fail
> and it will not be retriggered forever.
>
> The reason is:
> When the vhost-user fail at get_features, the vhost_dev_cleanup will be called
> immediately.
>
> vhost_dev_cleanup calls 'memset(hdev, 0, sizeof(struct vhost_dev))'.
>
> The reconnect path is:
> vhost_user_blk_event
>  vhost_user_async_close(.. vhost_user_blk_disconnect ..)
>    qemu_chr_fe_set_handlers <----- clear the notifier callback
>      schedule vhost_user_async_close_bh
>
> The vhost->vdev is null, so the vhost_user_blk_disconnect will not be
> called, then the event fd callback will not be reinstalled.
>
> With this patch, the vhost_user_blk_disconnect will call the
> vhost_dev_cleanup() again, it's safe.
>
> In addition, the CLOSE event may occur in a scenario where connected is false.
> At this time, the event handler will be cleared. We need to ensure that the
> event handler can remain installed.
>
>
> Following on from the prior patch, why would "connected" be false when
> a CLOSE event happens?
>
>
> In OPEN event handling, vhost_user_blk_connect calls vhost_dev_init and encounters
> an error such that s->connected remains false.
> Next, after the CLOSE event arrives, it is found that s->connected is false, so nothing
> is done, but the event handler will be cleaned up in `vhost_user_async_close`
> before the CLOSE event is executed.
>
>
> Got it - I see why the event handler is never re-installed in the code
> as it was before if we fail at get_features. That said, how do you
> explain your comment:
>
>
> OK, I will update the commit message because this code has changed some months ago.
>
>
> With this patch, the vhost_user_blk_disconnect will call the
> vhost_dev_cleanup() again, it's safe.
>
>
> I see vhost_dev_cleanup() accessing hdev without even a NULL check. In
> the case we're talking about here I don't think it's a problem because
> if vhost_dev_init() fails, connected will be false and hit the goto
> but I am concerned that there could be double-frees or use-after-frees
> in other cases.
>
>
> OK, you are right, with this patch, the vhost_dev_cleanup will not be
> called multiple times now.
>
> I think there is no need to worry about calling vhost_dev_cleanup multiple times,
> because historically vhost_dev_cleanup has been allowed to be called multiple
> times, and looking at the code, it can be found that calling vhost_dev_cleanup
> multiple times is indeed safe.
>
> Look this patch:
>
> commit e0547b59dc0ead4c605d3f02d1c8829630a1311b
> Author: Marc-André Lureau <marcandre.lureau@redhat.com>
> Date:   Wed Jul 27 01:15:02 2016 +0400
>
>     vhost: make vhost_dev_cleanup() idempotent
>
>     It is called on multiple code path, so make it safe to call several
>     times (note: I don't remember a reproducer here, but a function called
>     'cleanup' should probably be idempotent in my book)
>
>     Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
>     Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
>     Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
>
> Thanks,
> Li
>
>
> Thanks,
> Li
>
>
>
> All vhost-user devices have this issue, including vhost-user-blk/scsi.
>
> Fixes: 71e076a07d ("hw/virtio: generalise CHR_EVENT_CLOSED handling")
>
> Signed-off-by: Li Feng <fengli@smartx.com>
> ---
> hw/block/vhost-user-blk.c   |  3 ++-
> hw/scsi/vhost-user-scsi.c   |  3 ++-
> hw/virtio/vhost-user-base.c |  3 ++-
> hw/virtio/vhost-user.c      | 10 +---------
> 4 files changed, 7 insertions(+), 12 deletions(-)
>
> diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c
> index 41d1ac3a5a..c6842ced48 100644
> --- a/hw/block/vhost-user-blk.c
> +++ b/hw/block/vhost-user-blk.c
> @@ -353,7 +353,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>    VHostUserBlk *s = VHOST_USER_BLK(vdev);
>
>    if (!s->connected) {
> -        return;
> +        goto done;
>    }
>    s->connected = false;
>
> @@ -361,6 +361,7 @@ static void vhost_user_blk_disconnect(DeviceState *dev)
>
>    vhost_dev_cleanup(&s->dev);
>
> +done:
>    /* Re-instate the event handler for new connections */
>    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
>                             NULL, dev, NULL, true);
> diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
> index 48a59e020e..b49a11d23b 100644
> --- a/hw/scsi/vhost-user-scsi.c
> +++ b/hw/scsi/vhost-user-scsi.c
> @@ -181,7 +181,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
>
>    if (!s->connected) {
> -        return;
> +        goto done;
>    }
>    s->connected = false;
>
> @@ -189,6 +189,7 @@ static void vhost_user_scsi_disconnect(DeviceState *dev)
>
>    vhost_dev_cleanup(&vsc->dev);
>
> +done:
>    /* Re-instate the event handler for new connections */
>    qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
>                             vhost_user_scsi_event, NULL, dev, NULL, true);
> diff --git a/hw/virtio/vhost-user-base.c b/hw/virtio/vhost-user-base.c
> index 4b54255682..11e72b1e3b 100644
> --- a/hw/virtio/vhost-user-base.c
> +++ b/hw/virtio/vhost-user-base.c
> @@ -225,13 +225,14 @@ static void vub_disconnect(DeviceState *dev)
>    VHostUserBase *vub = VHOST_USER_BASE(vdev);
>
>    if (!vub->connected) {
> -        return;
> +        goto done;
>    }
>    vub->connected = false;
>
>    vub_stop(vdev);
>    vhost_dev_cleanup(&vub->vhost_dev);
>
> +done:
>    /* Re-instate the event handler for new connections */
>    qemu_chr_fe_set_handlers(&vub->chardev,
>                             NULL, NULL, vub_event,
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index c929097e87..c407ea8939 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -2781,16 +2781,8 @@ typedef struct {
> static void vhost_user_async_close_bh(void *opaque)
> {
>    VhostAsyncCallback *data = opaque;
> -    struct vhost_dev *vhost = data->vhost;
>
> -    /*
> -     * If the vhost_dev has been cleared in the meantime there is
> -     * nothing left to do as some other path has completed the
> -     * cleanup.
> -     */
> -    if (vhost->vdev) {
> -        data->cb(data->dev);
> -    }
> +    data->cb(data->dev);
>
>    g_free(data);
> }
> --
> 2.45.0
>
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2024-05-16  4:20 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20240514061239.86461-1-fengli@smartx.com>
2024-05-14  6:12 ` [PATCH v3 1/2] Revert "vhost-user: fix lost reconnect" Li Feng
2024-05-14 13:58   ` Raphael Norwitz
2024-05-15  5:46     ` Li Feng
2024-05-15 15:47       ` Raphael Norwitz
2024-05-14  6:12 ` [PATCH v3 2/2] vhost-user: fix lost reconnect again Li Feng
2024-05-14 13:58   ` Raphael Norwitz
2024-05-15  5:47     ` Li Feng
2024-05-15 15:47       ` Raphael Norwitz
2024-05-16  2:48         ` Li Feng
2024-05-16  4:19           ` Raphael Norwitz

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).