All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC v3] virtio/vsock: add two more queues for datagram types
@ 2021-07-06 22:26 Jiang Wang
  2021-07-07  8:33 ` Stefano Garzarella
  0 siblings, 1 reply; 11+ messages in thread
From: Jiang Wang @ 2021-07-06 22:26 UTC (permalink / raw)
  To: mst, qemu-devel; +Cc: jasowang, arseny.krasnov, stefanha, sgarzare

Datagram sockets are connectionless and unreliable.
The sender does not know the capacity of the receiver
and may send more packets than the receiver can handle.

Add two more dedicated virtqueues for datagram sockets,
so that it will not unfairly steal resources from
stream and future connection-oriented sockets.
---
v1 -> v2: use qemu cmd option to control number of queues,
        removed configuration settings for dgram.
v2 -> v3: use ioctl to get features and decide the number of
       virt queues, instead of a qemu cmd option.

btw: this patch is based on Arseny's SEQPACKET patch.

 hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
 hw/virtio/vhost-vsock.c                       |  3 ++
 include/hw/virtio/vhost-vsock-common.h        |  3 +-
 include/hw/virtio/vhost-vsock.h               |  4 ++
 include/standard-headers/linux/virtio_vsock.h |  3 ++
 5 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index 4ad6e234ad..8164e09445 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -17,6 +17,8 @@
 #include "hw/virtio/vhost-vsock.h"
 #include "qemu/iov.h"
 #include "monitor/monitor.h"
+#include <sys/ioctl.h>
+#include <linux/vhost.h>
 
 int vhost_vsock_common_start(VirtIODevice *vdev)
 {
@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
     return 0;
 }
 
+static int vhost_vsock_get_max_qps(void)
+{
+    uint64_t features;
+    int ret;
+    int fd = -1;
+
+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
+    if (fd == -1) {
+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
+        return -1;
+    }
+
+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
+    if (ret) {
+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
+        qemu_close(fd);
+        return ret;
+    }
+
+    qemu_close(fd);
+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
+        return MAX_VQS_WITH_DGRAM;
+
+    return MAX_VQS_WITHOUT_DGRAM;
+}
+
 void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
 {
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
 
     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
                 sizeof(struct virtio_vsock_config));
@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                        vhost_vsock_common_handle_output);
 
+    nvqs = vhost_vsock_get_max_qps();
+
+    if (nvqs < 0)
+        nvqs = MAX_VQS_WITHOUT_DGRAM;
+
+    if (nvqs == MAX_VQS_WITH_DGRAM) {
+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+                                              vhost_vsock_common_handle_output);
+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+                                               vhost_vsock_common_handle_output);
+    }
+
     /* The event queue belongs to QEMU */
     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                        vhost_vsock_common_handle_output);
 
-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
+    vvc->vhost_dev.nvqs = nvqs;
+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
 
     vvc->post_load_timer = NULL;
 }
@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
 
     virtio_delete_queue(vvc->recv_vq);
     virtio_delete_queue(vvc->trans_vq);
+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
+        virtio_delete_queue(vvc->dgram_recv_vq);
+        virtio_delete_queue(vvc->dgram_trans_vq);
+    }
+
+    if (vvc->vhost_dev.vqs)
+        g_free(vvc->vhost_dev.vqs);
+
     virtio_delete_queue(vvc->event_vq);
     virtio_cleanup(vdev);
 }
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
index 1b1a5c70ed..33bbe16983 100644
--- a/hw/virtio/vhost-vsock.c
+++ b/hw/virtio/vhost-vsock.c
@@ -23,6 +23,7 @@
 
 const int feature_bits[] = {
     VIRTIO_VSOCK_F_SEQPACKET,
+    VIRTIO_VSOCK_F_DGRAM,
     VHOST_INVALID_FEATURE_BIT
 };
 
@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
 
     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
     return vhost_get_features(&vvc->vhost_dev, feature_bits,
                                 requested_features);
 }
diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
index e412b5ee98..798715241f 100644
--- a/include/hw/virtio/vhost-vsock-common.h
+++ b/include/hw/virtio/vhost-vsock-common.h
@@ -27,12 +27,13 @@ enum {
 struct VHostVSockCommon {
     VirtIODevice parent;
 
-    struct vhost_virtqueue vhost_vqs[2];
     struct vhost_dev vhost_dev;
 
     VirtQueue *event_vq;
     VirtQueue *recv_vq;
     VirtQueue *trans_vq;
+    VirtQueue *dgram_recv_vq;
+    VirtQueue *dgram_trans_vq;
 
     QEMUTimer *post_load_timer;
 };
diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
index 84f4e727c7..e10319785d 100644
--- a/include/hw/virtio/vhost-vsock.h
+++ b/include/hw/virtio/vhost-vsock.h
@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
 typedef struct {
     uint64_t guest_cid;
     char *vhostfd;
+    bool enable_dgram;
 } VHostVSockConf;
 
 struct VHostVSock {
@@ -33,4 +34,7 @@ struct VHostVSock {
     /*< public >*/
 };
 
+#define MAX_VQS_WITHOUT_DGRAM 2
+#define MAX_VQS_WITH_DGRAM 4
+
 #endif /* QEMU_VHOST_VSOCK_H */
diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
index 5eac522ee2..6ff8c5084c 100644
--- a/include/standard-headers/linux/virtio_vsock.h
+++ b/include/standard-headers/linux/virtio_vsock.h
@@ -41,6 +41,9 @@
 /* The feature bitmap for virtio vsock */
 #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET supported */
 
+/* Feature bits */
+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
+
 struct virtio_vsock_config {
 	uint64_t guest_cid;
 } QEMU_PACKED;
-- 
2.11.0



^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-07-06 22:26 [RFC v3] virtio/vsock: add two more queues for datagram types Jiang Wang
@ 2021-07-07  8:33 ` Stefano Garzarella
  2021-07-07 16:52   ` Jiang Wang .
  0 siblings, 1 reply; 11+ messages in thread
From: Stefano Garzarella @ 2021-07-07  8:33 UTC (permalink / raw)
  To: Jiang Wang; +Cc: arseny.krasnov, jasowang, qemu-devel, stefanha, mst

On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
>Datagram sockets are connectionless and unreliable.
>The sender does not know the capacity of the receiver
>and may send more packets than the receiver can handle.
>
>Add two more dedicate virtqueues for datagram sockets,
>so that it will not unfairly steal resources from
>stream and future connection-oriented sockets.
>---
>v1 -> v2: use qemu cmd option to control number of queues,
>        removed configuration settings for dgram.
>v2 -> v3: use ioctl to get features and decie numbr of
>       virt queues, instead of qemu cmd option.
>
>btw: this patch is based on Arseny's SEQPACKET patch.
>
> hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
> hw/virtio/vhost-vsock.c                       |  3 ++
> include/hw/virtio/vhost-vsock-common.h        |  3 +-
> include/hw/virtio/vhost-vsock.h               |  4 ++
> include/standard-headers/linux/virtio_vsock.h |  3 ++
> 5 files changed, 63 insertions(+), 3 deletions(-)
>
>diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
>index 4ad6e234ad..8164e09445 100644
>--- a/hw/virtio/vhost-vsock-common.c
>+++ b/hw/virtio/vhost-vsock-common.c
>@@ -17,6 +17,8 @@
> #include "hw/virtio/vhost-vsock.h"
> #include "qemu/iov.h"
> #include "monitor/monitor.h"
>+#include <sys/ioctl.h>
>+#include <linux/vhost.h>
>
> int vhost_vsock_common_start(VirtIODevice *vdev)
> {
>@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
>     return 0;
> }
>
>+static int vhost_vsock_get_max_qps(void)
>+{
>+    uint64_t features;
>+    int ret;
>+    int fd = -1;
>+
>+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
>+    if (fd == -1) {
>+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
>+        return -1;
>+    }

You should use the `vhostfd` already opened in 
vhost_vsock_device_realize(), since QEMU may not have permission to 
access to the device, and the file descriptor can be passed from the 
management layer.

>+
>+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
>+    if (ret) {
>+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
>+        qemu_close(fd);
>+        return ret;
>+    }
>+
>+    qemu_close(fd);
>+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
>+        return MAX_VQS_WITH_DGRAM;
>+
>+    return MAX_VQS_WITHOUT_DGRAM;
>+}
>+
> void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> {
>     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
>
>     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
>                 sizeof(struct virtio_vsock_config));
>@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
>     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>                                        vhost_vsock_common_handle_output);
>
>+    nvqs = vhost_vsock_get_max_qps();

You can't do this here, since the vhost-vsock-common.c functions are 
used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock 
device since the device is emulated in a separate user process.

I think you can use something similar to what you did in v2, where you 
passed a parameter to vhost_vsock_common_realize() to enable or not the 
datagram queues.

>+
>+    if (nvqs < 0)
>+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>+
>+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>+                                              vhost_vsock_common_handle_output);
>+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>+                                               vhost_vsock_common_handle_output);
>+    }
>+
>     /* The event queue belongs to QEMU */
>     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>                                        vhost_vsock_common_handle_output);

Did you do a test with a guest that doesn't support datagram with QEMU 
and hosts that do?

I repost my thoughts that I had on v2:

     What happen if the guest doesn't support dgram?

     I think we should dynamically use the 3rd queue or the 5th queue for 
     the events at runtime after the guest acked the features.

     Maybe better to switch to an array of VirtQueue.

>
>-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
>-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
>+    vvc->vhost_dev.nvqs = nvqs;
>+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
>
>     vvc->post_load_timer = NULL;
> }
>@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
>
>     virtio_delete_queue(vvc->recv_vq);
>     virtio_delete_queue(vvc->trans_vq);
>+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
>+        virtio_delete_queue(vvc->dgram_recv_vq);
>+        virtio_delete_queue(vvc->dgram_trans_vq);
>+    }
>+
>+    if (vvc->vhost_dev.vqs)

g_free() already handles NULL pointers, so you can remove this check.

>+        g_free(vvc->vhost_dev.vqs);
>+
>     virtio_delete_queue(vvc->event_vq);
>     virtio_cleanup(vdev);
> }
>diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
>index 1b1a5c70ed..33bbe16983 100644
>--- a/hw/virtio/vhost-vsock.c
>+++ b/hw/virtio/vhost-vsock.c
>@@ -23,6 +23,7 @@
>
> const int feature_bits[] = {
>     VIRTIO_VSOCK_F_SEQPACKET,
>+    VIRTIO_VSOCK_F_DGRAM,
>     VHOST_INVALID_FEATURE_BIT
> };
>
>@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
>     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>
>     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
>+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
>+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
>     return vhost_get_features(&vvc->vhost_dev, feature_bits,
>                                 requested_features);
> }
>diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
>index e412b5ee98..798715241f 100644
>--- a/include/hw/virtio/vhost-vsock-common.h
>+++ b/include/hw/virtio/vhost-vsock-common.h
>@@ -27,12 +27,13 @@ enum {
> struct VHostVSockCommon {
>     VirtIODevice parent;
>
>-    struct vhost_virtqueue vhost_vqs[2];
>     struct vhost_dev vhost_dev;
>
>     VirtQueue *event_vq;
>     VirtQueue *recv_vq;
>     VirtQueue *trans_vq;
>+    VirtQueue *dgram_recv_vq;
>+    VirtQueue *dgram_trans_vq;
>
>     QEMUTimer *post_load_timer;
> };
>diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
>index 84f4e727c7..e10319785d 100644
>--- a/include/hw/virtio/vhost-vsock.h
>+++ b/include/hw/virtio/vhost-vsock.h
>@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> typedef struct {
>     uint64_t guest_cid;
>     char *vhostfd;
>+    bool enable_dgram;

Leftover?

> } VHostVSockConf;
>
> struct VHostVSock {
>@@ -33,4 +34,7 @@ struct VHostVSock {
>     /*< public >*/
> };
>
>+#define MAX_VQS_WITHOUT_DGRAM 2
>+#define MAX_VQS_WITH_DGRAM 4
>+
> #endif /* QEMU_VHOST_VSOCK_H */
>diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
>index 5eac522ee2..6ff8c5084c 100644
>--- a/include/standard-headers/linux/virtio_vsock.h
>+++ b/include/standard-headers/linux/virtio_vsock.h
>@@ -41,6 +41,9 @@
> /* The feature bitmap for virtio vsock */
> #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET supported */
>
>+/* Feature bits */
>+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */

Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you 
used bit 2 for DGRAM.

Thanks,
Stefano



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-07-07  8:33 ` Stefano Garzarella
@ 2021-07-07 16:52   ` Jiang Wang .
  2021-07-07 17:27     ` Stefano Garzarella
  0 siblings, 1 reply; 11+ messages in thread
From: Jiang Wang . @ 2021-07-07 16:52 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
> >Datagram sockets are connectionless and unreliable.
> >The sender does not know the capacity of the receiver
> >and may send more packets than the receiver can handle.
> >
> >Add two more dedicate virtqueues for datagram sockets,
> >so that it will not unfairly steal resources from
> >stream and future connection-oriented sockets.
> >---
> >v1 -> v2: use qemu cmd option to control number of queues,
> >        removed configuration settings for dgram.
> >v2 -> v3: use ioctl to get features and decie numbr of
> >       virt queues, instead of qemu cmd option.
> >
> >btw: this patch is based on Arseny's SEQPACKET patch.
> >
> > hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
> > hw/virtio/vhost-vsock.c                       |  3 ++
> > include/hw/virtio/vhost-vsock-common.h        |  3 +-
> > include/hw/virtio/vhost-vsock.h               |  4 ++
> > include/standard-headers/linux/virtio_vsock.h |  3 ++
> > 5 files changed, 63 insertions(+), 3 deletions(-)
> >
> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >index 4ad6e234ad..8164e09445 100644
> >--- a/hw/virtio/vhost-vsock-common.c
> >+++ b/hw/virtio/vhost-vsock-common.c
> >@@ -17,6 +17,8 @@
> > #include "hw/virtio/vhost-vsock.h"
> > #include "qemu/iov.h"
> > #include "monitor/monitor.h"
> >+#include <sys/ioctl.h>
> >+#include <linux/vhost.h>
> >
> > int vhost_vsock_common_start(VirtIODevice *vdev)
> > {
> >@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >     return 0;
> > }
> >
> >+static int vhost_vsock_get_max_qps(void)
> >+{
> >+    uint64_t features;
> >+    int ret;
> >+    int fd = -1;
> >+
> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
> >+    if (fd == -1) {
> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >+        return -1;
> >+    }
>
> You should use the `vhostfd` already opened in
> vhost_vsock_device_realize(), since QEMU may not have permission to
> access to the device, and the file descriptor can be passed from the
> management layer.
>
Sure. Will do.

> >+
> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >+    if (ret) {
> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >+        qemu_close(fd);
> >+        return ret;
> >+    }
> >+
> >+    qemu_close(fd);
> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >+        return MAX_VQS_WITH_DGRAM;
> >+
> >+    return MAX_VQS_WITHOUT_DGRAM;
> >+}
> >+
> > void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> > {
> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >
> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >                 sizeof(struct virtio_vsock_config));
> >@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                        vhost_vsock_common_handle_output);
> >
> >+    nvqs = vhost_vsock_get_max_qps();
>
> You can't do this here, since the vhost-vsock-common.c functions are
> used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock
> device since the device is emulated in a separate user process.
>
> I think you can use something similar to what you did in v2, where you
> passed a parameter to vhost_vsock_common_realize() to enable or not the
> datagram queues.
>
Just to make sure, the qemu parameter will only be used for vhost-user-vsock,
right? I think for the vhost-vsock kernel module, we will use ioctl and ignore
the qemu parameter?

> >+
> >+    if (nvqs < 0)
> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >+
> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >+                                              vhost_vsock_common_handle_output);
> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >+                                               vhost_vsock_common_handle_output);
> >+    }
> >+
> >     /* The event queue belongs to QEMU */
> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                        vhost_vsock_common_handle_output);
>
> Did you do a test with a guest that doesn't support datagram with QEMU
> and hosts that do?
>
Yes, and it works.

> I repost my thoughts that I had on v2:
>
>      What happen if the guest doesn't support dgram?
>
>      I think we should dynamically use the 3rd queue or the 5th queue for
>      the events at runtime after the guest acked the features.
>
>      Maybe better to switch to an array of VirtQueue.
>
I think in the current v3, it already dynamically uses the 3rd or 5th queue
depending

> >
> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
> >+    vvc->vhost_dev.nvqs = nvqs;
> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
> >
> >     vvc->post_load_timer = NULL;
> > }
> >@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
> >
> >     virtio_delete_queue(vvc->recv_vq);
> >     virtio_delete_queue(vvc->trans_vq);
> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
> >+        virtio_delete_queue(vvc->dgram_recv_vq);
> >+        virtio_delete_queue(vvc->dgram_trans_vq);
> >+    }
> >+
> >+    if (vvc->vhost_dev.vqs)
>
> g_free() already handles NULL pointers, so you can remove this check.
>
Got it.

> >+        g_free(vvc->vhost_dev.vqs);
> >+
> >     virtio_delete_queue(vvc->event_vq);
> >     virtio_cleanup(vdev);
> > }
> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
> >index 1b1a5c70ed..33bbe16983 100644
> >--- a/hw/virtio/vhost-vsock.c
> >+++ b/hw/virtio/vhost-vsock.c
> >@@ -23,6 +23,7 @@
> >
> > const int feature_bits[] = {
> >     VIRTIO_VSOCK_F_SEQPACKET,
> >+    VIRTIO_VSOCK_F_DGRAM,
> >     VHOST_INVALID_FEATURE_BIT
> > };
> >
> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >
> >     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
> >                                 requested_features);
> > }
> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
> >index e412b5ee98..798715241f 100644
> >--- a/include/hw/virtio/vhost-vsock-common.h
> >+++ b/include/hw/virtio/vhost-vsock-common.h
> >@@ -27,12 +27,13 @@ enum {
> > struct VHostVSockCommon {
> >     VirtIODevice parent;
> >
> >-    struct vhost_virtqueue vhost_vqs[2];
> >     struct vhost_dev vhost_dev;
> >
> >     VirtQueue *event_vq;
> >     VirtQueue *recv_vq;
> >     VirtQueue *trans_vq;
> >+    VirtQueue *dgram_recv_vq;
> >+    VirtQueue *dgram_trans_vq;
> >
> >     QEMUTimer *post_load_timer;
> > };
> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
> >index 84f4e727c7..e10319785d 100644
> >--- a/include/hw/virtio/vhost-vsock.h
> >+++ b/include/hw/virtio/vhost-vsock.h
> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> > typedef struct {
> >     uint64_t guest_cid;
> >     char *vhostfd;
> >+    bool enable_dgram;
>
> Leftover?
>
Right, but to support vhost-user-vsock, I think I will add this parameter back?

> > } VHostVSockConf;
> >
> > struct VHostVSock {
> >@@ -33,4 +34,7 @@ struct VHostVSock {
> >     /*< public >*/
> > };
> >
> >+#define MAX_VQS_WITHOUT_DGRAM 2
> >+#define MAX_VQS_WITH_DGRAM 4
> >+
> > #endif /* QEMU_VHOST_VSOCK_H */
> >diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
> >index 5eac522ee2..6ff8c5084c 100644
> >--- a/include/standard-headers/linux/virtio_vsock.h
> >+++ b/include/standard-headers/linux/virtio_vsock.h
> >@@ -41,6 +41,9 @@
> > /* The feature bitmap for virtio vsock */
> > #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET supported */
> >
> >+/* Feature bits */
> >+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
>
> Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you
> used bit 2 for DGRAM.
>
My bad, I did not update the code here yet.

> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-07-07 16:52   ` Jiang Wang .
@ 2021-07-07 17:27     ` Stefano Garzarella
  2021-07-07 17:36       ` Jiang Wang .
  2021-08-03 18:58       ` Jiang Wang .
  0 siblings, 2 replies; 11+ messages in thread
From: Stefano Garzarella @ 2021-07-07 17:27 UTC (permalink / raw)
  To: Jiang Wang .
  Cc: Arseny Krasnov, Jason Wang, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
>On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>>
>> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
>> >Datagram sockets are connectionless and unreliable.
>> >The sender does not know the capacity of the receiver
>> >and may send more packets than the receiver can handle.
>> >
>> >Add two more dedicate virtqueues for datagram sockets,
>> >so that it will not unfairly steal resources from
>> >stream and future connection-oriented sockets.
>> >---
>> >v1 -> v2: use qemu cmd option to control number of queues,
>> >        removed configuration settings for dgram.
>> >v2 -> v3: use ioctl to get features and decie numbr of
>> >       virt queues, instead of qemu cmd option.
>> >
>> >btw: this patch is based on Arseny's SEQPACKET patch.
>> >
>> > hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
>> > hw/virtio/vhost-vsock.c                       |  3 ++
>> > include/hw/virtio/vhost-vsock-common.h        |  3 +-
>> > include/hw/virtio/vhost-vsock.h               |  4 ++
>> > include/standard-headers/linux/virtio_vsock.h |  3 ++
>> > 5 files changed, 63 insertions(+), 3 deletions(-)
>> >
>> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
>> >index 4ad6e234ad..8164e09445 100644
>> >--- a/hw/virtio/vhost-vsock-common.c
>> >+++ b/hw/virtio/vhost-vsock-common.c
>> >@@ -17,6 +17,8 @@
>> > #include "hw/virtio/vhost-vsock.h"
>> > #include "qemu/iov.h"
>> > #include "monitor/monitor.h"
>> >+#include <sys/ioctl.h>
>> >+#include <linux/vhost.h>
>> >
>> > int vhost_vsock_common_start(VirtIODevice *vdev)
>> > {
>> >@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
>> >     return 0;
>> > }
>> >
>> >+static int vhost_vsock_get_max_qps(void)
>> >+{
>> >+    uint64_t features;
>> >+    int ret;
>> >+    int fd = -1;
>> >+
>> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
>> >+    if (fd == -1) {
>> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
>> >+        return -1;
>> >+    }
>>
>> You should use the `vhostfd` already opened in
>> vhost_vsock_device_realize(), since QEMU may not have permission to
>> access to the device, and the file descriptor can be passed from the
>> management layer.
>>
>Sure. Will do.
>
>> >+
>> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
>> >+    if (ret) {
>> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
>> >+        qemu_close(fd);
>> >+        return ret;
>> >+    }
>> >+
>> >+    qemu_close(fd);
>> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
>> >+        return MAX_VQS_WITH_DGRAM;
>> >+
>> >+    return MAX_VQS_WITHOUT_DGRAM;
>> >+}
>> >+
>> > void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
>> > {
>> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
>> >
>> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
>> >                 sizeof(struct virtio_vsock_config));
>> >@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice 
>> >*vdev, const char *name)
>> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >                                        vhost_vsock_common_handle_output);
>> >
>> >+    nvqs = vhost_vsock_get_max_qps();
>>
>> You can't do this here, since the vhost-vsock-common.c functions are
>> used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock
>> device since the device is emulated in a separate user process.
>>
>> I think you can use something similar to what you did in v2, where you
>> passed a parameter to vhost_vsock_common_realize() to enable or not the
>> datagram queues.
>>
>Just to make sure, the qemu parameter will only be used for vhost-user-vsock,
>right? I think for the vhost-vsock kernel module, we will use ioctl and ignore
>the qemu parameter?

No, I mean a function parameter in vhost_vsock_common_realize() that we 
set to true when datagram is supported by the backend.

You can move the vhost_vsock_get_max_qps() call in 
vhost_vsock_device_realize(), just before call 
vhost_vsock_common_realize() where we can pass a parameter to specify if 
datagram is supported or not.

For now in vhost-user-vsock you can always pass `false`. When we will 
support it, we can add something similar to discover the features.

Just to be clear, we don't need any QEMU command line parameter.

>
>> >+
>> >+    if (nvqs < 0)
>> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>> >+
>> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >+                                              vhost_vsock_common_handle_output);
>> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >+                                               vhost_vsock_common_handle_output);
>> >+    }
>> >+
>> >     /* The event queue belongs to QEMU */
>> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >                                        vhost_vsock_common_handle_output);
>>
>> Did you do a test with a guest that doesn't support datagram with QEMU
>> and hosts that do?
>>
>Yes, and it works.
>
>> I repost my thoughts that I had on v2:
>>
>>      What happen if the guest doesn't support dgram?
>>
>>      I think we should dynamically use the 3rd queue or the 5th queue for
>>      the events at runtime after the guest acked the features.
>>
>>      Maybe better to switch to an array of VirtQueue.
>>
>I think in current V3, it  already dynamically use 3rd or 5th queue 
>depending
>on the feature bit.

I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't 
know the features acked by the guest, so how can it be dynamic?

Here we should know only if the host kernel supports it.

Maybe it works, because in QEMU we use the event queue only after a 
migration to send a reset event, so you can try to migrate a guest to 
check this path.

I'll be off until July 16th, after that I'll check better, but I think 
there is something wrong here and we should use the 3rd or 5th queue for 
events only after the guest acked the features.

>
>> >
>> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
>> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
>> >+    vvc->vhost_dev.nvqs = nvqs;
>> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
>> >
>> >     vvc->post_load_timer = NULL;
>> > }
>> >@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
>> >
>> >     virtio_delete_queue(vvc->recv_vq);
>> >     virtio_delete_queue(vvc->trans_vq);
>> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
>> >+        virtio_delete_queue(vvc->dgram_recv_vq);
>> >+        virtio_delete_queue(vvc->dgram_trans_vq);
>> >+    }
>> >+
>> >+    if (vvc->vhost_dev.vqs)
>>
>> g_free() already handles NULL pointers, so you can remove this check.
>>
>Got it.
>
>> >+        g_free(vvc->vhost_dev.vqs);
>> >+
>> >     virtio_delete_queue(vvc->event_vq);
>> >     virtio_cleanup(vdev);
>> > }
>> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
>> >index 1b1a5c70ed..33bbe16983 100644
>> >--- a/hw/virtio/vhost-vsock.c
>> >+++ b/hw/virtio/vhost-vsock.c
>> >@@ -23,6 +23,7 @@
>> >
>> > const int feature_bits[] = {
>> >     VIRTIO_VSOCK_F_SEQPACKET,
>> >+    VIRTIO_VSOCK_F_DGRAM,
>> >     VHOST_INVALID_FEATURE_BIT
>> > };
>> >
>> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
>> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>> >
>> >     virtio_add_feature(&requested_features, 
>> >     VIRTIO_VSOCK_F_SEQPACKET);
>> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
>> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
>> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
>> >                                 requested_features);
>> > }
>> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
>> >index e412b5ee98..798715241f 100644
>> >--- a/include/hw/virtio/vhost-vsock-common.h
>> >+++ b/include/hw/virtio/vhost-vsock-common.h
>> >@@ -27,12 +27,13 @@ enum {
>> > struct VHostVSockCommon {
>> >     VirtIODevice parent;
>> >
>> >-    struct vhost_virtqueue vhost_vqs[2];
>> >     struct vhost_dev vhost_dev;
>> >
>> >     VirtQueue *event_vq;
>> >     VirtQueue *recv_vq;
>> >     VirtQueue *trans_vq;
>> >+    VirtQueue *dgram_recv_vq;
>> >+    VirtQueue *dgram_trans_vq;
>> >
>> >     QEMUTimer *post_load_timer;
>> > };
>> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
>> >index 84f4e727c7..e10319785d 100644
>> >--- a/include/hw/virtio/vhost-vsock.h
>> >+++ b/include/hw/virtio/vhost-vsock.h
>> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
>> > typedef struct {
>> >     uint64_t guest_cid;
>> >     char *vhostfd;
>> >+    bool enable_dgram;
>>
>> Leftover?
>>
>Right, but to support vhost-vsock-user, I think I will add this 
>parameter back?

I'm not sure it is needed.

>
>> > } VHostVSockConf;
>> >
>> > struct VHostVSock {
>> >@@ -33,4 +34,7 @@ struct VHostVSock {
>> >     /*< public >*/
>> > };
>> >
>> >+#define MAX_VQS_WITHOUT_DGRAM 2
>> >+#define MAX_VQS_WITH_DGRAM 4
>> >+
>> > #endif /* QEMU_VHOST_VSOCK_H */
>> >diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
>> >index 5eac522ee2..6ff8c5084c 100644
>> >--- a/include/standard-headers/linux/virtio_vsock.h
>> >+++ b/include/standard-headers/linux/virtio_vsock.h
>> >@@ -41,6 +41,9 @@
>> > /* The feature bitmap for virtio vsock */
>> > #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET 
>> > supported */
>> >
>> >+/* Feature bits */
>> >+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
>>
>> Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you
>> used bit 2 for DGRAM.
>>
>My bad, I did not update the code here yet.
>

No problems :-)

Thanks,
Stefano



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-07-07 17:27     ` Stefano Garzarella
@ 2021-07-07 17:36       ` Jiang Wang .
  2021-08-03 18:58       ` Jiang Wang .
  1 sibling, 0 replies; 11+ messages in thread
From: Jiang Wang . @ 2021-07-07 17:36 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >>
> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
> >> >Datagram sockets are connectionless and unreliable.
> >> >The sender does not know the capacity of the receiver
> >> >and may send more packets than the receiver can handle.
> >> >
> >> >Add two more dedicate virtqueues for datagram sockets,
> >> >so that it will not unfairly steal resources from
> >> >stream and future connection-oriented sockets.
> >> >---
> >> >v1 -> v2: use qemu cmd option to control number of queues,
> >> >        removed configuration settings for dgram.
> >> >v2 -> v3: use ioctl to get features and decie numbr of
> >> >       virt queues, instead of qemu cmd option.
> >> >
> >> >btw: this patch is based on Arseny's SEQPACKET patch.
> >> >
> >> > hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
> >> > hw/virtio/vhost-vsock.c                       |  3 ++
> >> > include/hw/virtio/vhost-vsock-common.h        |  3 +-
> >> > include/hw/virtio/vhost-vsock.h               |  4 ++
> >> > include/standard-headers/linux/virtio_vsock.h |  3 ++
> >> > 5 files changed, 63 insertions(+), 3 deletions(-)
> >> >
> >> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >> >index 4ad6e234ad..8164e09445 100644
> >> >--- a/hw/virtio/vhost-vsock-common.c
> >> >+++ b/hw/virtio/vhost-vsock-common.c
> >> >@@ -17,6 +17,8 @@
> >> > #include "hw/virtio/vhost-vsock.h"
> >> > #include "qemu/iov.h"
> >> > #include "monitor/monitor.h"
> >> >+#include <sys/ioctl.h>
> >> >+#include <linux/vhost.h>
> >> >
> >> > int vhost_vsock_common_start(VirtIODevice *vdev)
> >> > {
> >> >@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >> >     return 0;
> >> > }
> >> >
> >> >+static int vhost_vsock_get_max_qps(void)
> >> >+{
> >> >+    uint64_t features;
> >> >+    int ret;
> >> >+    int fd = -1;
> >> >+
> >> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
> >> >+    if (fd == -1) {
> >> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >> >+        return -1;
> >> >+    }
> >>
> >> You should use the `vhostfd` already opened in
> >> vhost_vsock_device_realize(), since QEMU may not have permission to
> >> access to the device, and the file descriptor can be passed from the
> >> management layer.
> >>
> >Sure. Will do.
> >
> >> >+
> >> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >> >+    if (ret) {
> >> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >> >+        qemu_close(fd);
> >> >+        return ret;
> >> >+    }
> >> >+
> >> >+    qemu_close(fd);
> >> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >> >+        return MAX_VQS_WITH_DGRAM;
> >> >+
> >> >+    return MAX_VQS_WITHOUT_DGRAM;
> >> >+}
> >> >+
> >> > void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >> > {
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >
> >> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >> >                 sizeof(struct virtio_vsock_config));
> >> >@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice
> >> >*vdev, const char *name)
> >> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >> >
> >> >+    nvqs = vhost_vsock_get_max_qps();
> >>
> >> You can't do this here, since the vhost-vsock-common.c functions are
> >> used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock
> >> device since the device is emulated in a separate user process.
> >>
> >> I think you can use something similar to what you did in v2, where you
> >> passed a parameter to vhost_vsock_common_realize() to enable or not the
> >> datagram queues.
> >>
> >Just to make sure, the qemu parameter will only be used for vhost-user-vsock,
> >right? I think for the vhost-vsock kernel module, we will use ioctl and ignore
> >the qemu parameter?
>
> No, I mean a function parameter in vhost_vsock_common_realize() that we
> set to true when datagram is supported by the backend.
>
> You can move the vhost_vsock_get_max_qps() call in
> vhost_vsock_device_realize(), just before call
> vhost_vsock_common_realize() where we can pass a parameter to specify if
> datagram is supported or not.
>
> For now in vhost-user-vsock you can always pass `false`. When we will
> support it, we can add something similar to discover the features.
>
> Just to be clear, we don't need any QEMU command line parameter.
>
Got it. That sounds good.

> >
> >> >+
> >> >+    if (nvqs < 0)
> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >+
> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                              vhost_vsock_common_handle_output);
> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                               vhost_vsock_common_handle_output);
> >> >+    }
> >> >+
> >> >     /* The event queue belongs to QEMU */
> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >>
> >> Did you do a test with a guest that doesn't support datagram with QEMU
> >> and hosts that do?
> >>
> >Yes, and it works.
> >
> >> I repost my thoughts that I had on v2:
> >>
> >>      What happen if the guest doesn't support dgram?
> >>
> >>      I think we should dynamically use the 3rd queue or the 5th queue for
> >>      the events at runtime after the guest acked the features.
> >>
> >>      Maybe better to switch to an array of VirtQueue.
> >>
> >I think in current V3, it  already dynamically use 3rd or 5th queue
> >depending
> >on the feature bit.
>
> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
> know the features acked by the guest, so how can it be dynamic?
>
> Here we should know only if the host kernel supports it.
>
> Maybe it works, because in QEMU we use the event queue only after a
> migration to send a reset event, so you can try to migrate a guest to
> check this path.
>
I see. I did not test migration, so likely I did not check that code path.
Will give it a try.

> I'll be off until July 16th, after that I'll check better, but I think
> there is something wrong here and we should use the 3rd or 5th queue for
> events only after the guest acked the features.
>
Got it.

> >
> >> >
> >> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
> >> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
> >> >+    vvc->vhost_dev.nvqs = nvqs;
> >> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
> >> >
> >> >     vvc->post_load_timer = NULL;
> >> > }
> >> >@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
> >> >
> >> >     virtio_delete_queue(vvc->recv_vq);
> >> >     virtio_delete_queue(vvc->trans_vq);
> >> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        virtio_delete_queue(vvc->dgram_recv_vq);
> >> >+        virtio_delete_queue(vvc->dgram_trans_vq);
> >> >+    }
> >> >+
> >> >+    if (vvc->vhost_dev.vqs)
> >>
> >> g_free() already handles NULL pointers, so you can remove this check.
> >>
> >Got it.
> >
> >> >+        g_free(vvc->vhost_dev.vqs);
> >> >+
> >> >     virtio_delete_queue(vvc->event_vq);
> >> >     virtio_cleanup(vdev);
> >> > }
> >> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
> >> >index 1b1a5c70ed..33bbe16983 100644
> >> >--- a/hw/virtio/vhost-vsock.c
> >> >+++ b/hw/virtio/vhost-vsock.c
> >> >@@ -23,6 +23,7 @@
> >> >
> >> > const int feature_bits[] = {
> >> >     VIRTIO_VSOCK_F_SEQPACKET,
> >> >+    VIRTIO_VSOCK_F_DGRAM,
> >> >     VHOST_INVALID_FEATURE_BIT
> >> > };
> >> >
> >> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >
> >> >     virtio_add_feature(&requested_features,
> >> >     VIRTIO_VSOCK_F_SEQPACKET);
> >> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
> >> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
> >> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
> >> >                                 requested_features);
> >> > }
> >> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
> >> >index e412b5ee98..798715241f 100644
> >> >--- a/include/hw/virtio/vhost-vsock-common.h
> >> >+++ b/include/hw/virtio/vhost-vsock-common.h
> >> >@@ -27,12 +27,13 @@ enum {
> >> > struct VHostVSockCommon {
> >> >     VirtIODevice parent;
> >> >
> >> >-    struct vhost_virtqueue vhost_vqs[2];
> >> >     struct vhost_dev vhost_dev;
> >> >
> >> >     VirtQueue *event_vq;
> >> >     VirtQueue *recv_vq;
> >> >     VirtQueue *trans_vq;
> >> >+    VirtQueue *dgram_recv_vq;
> >> >+    VirtQueue *dgram_trans_vq;
> >> >
> >> >     QEMUTimer *post_load_timer;
> >> > };
> >> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
> >> >index 84f4e727c7..e10319785d 100644
> >> >--- a/include/hw/virtio/vhost-vsock.h
> >> >+++ b/include/hw/virtio/vhost-vsock.h
> >> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> >> > typedef struct {
> >> >     uint64_t guest_cid;
> >> >     char *vhostfd;
> >> >+    bool enable_dgram;
> >>
> >> Leftover?
> >>
> >Right, but to support vhost-vsock-user, I think I will add this
> >parameter back?
>
> I'm not sure it is needed.
>
> >
> >> > } VHostVSockConf;
> >> >
> >> > struct VHostVSock {
> >> >@@ -33,4 +34,7 @@ struct VHostVSock {
> >> >     /*< public >*/
> >> > };
> >> >
> >> >+#define MAX_VQS_WITHOUT_DGRAM 2
> >> >+#define MAX_VQS_WITH_DGRAM 4
> >> >+
> >> > #endif /* QEMU_VHOST_VSOCK_H */
> >> >diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
> >> >index 5eac522ee2..6ff8c5084c 100644
> >> >--- a/include/standard-headers/linux/virtio_vsock.h
> >> >+++ b/include/standard-headers/linux/virtio_vsock.h
> >> >@@ -41,6 +41,9 @@
> >> > /* The feature bitmap for virtio vsock */
> >> > #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET
> >> > supported */
> >> >
> >> >+/* Feature bits */
> >> >+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
> >>
> >> Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you
> >> used bit 2 for DGRAM.
> >>
> >My bad, I did not update the code here yet.
> >
>
> No problems :-)
>
> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-07-07 17:27     ` Stefano Garzarella
  2021-07-07 17:36       ` Jiang Wang .
@ 2021-08-03 18:58       ` Jiang Wang .
  2021-08-04  6:41         ` Stefano Garzarella
  1 sibling, 1 reply; 11+ messages in thread
From: Jiang Wang . @ 2021-08-03 18:58 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >>
> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
> >> >Datagram sockets are connectionless and unreliable.
> >> >The sender does not know the capacity of the receiver
> >> >and may send more packets than the receiver can handle.
> >> >
> >> >Add two more dedicate virtqueues for datagram sockets,
> >> >so that it will not unfairly steal resources from
> >> >stream and future connection-oriented sockets.
> >> >---
> >> >v1 -> v2: use qemu cmd option to control number of queues,
> >> >        removed configuration settings for dgram.
> >> >v2 -> v3: use ioctl to get features and decie numbr of
> >> >       virt queues, instead of qemu cmd option.
> >> >
> >> >btw: this patch is based on Arseny's SEQPACKET patch.
> >> >
> >> > hw/virtio/vhost-vsock-common.c                | 53 ++++++++++++++++++++++++++-
> >> > hw/virtio/vhost-vsock.c                       |  3 ++
> >> > include/hw/virtio/vhost-vsock-common.h        |  3 +-
> >> > include/hw/virtio/vhost-vsock.h               |  4 ++
> >> > include/standard-headers/linux/virtio_vsock.h |  3 ++
> >> > 5 files changed, 63 insertions(+), 3 deletions(-)
> >> >
> >> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >> >index 4ad6e234ad..8164e09445 100644
> >> >--- a/hw/virtio/vhost-vsock-common.c
> >> >+++ b/hw/virtio/vhost-vsock-common.c
> >> >@@ -17,6 +17,8 @@
> >> > #include "hw/virtio/vhost-vsock.h"
> >> > #include "qemu/iov.h"
> >> > #include "monitor/monitor.h"
> >> >+#include <sys/ioctl.h>
> >> >+#include <linux/vhost.h>
> >> >
> >> > int vhost_vsock_common_start(VirtIODevice *vdev)
> >> > {
> >> >@@ -196,9 +198,36 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >> >     return 0;
> >> > }
> >> >
> >> >+static int vhost_vsock_get_max_qps(void)
> >> >+{
> >> >+    uint64_t features;
> >> >+    int ret;
> >> >+    int fd = -1;
> >> >+
> >> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
> >> >+    if (fd == -1) {
> >> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >> >+        return -1;
> >> >+    }
> >>
> >> You should use the `vhostfd` already opened in
> >> vhost_vsock_device_realize(), since QEMU may not have permission to
> >> access to the device, and the file descriptor can be passed from the
> >> management layer.
> >>
> >Sure. Will do.
> >
> >> >+
> >> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >> >+    if (ret) {
> >> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >> >+        qemu_close(fd);
> >> >+        return ret;
> >> >+    }
> >> >+
> >> >+    qemu_close(fd);
> >> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >> >+        return MAX_VQS_WITH_DGRAM;
> >> >+
> >> >+    return MAX_VQS_WITHOUT_DGRAM;
> >> >+}
> >> >+
> >> > void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >> > {
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >
> >> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >> >                 sizeof(struct virtio_vsock_config));
> >> >@@ -209,12 +238,24 @@ void vhost_vsock_common_realize(VirtIODevice
> >> >*vdev, const char *name)
> >> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >> >
> >> >+    nvqs = vhost_vsock_get_max_qps();
> >>
> >> You can't do this here, since the vhost-vsock-common.c functions are
> >> used also by vhost-user-vsock, that doesn't use the /dev/vhost-vsock
> >> device since the device is emulated in a separate user process.
> >>
> >> I think you can use something similar to what you did in v2, where you
> >> passed a parameter to vhost_vsock_common_realize() to enable or not the
> >> datagram queues.
> >>
> >Just to make sure, the qemu parameter will only be used for vhost-user-vsock,
> >right? I think for the vhost-vsock kernel module, we will use ioctl and ignore
> >the qemu parameter?
>
> No, I mean a function parameter in vhost_vsock_common_realize() that we
> set to true when datagram is supported by the backend.
>
> You can move the vhost_vsock_get_max_qps() call in
> vhost_vsock_device_realize(), just before call
> vhost_vsock_common_realize() where we can pass a parameter to specify if
> datagram is supported or not.
>
> For now in vhost-user-vsock you can always pass `false`. When we will
> support it, we can add something similar to discover the features.
>
> Just to be clear, we don't need any QEMU command line parameter.
>
> >
> >> >+
> >> >+    if (nvqs < 0)
> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >+
> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                              vhost_vsock_common_handle_output);
> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                               vhost_vsock_common_handle_output);
> >> >+    }
> >> >+
> >> >     /* The event queue belongs to QEMU */
> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >>
> >> Did you do a test with a guest that doesn't support datagram with QEMU
> >> and hosts that do?
> >>
> >Yes, and it works.
> >
> >> I repost my thoughts that I had on v2:
> >>
> >>      What happen if the guest doesn't support dgram?
> >>
> >>      I think we should dynamically use the 3rd queue or the 5th queue for
> >>      the events at runtime after the guest acked the features.
> >>
> >>      Maybe better to switch to an array of VirtQueue.
> >>
> >I think in current V3, it  already dynamically use 3rd or 5th queue
> >depending
> >on the feature bit.
>
> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
> know the features acked by the guest, so how can it be dynamic?
>
> Here we should know only if the host kernel supports it.
>
> Maybe it works, because in QEMU we use the event queue only after a
> migration to send a reset event, so you can try to migrate a guest to
> check this path.
>
I tried VM migration and didn't see any problems. The migration looks fine
and vsock dgram still works after migration. Is there any more specific test
you want me to run to check this code path?

btw, I will address the rest of the comments and send a new version soon.

> I'll be off until July 16th, after that I'll check better, but I think
> there is something wrong here and we should use the 3rd or 5th queue for
> events only after the guest acked the features.
>
> >
> >> >
> >> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
> >> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
> >> >+    vvc->vhost_dev.nvqs = nvqs;
> >> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
> >> >
> >> >     vvc->post_load_timer = NULL;
> >> > }
> >> >@@ -227,6 +268,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
> >> >
> >> >     virtio_delete_queue(vvc->recv_vq);
> >> >     virtio_delete_queue(vvc->trans_vq);
> >> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        virtio_delete_queue(vvc->dgram_recv_vq);
> >> >+        virtio_delete_queue(vvc->dgram_trans_vq);
> >> >+    }
> >> >+
> >> >+    if (vvc->vhost_dev.vqs)
> >>
> >> g_free() already handles NULL pointers, so you can remove this check.
> >>
> >Got it.
> >
> >> >+        g_free(vvc->vhost_dev.vqs);
> >> >+
> >> >     virtio_delete_queue(vvc->event_vq);
> >> >     virtio_cleanup(vdev);
> >> > }
> >> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
> >> >index 1b1a5c70ed..33bbe16983 100644
> >> >--- a/hw/virtio/vhost-vsock.c
> >> >+++ b/hw/virtio/vhost-vsock.c
> >> >@@ -23,6 +23,7 @@
> >> >
> >> > const int feature_bits[] = {
> >> >     VIRTIO_VSOCK_F_SEQPACKET,
> >> >+    VIRTIO_VSOCK_F_DGRAM,
> >> >     VHOST_INVALID_FEATURE_BIT
> >> > };
> >> >
> >> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >
> >> >     virtio_add_feature(&requested_features,
> >> >     VIRTIO_VSOCK_F_SEQPACKET);
> >> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
> >> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
> >> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
> >> >                                 requested_features);
> >> > }
> >> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
> >> >index e412b5ee98..798715241f 100644
> >> >--- a/include/hw/virtio/vhost-vsock-common.h
> >> >+++ b/include/hw/virtio/vhost-vsock-common.h
> >> >@@ -27,12 +27,13 @@ enum {
> >> > struct VHostVSockCommon {
> >> >     VirtIODevice parent;
> >> >
> >> >-    struct vhost_virtqueue vhost_vqs[2];
> >> >     struct vhost_dev vhost_dev;
> >> >
> >> >     VirtQueue *event_vq;
> >> >     VirtQueue *recv_vq;
> >> >     VirtQueue *trans_vq;
> >> >+    VirtQueue *dgram_recv_vq;
> >> >+    VirtQueue *dgram_trans_vq;
> >> >
> >> >     QEMUTimer *post_load_timer;
> >> > };
> >> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
> >> >index 84f4e727c7..e10319785d 100644
> >> >--- a/include/hw/virtio/vhost-vsock.h
> >> >+++ b/include/hw/virtio/vhost-vsock.h
> >> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> >> > typedef struct {
> >> >     uint64_t guest_cid;
> >> >     char *vhostfd;
> >> >+    bool enable_dgram;
> >>
> >> Leftover?
> >>
> >Right, but to support vhost-vsock-user, I think I will add this
> >parameter back?
>
> I'm not sure it is needed.
>
> >
> >> > } VHostVSockConf;
> >> >
> >> > struct VHostVSock {
> >> >@@ -33,4 +34,7 @@ struct VHostVSock {
> >> >     /*< public >*/
> >> > };
> >> >
> >> >+#define MAX_VQS_WITHOUT_DGRAM 2
> >> >+#define MAX_VQS_WITH_DGRAM 4
> >> >+
> >> > #endif /* QEMU_VHOST_VSOCK_H */
> >> >diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
> >> >index 5eac522ee2..6ff8c5084c 100644
> >> >--- a/include/standard-headers/linux/virtio_vsock.h
> >> >+++ b/include/standard-headers/linux/virtio_vsock.h
> >> >@@ -41,6 +41,9 @@
> >> > /* The feature bitmap for virtio vsock */
> >> > #define VIRTIO_VSOCK_F_SEQPACKET       1       /* SOCK_SEQPACKET
> >> > supported */
> >> >
> >> >+/* Feature bits */
> >> >+#define VIRTIO_VSOCK_F_DGRAM 0 /*Does vsock support dgram */
> >>
> >> Bit 0 is reserved for STREAM, IIRC also in the virtio-spec proposal you
> >> used bit 2 for DGRAM.
> >>
> >My bad, I did not update the code here yet.
> >
>
> No problems :-)
>
> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-08-03 18:58       ` Jiang Wang .
@ 2021-08-04  6:41         ` Stefano Garzarella
  2021-08-04  6:49           ` Stefano Garzarella
  0 siblings, 1 reply; 11+ messages in thread
From: Stefano Garzarella @ 2021-08-04  6:41 UTC (permalink / raw)
  To: Jiang Wang .
  Cc: Arseny Krasnov, Jason Wang, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Tue, Aug 03, 2021 at 11:58:27AM -0700, Jiang Wang . wrote:
>On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
>> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:

[...]

>> >> >+
>> >> >+    if (nvqs < 0)
>> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>> >> >+
>> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >> >+                                              vhost_vsock_common_handle_output);
>> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >> >+                                               vhost_vsock_common_handle_output);
>> >> >+    }
>> >> >+
>> >> >     /* The event queue belongs to QEMU */
>> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >> >                                        vhost_vsock_common_handle_output);
>> >>
>> >> Did you do a test with a guest that doesn't support datagram with QEMU
>> >> and hosts that do?
>> >>
>> >Yes, and it works.
>> >
>> >> I repost my thoughts that I had on v2:
>> >>
>> >>      What happen if the guest doesn't support dgram?
>> >>
>> >>      I think we should dynamically use the 3rd queue or the 5th queue for
>> >>      the events at runtime after the guest acked the features.
>> >>
>> >>      Maybe better to switch to an array of VirtQueue.
>> >>
>> >I think in current V3, it  already dynamically use 3rd or 5th queue
>> >depending
>> >on the feature bit.
>>
>> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
>> know the features acked by the guest, so how can it be dynamic?
>>
>> Here we should know only if the host kernel supports it.
>>
>> Maybe it works, because in QEMU we use the event queue only after a
>> migration to send a reset event, so you can try to migrate a guest to
>> check this path.
>>
>I tried VM migration and didn't see any problems. The migration looks fine
>and vsock dgram still works after migration. Is there any more specific test
>you want to run to check for this code path?
>

I meant a migration of a guest from QEMU without this patch to a QEMU 
with this patch. Of course in that case testing a socket stream.

>btw, I will address the rest of the comments and send a new version 
>soon.
>

Great!

Thanks,
Stefano



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-08-04  6:41         ` Stefano Garzarella
@ 2021-08-04  6:49           ` Stefano Garzarella
  2021-08-05 19:00             ` Jiang Wang .
  0 siblings, 1 reply; 11+ messages in thread
From: Stefano Garzarella @ 2021-08-04  6:49 UTC (permalink / raw)
  To: Jiang Wang .
  Cc: Arseny Krasnov, Jason Wang, qemu devel list, Stefan Hajnoczi,
	Michael S. Tsirkin

On Wed, Aug 4, 2021 at 8:41 AM Stefano Garzarella <sgarzare@redhat.com> 
wrote:
>
> On Tue, Aug 03, 2021 at 11:58:27AM -0700, Jiang Wang . wrote:
> >On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
> >> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
>
> [...]
>
> >> >> >+
> >> >> >+    if (nvqs < 0)
> >> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >> >+
> >> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >> >+                                              vhost_vsock_common_handle_output);
> >> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >> >+                                               vhost_vsock_common_handle_output);
> >> >> >+    }
> >> >> >+
> >> >> >     /* The event queue belongs to QEMU */
> >> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >> >                                        vhost_vsock_common_handle_output);
> >> >>
> >> >> Did you do a test with a guest that doesn't support datagram with QEMU
> >> >> and hosts that do?
> >> >>
> >> >Yes, and it works.
> >> >
> >> >> I repost my thoughts that I had on v2:
> >> >>
> >> >>      What happen if the guest doesn't support dgram?
> >> >>
> >> >>      I think we should dynamically use the 3rd queue or the 5th queue for
> >> >>      the events at runtime after the guest acked the features.
> >> >>
> >> >>      Maybe better to switch to an array of VirtQueue.
> >> >>
> >> >I think in current V3, it  already dynamically use 3rd or 5th queue
> >> >depending
> >> >on the feature bit.
> >>
> >> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
> >> know the features acked by the guest, so how can it be dynamic?
> >>
> >> Here we should know only if the host kernel supports it.
> >>
> >> Maybe it works, because in QEMU we use the event queue only after a
> >> migration to send a reset event, so you can try to migrate a guest to
> >> check this path.
> >>
> >I tried VM migration and didn't see any problems. The migration looks fine
> >and vsock dgram still works after migration. Is there any more specific test
> >you want to run to check for this code path?
> >
>
> I meant a migration of a guest from QEMU without this patch to a QEMU
> with this patch. Of course in that case testing a socket stream.
>

Sorry, I meant the opposite.

You should try to migrate a guest that does not support dgrams starting 
from a QEMU with this patch (and kernel that supports dgram, so qemu 
uses the 5th queue for event), to a QEMU without this patch.

I still think the event queue should remain the third and those dgrams 
the next 2.

Thanks,
Stefano



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-08-04  6:49           ` Stefano Garzarella
@ 2021-08-05 19:00             ` Jiang Wang .
  2021-08-09 10:49               ` Stefano Garzarella
  0 siblings, 1 reply; 11+ messages in thread
From: Jiang Wang . @ 2021-08-05 19:00 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu devel list, Stefan Hajnoczi,
	Michael S. Tsirkin

On Tue, Aug 3, 2021 at 11:49 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Wed, Aug 4, 2021 at 8:41 AM Stefano Garzarella <sgarzare@redhat.com>
> wrote:
> >
> > On Tue, Aug 03, 2021 at 11:58:27AM -0700, Jiang Wang . wrote:
> > >On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> > >> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
> > >> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> > >> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
> >
> > [...]
> >
> > >> >> >+
> > >> >> >+    if (nvqs < 0)
> > >> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> > >> >> >+
> > >> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> > >> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> > >> >> >+                                              vhost_vsock_common_handle_output);
> > >> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> > >> >> >+                                               vhost_vsock_common_handle_output);
> > >> >> >+    }
> > >> >> >+
> > >> >> >     /* The event queue belongs to QEMU */
> > >> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> > >> >> >                                        vhost_vsock_common_handle_output);
> > >> >>
> > >> >> Did you do a test with a guest that doesn't support datagram with QEMU
> > >> >> and hosts that do?
> > >> >>
> > >> >Yes, and it works.
> > >> >
> > >> >> I repost my thoughts that I had on v2:
> > >> >>
> > >> >>      What happen if the guest doesn't support dgram?
> > >> >>
> > >> >>      I think we should dynamically use the 3rd queue or the 5th queue for
> > >> >>      the events at runtime after the guest acked the features.
> > >> >>
> > >> >>      Maybe better to switch to an array of VirtQueue.
> > >> >>
> > >> >I think in current V3, it  already dynamically use 3rd or 5th queue
> > >> >depending
> > >> >on the feature bit.
> > >>
> > >> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
> > >> know the features acked by the guest, so how can it be dynamic?
> > >>
> > >> Here we should know only if the host kernel supports it.
> > >>
> > >> Maybe it works, because in QEMU we use the event queue only after a
> > >> migration to send a reset event, so you can try to migrate a guest to
> > >> check this path.
> > >>
> > >I tried VM migration and didn't see any problems. The migration looks fine
> > >and vsock dgram still works after migration. Is there any more specific test
> > >you want to run to check for this code path?
> > >
> >
> > I meant a migration of a guest from QEMU without this patch to a QEMU
> > with this patch. Of course in that case testing a socket stream.
> >
>
> Sorry, I meant the opposite.
>
> You should try to migrate a guest that does not support dgrams starting
> from a QEMU with this patch (and kernel that supports dgram, so qemu
> uses the 5th queue for event), to a QEMU without this patch.
>
Got it. I tried what you said and saw errors on the destination qemu. Then
I moved the event queue up to be number 3 (before adding the dgram vqs),
as follows:

+    /* The event queue belongs to QEMU */
+    vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+                                       vhost_vsock_common_handle_output);
+

     nvqs = vhost_vsock_get_max_qps(enable_dgram);

@@ -253,10 +257,6 @@ void vhost_vsock_common_realize(VirtIODevice
*vdev, const char *name, bool enabl

vhost_vsock_common_handle_output);
     }

-    /* The event queue belongs to QEMU */
-    vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
-                                       vhost_vsock_common_handle_output);
-

But I still see following errors on the destination qemu:
qemu-system-x86_64: Error starting vhost vsock: 14

Any idea if my above code change is wrong or missing something?

Take one step back, what should be the host kernel version? With or
without dgram support? I tried both.  The new dest kernel shows the above error.
The old dest kernel shows a msr error probably not related to vsock.

To explain the above qemu 14 error, I think the issue is that the
source host kernel
supports dgram by always setting the DGRAM feature bit (in my
implementation). Then the source
qemu queries the source host kernel, and uses queue 5 for the event vq. Even if
the source
guest kernel does not support dgram, it currently has no way to tell the source
host or the source qemu.

On the source machine, when we start the qemu process, and the guest VM
is still in the BIOS or early boot process (before vsock is initialized), I think
at this time, the qemu vhost_vsock_common_realize() is already called.
So qemu can only check if the host kernel supports dgram or not, but has
no knowledge about the guest kernel. After the guest kernel has fully booted up,
it can tell qemu or the host whether it supports dgram or not (or the host or
qemu can detect that). But I don't think we will change the vq numbers at that time.

Maybe we should fix that too and change the vq numbers (delete the vqs and add
them back?) at a later
time after the guest kernel has fully booted up?

> I still think the event queue should remain the third and those dgrams
> the next 2.
>
> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-08-05 19:00             ` Jiang Wang .
@ 2021-08-09 10:49               ` Stefano Garzarella
  2021-08-09 22:47                 ` Jiang Wang .
  0 siblings, 1 reply; 11+ messages in thread
From: Stefano Garzarella @ 2021-08-09 10:49 UTC (permalink / raw)
  To: Jiang Wang .
  Cc: Arseny Krasnov, Jason Wang, qemu devel list, Stefan Hajnoczi,
	Michael S. Tsirkin

On Thu, Aug 05, 2021 at 12:00:05PM -0700, Jiang Wang . wrote:
>On Tue, Aug 3, 2021 at 11:49 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
>>
>> On Wed, Aug 4, 2021 at 8:41 AM Stefano Garzarella <sgarzare@redhat.com>
>> wrote:
>> >
>> > On Tue, Aug 03, 2021 at 11:58:27AM -0700, Jiang Wang . wrote:
>> > >On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>> > >> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
>> > >> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>> > >> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
>> >
>> > [...]
>> >
>> > >> >> >+
>> > >> >> >+    if (nvqs < 0)
>> > >> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>> > >> >> >+
>> > >> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>> > >> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> > >> >> >+                                              vhost_vsock_common_handle_output);
>> > >> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> > >> >> >+                                               vhost_vsock_common_handle_output);
>> > >> >> >+    }
>> > >> >> >+
>> > >> >> >     /* The event queue belongs to QEMU */
>> > >> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> > >> >> >                                        vhost_vsock_common_handle_output);
>> > >> >>
>> > >> >> Did you do a test with a guest that doesn't support datagram with QEMU
>> > >> >> and hosts that do?
>> > >> >>
>> > >> >Yes, and it works.
>> > >> >
>> > >> >> I repost my thoughts that I had on v2:
>> > >> >>
>> > >> >>      What happen if the guest doesn't support dgram?
>> > >> >>
>> > >> >>      I think we should dynamically use the 3rd queue or the 5th queue for
>> > >> >>      the events at runtime after the guest acked the features.
>> > >> >>
>> > >> >>      Maybe better to switch to an array of VirtQueue.
>> > >> >>
>> > >> >I think in current V3, it  already dynamically use 3rd or 5th queue
>> > >> >depending
>> > >> >on the feature bit.
>> > >>
>> > >> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
>> > >> know the features acked by the guest, so how can it be dynamic?
>> > >>
>> > >> Here we should know only if the host kernel supports it.
>> > >>
>> > >> Maybe it works, because in QEMU we use the event queue only after a
>> > >> migration to send a reset event, so you can try to migrate a guest to
>> > >> check this path.
>> > >>
>> > >I tried VM migration and didn't see any problems. The migration looks fine
>> > >and vsock dgram still works after migration. Is there any more specific test
>> > >you want to run to check for this code path?
>> > >
>> >
>> > I meant a migration of a guest from QEMU without this patch to a QEMU
>> > with this patch. Of course in that case testing a socket stream.
>> >
>>
>> Sorry, I meant the opposite.
>>
>> You should try to migrate a guest that does not support dgrams starting
>> from a QEMU with this patch (and kernel that supports dgram, so qemu
>> uses the 5th queue for event), to a QEMU without this patch.
>>
>Got it. I tried what you said and saw errors on the destination qemu. Then
>I moved event queue up to be number 3 (before adding dgram vqs),
>as the following:
>
>+    /* The event queue belongs to QEMU */
>+    vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>+                                       vhost_vsock_common_handle_output);
>+
>
>     nvqs = vhost_vsock_get_max_qps(enable_dgram);
>
>@@ -253,10 +257,6 @@ void vhost_vsock_common_realize(VirtIODevice
>*vdev, const char *name, bool enabl
>
>vhost_vsock_common_handle_output);
>     }
>
>-    /* The event queue belongs to QEMU */
>-    vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>-                                       vhost_vsock_common_handle_output);
>-
>
>But I still see following errors on the destination qemu:
>qemu-system-x86_64: Error starting vhost vsock: 14
>
>Any idea if my above code change is wrong or missing something?

No, sorry.
Do you have some kernel messages in the host?

>
>Take one step back, what should be the host kernel version? With or
>without dgram support? I tried both.  The new dest kernel shows the above error.
>The old dest kernel shows a msr error probably not related to vsock.

I think the best is to try the host kernel with DGRAM support, so QEMU 
will allocate all the queues.

>
>To explain the above qemu 14 error, I think the issue is that the
>source host kernel
>supports dgram by always setting the DGRAM feature bit(in my
>implementation). Then the source
>qemu query the source host kernel, and use 5 for event vq. Even if the source
>guest kernel does not support dgram, it currently has no way to tell the source
>host or the source qemu.

Initially I think it is better to try the migration on the same host, so we 
can exclude other issues. When it works properly, you can try to migrate 
to a different host kernel.

>
>On the source machine, when we start qemu process,  and the guest VM
>is still in BIOS or early boot process ( before vsock is initialized), I think
>at this time, the qemu vhost_vsock_common_realize() is already called.
>So qemu can only check if the host kernel supports dgram or not, but has
>no knowledge about the guest kernel. After the guest kernel is fully boot up,
>it can tell qemu or the host if it supports dgram or not ( or the host or qemu
>detect for that). But I don't think we will change the vq numbers at that time.
>
>Maybe we should fix that too and change vq numbers ( delete vq and add
>back?) at a later
>time after the guest kernel is fully boot-up?

IIRC vhost-net allocates the maximum number of queues, and then it uses 
only the queues supported/requested, so I think we can do something 
similar.

Allocates 5 queues and, at runtime, we can decide which queue to use.

Thanks,
Stefano



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: Re: [RFC v3] virtio/vsock: add two more queues for datagram types
  2021-08-09 10:49               ` Stefano Garzarella
@ 2021-08-09 22:47                 ` Jiang Wang .
  0 siblings, 0 replies; 11+ messages in thread
From: Jiang Wang . @ 2021-08-09 22:47 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu devel list, Stefan Hajnoczi,
	Michael S. Tsirkin

On Mon, Aug 9, 2021 at 3:49 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Thu, Aug 05, 2021 at 12:00:05PM -0700, Jiang Wang . wrote:
> >On Tue, Aug 3, 2021 at 11:49 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >>
> >> On Wed, Aug 4, 2021 at 8:41 AM Stefano Garzarella <sgarzare@redhat.com>
> >> wrote:
> >> >
> >> > On Tue, Aug 03, 2021 at 11:58:27AM -0700, Jiang Wang . wrote:
> >> > >On Wed, Jul 7, 2021 at 10:27 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >> > >> On Wed, Jul 07, 2021 at 09:52:46AM -0700, Jiang Wang . wrote:
> >> > >> >On Wed, Jul 7, 2021 at 1:33 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >> > >> >> On Tue, Jul 06, 2021 at 10:26:07PM +0000, Jiang Wang wrote:
> >> >
> >> > [...]
> >> >
> >> > >> >> >+
> >> > >> >> >+    if (nvqs < 0)
> >> > >> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> > >> >> >+
> >> > >> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >> > >> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> > >> >> >+                                              vhost_vsock_common_handle_output);
> >> > >> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> > >> >> >+                                               vhost_vsock_common_handle_output);
> >> > >> >> >+    }
> >> > >> >> >+
> >> > >> >> >     /* The event queue belongs to QEMU */
> >> > >> >> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> > >> >> >                                        vhost_vsock_common_handle_output);
> >> > >> >>
> >> > >> >> Did you do a test with a guest that doesn't support datagram with QEMU
> >> > >> >> and hosts that do?
> >> > >> >>
> >> > >> >Yes, and it works.
> >> > >> >
> >> > >> >> I repost my thoughts that I had on v2:
> >> > >> >>
> >> > >> >>      What happen if the guest doesn't support dgram?
> >> > >> >>
> >> > >> >>      I think we should dynamically use the 3rd queue or the 5th queue for
> >> > >> >>      the events at runtime after the guest acked the features.
> >> > >> >>
> >> > >> >>      Maybe better to switch to an array of VirtQueue.
> >> > >> >>
> >> > >> >I think in current V3, it  already dynamically use 3rd or 5th queue
> >> > >> >depending
> >> > >> >on the feature bit.
> >> > >>
> >> > >> I'm not sure. IIUC when vhost_vsock_common_realize() is called, we don't
> >> > >> know the features acked by the guest, so how can it be dynamic?
> >> > >>
> >> > >> Here we should know only if the host kernel supports it.
> >> > >>
> >> > >> Maybe it works, because in QEMU we use the event queue only after a
> >> > >> migration to send a reset event, so you can try to migrate a guest to
> >> > >> check this path.
> >> > >>
> >> > >I tried VM migration and didn't see any problems. The migration looks fine
> >> > >and vsock dgram still works after migration. Is there any more specific test
> >> > >you want to run to check for this code path?
> >> > >
> >> >
> >> > I meant a migration of a guest from QEMU without this patch to a QEMU
> >> > with this patch. Of course in that case testing a socket stream.
> >> >
> >>
> >> Sorry, I meant the opposite.
> >>
> >> You should try to migrate a guest that does not support dgrams starting
> >> from a QEMU with this patch (and kernel that supports dgram, so qemu
> >> uses the 5th queue for event), to a QEMU without this patch.
> >>
> >Got it. I tried what you said and saw errors on the destination qemu. Then
> >I moved event queue up to be number 3 (before adding dgram vqs),
> >as the following:
> >
> >+    /* The event queue belongs to QEMU */
> >+    vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >+                                       vhost_vsock_common_handle_output);
> >+
> >
> >     nvqs = vhost_vsock_get_max_qps(enable_dgram);
> >
> >@@ -253,10 +257,6 @@ void vhost_vsock_common_realize(VirtIODevice
> >*vdev, const char *name, bool enabl
> >
> >vhost_vsock_common_handle_output);
> >     }
> >
> >-    /* The event queue belongs to QEMU */
> >-    vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >-                                       vhost_vsock_common_handle_output);
> >-
> >
> >But I still see following errors on the destination qemu:
> >qemu-system-x86_64: Error starting vhost vsock: 14
> >
> >Any idea if my above code change is wrong or missing something?
>
> No, sorry.
> Do you have some kernel messages in the host?
>

I checked dmesg but did not find any errors. I will debug more.

> >
> >Take one step back, what should be the host kernel version? With or
> >without dgram support? I tried both.  The new dest kernel shows the above error.
> >The old dest kernel shows a msr error probably not related to vsock.
>
> I think the best is to try the host kernel with DGRAM support, so QEMU
> will allocate all the queues.
>
> >
> >To explain the above qemu 14 error, I think the issue is that the
> >source host kernel
> >supports dgram by always setting the DGRAM feature bit(in my
> >implementation). Then the source
> >qemu query the source host kernel, and use 5 for event vq. Even if the source
> >guest kernel does not support dgram, it currently has no way to tell the source
> >host or the source qemu.
>
> Initially I think is better to try the migration on the same host, so we
> can exclude other issues. When it works properly, you can try to migrate
> to a different host kernel.
>
Got it.

> >
> >On the source machine, when we start qemu process,  and the guest VM
> >is still in BIOS or early boot process ( before vsock is initialized), I think
> >at this time, the qemu vhost_vsock_common_realize() is already called.
> >So qemu can only check if the host kernel supports dgram or not, but has
> >no knowledge about the guest kernel. After the guest kernel is fully boot up,
> >it can tell qemu or the host if it supports dgram or not ( or the host or qemu
> >detect for that). But I don't think we will change the vq numbers at that time.
> >
> >Maybe we should fix that too and change vq numbers ( delete vq and add
> >back?) at a later
> >time after the guest kernel is fully boot-up?
>
> IIRC vhost-net allocates the maximum number of queues, and then it uses
> only the queues supported/requested, so I think we can do something
> similar.
>
> Allocates 5 queues and, at runtime, we can decide which queue to use.
>
I see. Will dig more. thanks.

> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2021-08-09 22:49 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-06 22:26 [RFC v3] virtio/vsock: add two more queues for datagram types Jiang Wang
2021-07-07  8:33 ` Stefano Garzarella
2021-07-07 16:52   ` Jiang Wang .
2021-07-07 17:27     ` Stefano Garzarella
2021-07-07 17:36       ` Jiang Wang .
2021-08-03 18:58       ` Jiang Wang .
2021-08-04  6:41         ` Stefano Garzarella
2021-08-04  6:49           ` Stefano Garzarella
2021-08-05 19:00             ` Jiang Wang .
2021-08-09 10:49               ` Stefano Garzarella
2021-08-09 22:47                 ` Jiang Wang .

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.