All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v4] virtio/vsock: add two more queues for datagram types
@ 2021-08-03 23:41 Jiang Wang
  2021-08-04  8:13 ` Stefano Garzarella
  0 siblings, 1 reply; 6+ messages in thread
From: Jiang Wang @ 2021-08-03 23:41 UTC (permalink / raw)
  To: mst, qemu-devel; +Cc: jasowang, arseny.krasnov, stefanha, sgarzare

Datagram sockets are connectionless and unreliable.
The sender does not know the capacity of the receiver
and may send more packets than the receiver can handle.

Add two more dedicated virtqueues for datagram sockets,
so that they will not unfairly steal resources from
stream and future connection-oriented sockets.

Signed-off-by: Jiang Wang <jiang.wang@bytedance.com>
---
v1 -> v2: use qemu cmd option to control number of queues,
	removed configuration settings for dgram.
v2 -> v3: use ioctl to get features and decide number of
        virt queues, instead of qemu cmd option.
v3 -> v4: change DGRAM feature bit value to 2. Add an argument
	in vhost_vsock_common_realize to indicate dgram is supported or not.

 hw/virtio/vhost-user-vsock.c                  |  2 +-
 hw/virtio/vhost-vsock-common.c                | 58 ++++++++++++++++++-
 hw/virtio/vhost-vsock.c                       |  5 +-
 include/hw/virtio/vhost-vsock-common.h        |  6 +-
 include/hw/virtio/vhost-vsock.h               |  4 ++
 include/standard-headers/linux/virtio_vsock.h |  1 +
 6 files changed, 69 insertions(+), 7 deletions(-)

diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
index 6095ed7349..e9ec0e1c00 100644
--- a/hw/virtio/vhost-user-vsock.c
+++ b/hw/virtio/vhost-user-vsock.c
@@ -105,7 +105,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
-    vhost_vsock_common_realize(vdev, "vhost-user-vsock");
+    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);
 
     vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops);
 
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index 4ad6e234ad..c78536911a 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -17,6 +17,8 @@
 #include "hw/virtio/vhost-vsock.h"
 #include "qemu/iov.h"
 #include "monitor/monitor.h"
+#include <sys/ioctl.h>
+#include <linux/vhost.h>
 
 int vhost_vsock_common_start(VirtIODevice *vdev)
 {
@@ -196,9 +198,39 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
     return 0;
 }
 
-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
+static int vhost_vsock_get_max_qps(bool enable_dgram)
+{
+    uint64_t features;
+    int ret;
+    int fd = -1;
+
+    if (!enable_dgram)
+        return MAX_VQS_WITHOUT_DGRAM;
+
+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
+    if (fd == -1) {
+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
+        return -1;
+    }
+
+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
+    if (ret) {
+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
+        qemu_close(fd);
+        return ret;
+    }
+
+    qemu_close(fd);
+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
+        return MAX_VQS_WITH_DGRAM;
+
+    return MAX_VQS_WITHOUT_DGRAM;
+}
+
+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name, bool enable_dgram)
 {
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
 
     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
                 sizeof(struct virtio_vsock_config));
@@ -209,12 +241,24 @@ void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                        vhost_vsock_common_handle_output);
 
+    nvqs = vhost_vsock_get_max_qps(enable_dgram);
+
+    if (nvqs < 0)
+        nvqs = MAX_VQS_WITHOUT_DGRAM;
+
+    if (nvqs == MAX_VQS_WITH_DGRAM) {
+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+                                              vhost_vsock_common_handle_output);
+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
+                                               vhost_vsock_common_handle_output);
+    }
+
     /* The event queue belongs to QEMU */
     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                        vhost_vsock_common_handle_output);
 
-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
+    vvc->vhost_dev.nvqs = nvqs;
+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
 
     vvc->post_load_timer = NULL;
 }
@@ -227,6 +271,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
 
     virtio_delete_queue(vvc->recv_vq);
     virtio_delete_queue(vvc->trans_vq);
+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
+        virtio_delete_queue(vvc->dgram_recv_vq);
+        virtio_delete_queue(vvc->dgram_trans_vq);
+    }
+
+    if (vvc->vhost_dev.vqs)
+        g_free(vvc->vhost_dev.vqs);
+
     virtio_delete_queue(vvc->event_vq);
     virtio_cleanup(vdev);
 }
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
index 1b1a5c70ed..f73523afaf 100644
--- a/hw/virtio/vhost-vsock.c
+++ b/hw/virtio/vhost-vsock.c
@@ -23,6 +23,7 @@
 
 const int feature_bits[] = {
     VIRTIO_VSOCK_F_SEQPACKET,
+    VIRTIO_VSOCK_F_DGRAM,
     VHOST_INVALID_FEATURE_BIT
 };
 
@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
 
     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
     return vhost_get_features(&vvc->vhost_dev, feature_bits,
                                 requested_features);
 }
@@ -175,7 +178,7 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
         qemu_set_nonblock(vhostfd);
     }
 
-    vhost_vsock_common_realize(vdev, "vhost-vsock");
+    vhost_vsock_common_realize(vdev, "vhost-vsock", true);
 
     ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd,
                          VHOST_BACKEND_TYPE_KERNEL, 0, errp);
diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
index e412b5ee98..6669d24714 100644
--- a/include/hw/virtio/vhost-vsock-common.h
+++ b/include/hw/virtio/vhost-vsock-common.h
@@ -27,12 +27,13 @@ enum {
 struct VHostVSockCommon {
     VirtIODevice parent;
 
-    struct vhost_virtqueue vhost_vqs[2];
     struct vhost_dev vhost_dev;
 
     VirtQueue *event_vq;
     VirtQueue *recv_vq;
     VirtQueue *trans_vq;
+    VirtQueue *dgram_recv_vq;
+    VirtQueue *dgram_trans_vq;
 
     QEMUTimer *post_load_timer;
 };
@@ -41,7 +42,8 @@ int vhost_vsock_common_start(VirtIODevice *vdev);
 void vhost_vsock_common_stop(VirtIODevice *vdev);
 int vhost_vsock_common_pre_save(void *opaque);
 int vhost_vsock_common_post_load(void *opaque, int version_id);
-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name);
+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name,
+			       bool enable_dgram);
 void vhost_vsock_common_unrealize(VirtIODevice *vdev);
 
 #endif /* _QEMU_VHOST_VSOCK_COMMON_H */
diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
index 84f4e727c7..e10319785d 100644
--- a/include/hw/virtio/vhost-vsock.h
+++ b/include/hw/virtio/vhost-vsock.h
@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
 typedef struct {
     uint64_t guest_cid;
     char *vhostfd;
+    bool enable_dgram;
 } VHostVSockConf;
 
 struct VHostVSock {
@@ -33,4 +34,7 @@ struct VHostVSock {
     /*< public >*/
 };
 
+#define MAX_VQS_WITHOUT_DGRAM 2
+#define MAX_VQS_WITH_DGRAM 4
+
 #endif /* QEMU_VHOST_VSOCK_H */
diff --git a/include/standard-headers/linux/virtio_vsock.h b/include/standard-headers/linux/virtio_vsock.h
index 3a23488e42..7e35acf3d4 100644
--- a/include/standard-headers/linux/virtio_vsock.h
+++ b/include/standard-headers/linux/virtio_vsock.h
@@ -40,6 +40,7 @@
 
 /* The feature bitmap for virtio vsock */
 #define VIRTIO_VSOCK_F_SEQPACKET	1	/* SOCK_SEQPACKET supported */
+#define VIRTIO_VSOCK_F_DGRAM		2	/* SOCK_DGRAM supported */
 
 struct virtio_vsock_config {
 	uint64_t guest_cid;
-- 
2.20.1



^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v4] virtio/vsock: add two more queues for datagram types
  2021-08-03 23:41 [PATCH v4] virtio/vsock: add two more queues for datagram types Jiang Wang
@ 2021-08-04  8:13 ` Stefano Garzarella
  2021-08-05 19:07   ` Jiang Wang .
  0 siblings, 1 reply; 6+ messages in thread
From: Stefano Garzarella @ 2021-08-04  8:13 UTC (permalink / raw)
  To: Jiang Wang; +Cc: arseny.krasnov, jasowang, qemu-devel, stefanha, mst

On Tue, Aug 03, 2021 at 11:41:32PM +0000, Jiang Wang wrote:
>Datagram sockets are connectionless and unreliable.
>The sender does not know the capacity of the receiver
>and may send more packets than the receiver can handle.
>
>Add two more dedicate virtqueues for datagram sockets,
>so that it will not unfairly steal resources from
>stream and future connection-oriented sockets.
>
>Signed-off-by: Jiang Wang <jiang.wang@bytedance.com>
>---
>v1 -> v2: use qemu cmd option to control number of queues,
>	removed configuration settings for dgram.
>v2 -> v3: use ioctl to get features and decide number of
>        virt queues, instead of qemu cmd option.
>v3 -> v4: change DGRAM feature bit value to 2. Add an argument
>	in vhost_vsock_common_realize to indicate dgram is supported or not.
>
> hw/virtio/vhost-user-vsock.c                  |  2 +-
> hw/virtio/vhost-vsock-common.c                | 58 ++++++++++++++++++-
> hw/virtio/vhost-vsock.c                       |  5 +-
> include/hw/virtio/vhost-vsock-common.h        |  6 +-
> include/hw/virtio/vhost-vsock.h               |  4 ++
> include/standard-headers/linux/virtio_vsock.h |  1 +
> 6 files changed, 69 insertions(+), 7 deletions(-)
>
>diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
>index 6095ed7349..e9ec0e1c00 100644
>--- a/hw/virtio/vhost-user-vsock.c
>+++ b/hw/virtio/vhost-user-vsock.c
>@@ -105,7 +105,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp)
>         return;
>     }
>
>-    vhost_vsock_common_realize(vdev, "vhost-user-vsock");
>+    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);
>
>     vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops);
>
>diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
>index 4ad6e234ad..c78536911a 100644
>--- a/hw/virtio/vhost-vsock-common.c
>+++ b/hw/virtio/vhost-vsock-common.c
>@@ -17,6 +17,8 @@
> #include "hw/virtio/vhost-vsock.h"
> #include "qemu/iov.h"
> #include "monitor/monitor.h"
>+#include <sys/ioctl.h>
>+#include <linux/vhost.h>
>
> int vhost_vsock_common_start(VirtIODevice *vdev)
> {
>@@ -196,9 +198,39 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
>     return 0;
> }
>
>-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
>+static int vhost_vsock_get_max_qps(bool enable_dgram)
>+{
>+    uint64_t features;
>+    int ret;
>+    int fd = -1;
>+
>+    if (!enable_dgram)
>+        return MAX_VQS_WITHOUT_DGRAM;
>+
>+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);


As I said in the previous version, we cannot directly open 
/dev/vhost-vsock, for two reasons:

   1. this code is common with vhost-user-vsock which does not use 
   /dev/vhost-vsock.

   2. the fd may have been passed from the management layer and qemu may 
   not be able to directly open /dev/vhost-vsock.

I think it is better to move this function into hw/virtio/vhost-vsock.c, 
using the `vhostfd` and returning true or false depending on whether dgram 
is supported; then you can use that for the `enable_dgram` param ...

>+    if (fd == -1) {
>+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
>+        return -1;
>+    }
>+
>+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
>+    if (ret) {
>+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
>+        qemu_close(fd);
>+        return ret;
>+    }
>+
>+    qemu_close(fd);
>+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
>+        return MAX_VQS_WITH_DGRAM;
>+
>+    return MAX_VQS_WITHOUT_DGRAM;
>+}
>+
>+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name, bool enable_dgram)
> {
>     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
>
>     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
>                 sizeof(struct virtio_vsock_config));
>@@ -209,12 +241,24 @@ void vhost_vsock_common_realize(VirtIODevice 
>*vdev, const char *name)
>     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>                                        vhost_vsock_common_handle_output);
>
>+    nvqs = vhost_vsock_get_max_qps(enable_dgram);
>+
>+    if (nvqs < 0)
>+        nvqs = MAX_VQS_WITHOUT_DGRAM;

... and here, if `enable_dgram` is true, you can set `nvqs = 
MAX_VQS_WITH_DGRAM``

>+
>+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>+                                              vhost_vsock_common_handle_output);
>+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>+                                               
>vhost_vsock_common_handle_output);
>+    }
>+

I'm still concerned about compatibility with guests that don't support 
dgram, as I mentioned in the previous email.

I need to do some testing, but my guess is it won't work if the host 
(QEMU and vhost-vsock) supports it and the guest doesn't.

I still think that we should allocate an array of queues and then decide 
at runtime which one to use for events (third or fifth) after the guest 
has acked the features.

>     /* The event queue belongs to QEMU */
>     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>                                        vhost_vsock_common_handle_output);
>
>-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
>-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
>+    vvc->vhost_dev.nvqs = nvqs;
>+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
>
>     vvc->post_load_timer = NULL;
> }
>@@ -227,6 +271,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
>
>     virtio_delete_queue(vvc->recv_vq);
>     virtio_delete_queue(vvc->trans_vq);
>+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
>+        virtio_delete_queue(vvc->dgram_recv_vq);
>+        virtio_delete_queue(vvc->dgram_trans_vq);
>+    }
>+
>+    if (vvc->vhost_dev.vqs)
>+        g_free(vvc->vhost_dev.vqs);
>+
>     virtio_delete_queue(vvc->event_vq);
>     virtio_cleanup(vdev);
> }
>diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
>index 1b1a5c70ed..f73523afaf 100644
>--- a/hw/virtio/vhost-vsock.c
>+++ b/hw/virtio/vhost-vsock.c
>@@ -23,6 +23,7 @@
>
> const int feature_bits[] = {
>     VIRTIO_VSOCK_F_SEQPACKET,
>+    VIRTIO_VSOCK_F_DGRAM,
>     VHOST_INVALID_FEATURE_BIT
> };
>
>@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
>     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>
>     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
>+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
>+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
>     return vhost_get_features(&vvc->vhost_dev, feature_bits,
>                                 requested_features);
> }
>@@ -175,7 +178,7 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
>         qemu_set_nonblock(vhostfd);
>     }
>
>-    vhost_vsock_common_realize(vdev, "vhost-vsock");
>+    vhost_vsock_common_realize(vdev, "vhost-vsock", true);
>
>     ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd,
>                          VHOST_BACKEND_TYPE_KERNEL, 0, errp);
>diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
>index e412b5ee98..6669d24714 100644
>--- a/include/hw/virtio/vhost-vsock-common.h
>+++ b/include/hw/virtio/vhost-vsock-common.h
>@@ -27,12 +27,13 @@ enum {
> struct VHostVSockCommon {
>     VirtIODevice parent;
>
>-    struct vhost_virtqueue vhost_vqs[2];
>     struct vhost_dev vhost_dev;
>
>     VirtQueue *event_vq;
>     VirtQueue *recv_vq;
>     VirtQueue *trans_vq;
>+    VirtQueue *dgram_recv_vq;
>+    VirtQueue *dgram_trans_vq;
>
>     QEMUTimer *post_load_timer;
> };
>@@ -41,7 +42,8 @@ int vhost_vsock_common_start(VirtIODevice *vdev);
> void vhost_vsock_common_stop(VirtIODevice *vdev);
> int vhost_vsock_common_pre_save(void *opaque);
> int vhost_vsock_common_post_load(void *opaque, int version_id);
>-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name);
>+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name,
>+			       bool enable_dgram);
> void vhost_vsock_common_unrealize(VirtIODevice *vdev);
>
> #endif /* _QEMU_VHOST_VSOCK_COMMON_H */
>diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
>index 84f4e727c7..e10319785d 100644
>--- a/include/hw/virtio/vhost-vsock.h
>+++ b/include/hw/virtio/vhost-vsock.h
>@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> typedef struct {
>     uint64_t guest_cid;
>     char *vhostfd;
>+    bool enable_dgram;

This seems unused.

Thanks,
Stefano



^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: Re: [PATCH v4] virtio/vsock: add two more queues for datagram types
  2021-08-04  8:13 ` Stefano Garzarella
@ 2021-08-05 19:07   ` Jiang Wang .
  2021-08-09 10:58     ` Stefano Garzarella
  0 siblings, 1 reply; 6+ messages in thread
From: Jiang Wang . @ 2021-08-05 19:07 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Wed, Aug 4, 2021 at 1:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Tue, Aug 03, 2021 at 11:41:32PM +0000, Jiang Wang wrote:
> >Datagram sockets are connectionless and unreliable.
> >The sender does not know the capacity of the receiver
> >and may send more packets than the receiver can handle.
> >
> >Add two more dedicate virtqueues for datagram sockets,
> >so that it will not unfairly steal resources from
> >stream and future connection-oriented sockets.
> >
> >Signed-off-by: Jiang Wang <jiang.wang@bytedance.com>
> >---
> >v1 -> v2: use qemu cmd option to control number of queues,
> >       removed configuration settings for dgram.
> >v2 -> v3: use ioctl to get features and decide number of
> >        virt queues, instead of qemu cmd option.
> >v3 -> v4: change DGRAM feature bit value to 2. Add an argument
> >       in vhost_vsock_common_realize to indicate dgram is supported or not.
> >
> > hw/virtio/vhost-user-vsock.c                  |  2 +-
> > hw/virtio/vhost-vsock-common.c                | 58 ++++++++++++++++++-
> > hw/virtio/vhost-vsock.c                       |  5 +-
> > include/hw/virtio/vhost-vsock-common.h        |  6 +-
> > include/hw/virtio/vhost-vsock.h               |  4 ++
> > include/standard-headers/linux/virtio_vsock.h |  1 +
> > 6 files changed, 69 insertions(+), 7 deletions(-)
> >
> >diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
> >index 6095ed7349..e9ec0e1c00 100644
> >--- a/hw/virtio/vhost-user-vsock.c
> >+++ b/hw/virtio/vhost-user-vsock.c
> >@@ -105,7 +105,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp)
> >         return;
> >     }
> >
> >-    vhost_vsock_common_realize(vdev, "vhost-user-vsock");
> >+    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);
> >
> >     vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops);
> >
> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >index 4ad6e234ad..c78536911a 100644
> >--- a/hw/virtio/vhost-vsock-common.c
> >+++ b/hw/virtio/vhost-vsock-common.c
> >@@ -17,6 +17,8 @@
> > #include "hw/virtio/vhost-vsock.h"
> > #include "qemu/iov.h"
> > #include "monitor/monitor.h"
> >+#include <sys/ioctl.h>
> >+#include <linux/vhost.h>
> >
> > int vhost_vsock_common_start(VirtIODevice *vdev)
> > {
> >@@ -196,9 +198,39 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >     return 0;
> > }
> >
> >-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >+static int vhost_vsock_get_max_qps(bool enable_dgram)
> >+{
> >+    uint64_t features;
> >+    int ret;
> >+    int fd = -1;
> >+
> >+    if (!enable_dgram)
> >+        return MAX_VQS_WITHOUT_DGRAM;
> >+
> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
>
>
> As I said in the previous version, we cannot directly open
> /dev/vhost-vsock, for two reasons:
>
>    1. this code is common with vhost-user-vsock which does not use
>    /dev/vhost-vsock.
>
>    2. the fd may have been passed from the management layer and qemu may
>    not be able to directly open /dev/vhost-vsock.
>
> I think is better to move this function in hw/virtio/vhost-vsock.c,
> using the `vhostfd`, returning true or false if dgram is supported, then
> you can use it for `enable_dgram` param ...
>

Yes, you are right. Now I remember you said that before but I forgot about that
when I changed the code. I will fix it. Sorry about that.

> >+    if (fd == -1) {
> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >+        return -1;
> >+    }
> >+
> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >+    if (ret) {
> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >+        qemu_close(fd);
> >+        return ret;
> >+    }
> >+
> >+    qemu_close(fd);
> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >+        return MAX_VQS_WITH_DGRAM;
> >+
> >+    return MAX_VQS_WITHOUT_DGRAM;
> >+}
> >+
> >+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name, bool enable_dgram)
> > {
> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >
> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >                 sizeof(struct virtio_vsock_config));
> >@@ -209,12 +241,24 @@ void vhost_vsock_common_realize(VirtIODevice
> >*vdev, const char *name)
> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                        vhost_vsock_common_handle_output);
> >
> >+    nvqs = vhost_vsock_get_max_qps(enable_dgram);
> >+
> >+    if (nvqs < 0)
> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>
> ... and here, if `enable_dgram` is true, you can set `nvqs =
> MAX_VQS_WITH_DGRAM``
>
sure.

> >+
> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >+                                              vhost_vsock_common_handle_output);
> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >+
> >vhost_vsock_common_handle_output);
> >+    }
> >+
>
> I'm still concerned about compatibility with guests that don't support
> dgram, as I mentioned in the previous email.
>
> I need to do some testing, but my guess is it won't work if the host
> (QEMU and vhost-vsock) supports it and the guest doesn't.
>
> I still think that we should allocate an array of queues and then decide
> at runtime which one to use for events (third or fifth) after the guest
> has acked the features.
>
Agree. I will check where the guest acks the feature. If you have any pointers,
just let me know. Also, could we just remove the vq allocation in common_realize
and do it at a later time? Or do we need to delete and add again, as I mentioned
in the previous email?

> >     /* The event queue belongs to QEMU */
> >     vvc->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >                                        vhost_vsock_common_handle_output);
> >
> >-    vvc->vhost_dev.nvqs = ARRAY_SIZE(vvc->vhost_vqs);
> >-    vvc->vhost_dev.vqs = vvc->vhost_vqs;
> >+    vvc->vhost_dev.nvqs = nvqs;
> >+    vvc->vhost_dev.vqs = g_new0(struct vhost_virtqueue, vvc->vhost_dev.nvqs);
> >
> >     vvc->post_load_timer = NULL;
> > }
> >@@ -227,6 +271,14 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev)
> >
> >     virtio_delete_queue(vvc->recv_vq);
> >     virtio_delete_queue(vvc->trans_vq);
> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM) {
> >+        virtio_delete_queue(vvc->dgram_recv_vq);
> >+        virtio_delete_queue(vvc->dgram_trans_vq);
> >+    }
> >+
> >+    if (vvc->vhost_dev.vqs)
> >+        g_free(vvc->vhost_dev.vqs);
> >+
> >     virtio_delete_queue(vvc->event_vq);
> >     virtio_cleanup(vdev);
> > }
> >diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
> >index 1b1a5c70ed..f73523afaf 100644
> >--- a/hw/virtio/vhost-vsock.c
> >+++ b/hw/virtio/vhost-vsock.c
> >@@ -23,6 +23,7 @@
> >
> > const int feature_bits[] = {
> >     VIRTIO_VSOCK_F_SEQPACKET,
> >+    VIRTIO_VSOCK_F_DGRAM,
> >     VHOST_INVALID_FEATURE_BIT
> > };
> >
> >@@ -116,6 +117,8 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >
> >     virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET);
> >+    if (vvc->vhost_dev.nvqs == MAX_VQS_WITH_DGRAM)
> >+        virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_DGRAM);
> >     return vhost_get_features(&vvc->vhost_dev, feature_bits,
> >                                 requested_features);
> > }
> >@@ -175,7 +178,7 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
> >         qemu_set_nonblock(vhostfd);
> >     }
> >
> >-    vhost_vsock_common_realize(vdev, "vhost-vsock");
> >+    vhost_vsock_common_realize(vdev, "vhost-vsock", true);
> >
> >     ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd,
> >                          VHOST_BACKEND_TYPE_KERNEL, 0, errp);
> >diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h
> >index e412b5ee98..6669d24714 100644
> >--- a/include/hw/virtio/vhost-vsock-common.h
> >+++ b/include/hw/virtio/vhost-vsock-common.h
> >@@ -27,12 +27,13 @@ enum {
> > struct VHostVSockCommon {
> >     VirtIODevice parent;
> >
> >-    struct vhost_virtqueue vhost_vqs[2];
> >     struct vhost_dev vhost_dev;
> >
> >     VirtQueue *event_vq;
> >     VirtQueue *recv_vq;
> >     VirtQueue *trans_vq;
> >+    VirtQueue *dgram_recv_vq;
> >+    VirtQueue *dgram_trans_vq;
> >
> >     QEMUTimer *post_load_timer;
> > };
> >@@ -41,7 +42,8 @@ int vhost_vsock_common_start(VirtIODevice *vdev);
> > void vhost_vsock_common_stop(VirtIODevice *vdev);
> > int vhost_vsock_common_pre_save(void *opaque);
> > int vhost_vsock_common_post_load(void *opaque, int version_id);
> >-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name);
> >+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name,
> >+                             bool enable_dgram);
> > void vhost_vsock_common_unrealize(VirtIODevice *vdev);
> >
> > #endif /* _QEMU_VHOST_VSOCK_COMMON_H */
> >diff --git a/include/hw/virtio/vhost-vsock.h b/include/hw/virtio/vhost-vsock.h
> >index 84f4e727c7..e10319785d 100644
> >--- a/include/hw/virtio/vhost-vsock.h
> >+++ b/include/hw/virtio/vhost-vsock.h
> >@@ -23,6 +23,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostVSock, VHOST_VSOCK)
> > typedef struct {
> >     uint64_t guest_cid;
> >     char *vhostfd;
> >+    bool enable_dgram;
>
> This seems unused.
>
Sure, I will remove it.

> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v4] virtio/vsock: add two more queues for datagram types
  2021-08-05 19:07   ` Jiang Wang .
@ 2021-08-09 10:58     ` Stefano Garzarella
  2021-08-09 22:49       ` Jiang Wang .
  0 siblings, 1 reply; 6+ messages in thread
From: Stefano Garzarella @ 2021-08-09 10:58 UTC (permalink / raw)
  To: Jiang Wang .
  Cc: Arseny Krasnov, Jason Wang, qemu-devel, Stefan Hajnoczi,
	Michael S. Tsirkin

On Thu, Aug 05, 2021 at 12:07:02PM -0700, Jiang Wang . wrote:
>On Wed, Aug 4, 2021 at 1:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>>
>> On Tue, Aug 03, 2021 at 11:41:32PM +0000, Jiang Wang wrote:
>> >Datagram sockets are connectionless and unreliable.
>> >The sender does not know the capacity of the receiver
>> >and may send more packets than the receiver can handle.
>> >
>> >Add two more dedicate virtqueues for datagram sockets,
>> >so that it will not unfairly steal resources from
>> >stream and future connection-oriented sockets.
>> >
>> >Signed-off-by: Jiang Wang <jiang.wang@bytedance.com>
>> >---
>> >v1 -> v2: use qemu cmd option to control number of queues,
>> >       removed configuration settings for dgram.
>> >v2 -> v3: use ioctl to get features and decide number of
>> >        virt queues, instead of qemu cmd option.
>> >v3 -> v4: change DGRAM feature bit value to 2. Add an argument
>> >       in vhost_vsock_common_realize to indicate dgram is supported or not.
>> >
>> > hw/virtio/vhost-user-vsock.c                  |  2 +-
>> > hw/virtio/vhost-vsock-common.c                | 58 ++++++++++++++++++-
>> > hw/virtio/vhost-vsock.c                       |  5 +-
>> > include/hw/virtio/vhost-vsock-common.h        |  6 +-
>> > include/hw/virtio/vhost-vsock.h               |  4 ++
>> > include/standard-headers/linux/virtio_vsock.h |  1 +
>> > 6 files changed, 69 insertions(+), 7 deletions(-)
>> >
>> >diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
>> >index 6095ed7349..e9ec0e1c00 100644
>> >--- a/hw/virtio/vhost-user-vsock.c
>> >+++ b/hw/virtio/vhost-user-vsock.c
>> >@@ -105,7 +105,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp)
>> >         return;
>> >     }
>> >
>> >-    vhost_vsock_common_realize(vdev, "vhost-user-vsock");
>> >+    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);
>> >
>> >     vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops);
>> >
>> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
>> >index 4ad6e234ad..c78536911a 100644
>> >--- a/hw/virtio/vhost-vsock-common.c
>> >+++ b/hw/virtio/vhost-vsock-common.c
>> >@@ -17,6 +17,8 @@
>> > #include "hw/virtio/vhost-vsock.h"
>> > #include "qemu/iov.h"
>> > #include "monitor/monitor.h"
>> >+#include <sys/ioctl.h>
>> >+#include <linux/vhost.h>
>> >
>> > int vhost_vsock_common_start(VirtIODevice *vdev)
>> > {
>> >@@ -196,9 +198,39 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
>> >     return 0;
>> > }
>> >
>> >-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
>> >+static int vhost_vsock_get_max_qps(bool enable_dgram)
>> >+{
>> >+    uint64_t features;
>> >+    int ret;
>> >+    int fd = -1;
>> >+
>> >+    if (!enable_dgram)
>> >+        return MAX_VQS_WITHOUT_DGRAM;
>> >+
>> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
>>
>>
>> As I said in the previous version, we cannot directly open
>> /dev/vhost-vsock, for two reasons:
>>
>>    1. this code is common with vhost-user-vsock which does not use
>>    /dev/vhost-vsock.
>>
>>    2. the fd may have been passed from the management layer and qemu may
>>    not be able to directly open /dev/vhost-vsock.
>>
>> I think is better to move this function in hw/virtio/vhost-vsock.c,
>> using the `vhostfd`, returning true or false if dgram is supported, then
>> you can use it for `enable_dgram` param ...
>>
>
>Yes, you are right. Now I remember you said that before but I forgot about that
>when I changed the code. I will fix it. Sorry about that.

No problem :-)

>
>> >+    if (fd == -1) {
>> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
>> >+        return -1;
>> >+    }
>> >+
>> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
>> >+    if (ret) {
>> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
>> >+        qemu_close(fd);
>> >+        return ret;
>> >+    }
>> >+
>> >+    qemu_close(fd);
>> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
>> >+        return MAX_VQS_WITH_DGRAM;
>> >+
>> >+    return MAX_VQS_WITHOUT_DGRAM;
>> >+}
>> >+
>> >+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name, bool enable_dgram)
>> > {
>> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
>> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
>> >
>> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
>> >                 sizeof(struct virtio_vsock_config));
>> >@@ -209,12 +241,24 @@ void vhost_vsock_common_realize(VirtIODevice
>> >*vdev, const char *name)
>> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >                                        vhost_vsock_common_handle_output);
>> >
>> >+    nvqs = vhost_vsock_get_max_qps(enable_dgram);
>> >+
>> >+    if (nvqs < 0)
>> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
>>
>> ... and here, if `enable_dgram` is true, you can set `nvqs =
>> MAX_VQS_WITH_DGRAM``
>>
>sure.
>
>> >+
>> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
>> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >+                                              vhost_vsock_common_handle_output);
>> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
>> >+
>> >vhost_vsock_common_handle_output);
>> >+    }
>> >+
>>
>> I'm still concerned about compatibility with guests that don't 
>> support
>> dgram, as I mentioned in the previous email.
>>
>> I need to do some testing, but my guess is it won't work if the host
>> (QEMU and vhost-vsock) supports it and the guest doesn't.
>>
>> I still think that we should allocate an array of queues and then decide
>> at runtime which one to use for events (third or fifth) after the guest
>> has acked the features.
>>
>Agree. I will check where the guest ack the feature. If you have any 

I'm not sure we should delete them; I think we can allocate 5 queues and 
then use queue 3 or 5 for events in vhost_vsock_common_start(), once the 
guest has acked the features.

>pointers,
>just let me know. Also, could we just remove the vq allocation in common_realize
>and do it at a later time? Or need to delete and add again as I mentioned in the
>previous email?

Instead of having 'recv_vq', 'trans_vq', 'event_vq' fields, we can have 
an array of VirtQueue pointers and a field that indicates what the index 
of the event queue is. (or a boolean that indicates if we are enabling 
dgram or not).

This should simplify the management.

Thanks,
Stefano



^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: Re: [PATCH v4] virtio/vsock: add two more queues for datagram types
  2021-08-09 10:58     ` Stefano Garzarella
@ 2021-08-09 22:49       ` Jiang Wang .
  0 siblings, 0 replies; 6+ messages in thread
From: Jiang Wang . @ 2021-08-09 22:49 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu devel list, Stefan Hajnoczi,
	Michael S. Tsirkin

On Mon, Aug 9, 2021 at 3:58 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Thu, Aug 05, 2021 at 12:07:02PM -0700, Jiang Wang . wrote:
> >On Wed, Aug 4, 2021 at 1:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >>
> >> On Tue, Aug 03, 2021 at 11:41:32PM +0000, Jiang Wang wrote:
> >> >Datagram sockets are connectionless and unreliable.
> >> >The sender does not know the capacity of the receiver
> >> >and may send more packets than the receiver can handle.
> >> >
> >> >Add two more dedicate virtqueues for datagram sockets,
> >> >so that it will not unfairly steal resources from
> >> >stream and future connection-oriented sockets.
> >> >
> >> >Signed-off-by: Jiang Wang <jiang.wang@bytedance.com>
> >> >---
> >> >v1 -> v2: use qemu cmd option to control number of queues,
> >> >       removed configuration settings for dgram.
> >> >v2 -> v3: use ioctl to get features and decide number of
> >> >        virt queues, instead of qemu cmd option.
> >> >v3 -> v4: change DGRAM feature bit value to 2. Add an argument
> >> >       in vhost_vsock_common_realize to indicate dgram is supported or not.
> >> >
> >> > hw/virtio/vhost-user-vsock.c                  |  2 +-
> >> > hw/virtio/vhost-vsock-common.c                | 58 ++++++++++++++++++-
> >> > hw/virtio/vhost-vsock.c                       |  5 +-
> >> > include/hw/virtio/vhost-vsock-common.h        |  6 +-
> >> > include/hw/virtio/vhost-vsock.h               |  4 ++
> >> > include/standard-headers/linux/virtio_vsock.h |  1 +
> >> > 6 files changed, 69 insertions(+), 7 deletions(-)
> >> >
> >> >diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
> >> >index 6095ed7349..e9ec0e1c00 100644
> >> >--- a/hw/virtio/vhost-user-vsock.c
> >> >+++ b/hw/virtio/vhost-user-vsock.c
> >> >@@ -105,7 +105,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp)
> >> >         return;
> >> >     }
> >> >
> >> >-    vhost_vsock_common_realize(vdev, "vhost-user-vsock");
> >> >+    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);
> >> >
> >> >     vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops);
> >> >
> >> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >> >index 4ad6e234ad..c78536911a 100644
> >> >--- a/hw/virtio/vhost-vsock-common.c
> >> >+++ b/hw/virtio/vhost-vsock-common.c
> >> >@@ -17,6 +17,8 @@
> >> > #include "hw/virtio/vhost-vsock.h"
> >> > #include "qemu/iov.h"
> >> > #include "monitor/monitor.h"
> >> >+#include <sys/ioctl.h>
> >> >+#include <linux/vhost.h>
> >> >
> >> > int vhost_vsock_common_start(VirtIODevice *vdev)
> >> > {
> >> >@@ -196,9 +198,39 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >> >     return 0;
> >> > }
> >> >
> >> >-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >> >+static int vhost_vsock_get_max_qps(bool enable_dgram)
> >> >+{
> >> >+    uint64_t features;
> >> >+    int ret;
> >> >+    int fd = -1;
> >> >+
> >> >+    if (!enable_dgram)
> >> >+        return MAX_VQS_WITHOUT_DGRAM;
> >> >+
> >> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
> >>
> >>
> >> As I said in the previous version, we cannot directly open
> >> /dev/vhost-vsock, for two reasons:
> >>
> >>    1. this code is common with vhost-user-vsock which does not use
> >>    /dev/vhost-vsock.
> >>
> >>    2. the fd may have been passed from the management layer and qemu may
> >>    not be able to directly open /dev/vhost-vsock.
> >>
> >> I think is better to move this function in hw/virtio/vhost-vsock.c,
> >> using the `vhostfd`, returning true or false if dgram is supported, then
> >> you can use it for `enable_dgram` param ...
> >>
> >
> >Yes, you are right. Now I remember you said that before but I forgot about that
> >when I changed the code. I will fix it. Sorry about that.
>
> No problem :-)
>
> >
> >> >+    if (fd == -1) {
> >> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >> >+        return -1;
> >> >+    }
> >> >+
> >> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >> >+    if (ret) {
> >> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >> >+        qemu_close(fd);
> >> >+        return ret;
> >> >+    }
> >> >+
> >> >+    qemu_close(fd);
> >> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >> >+        return MAX_VQS_WITH_DGRAM;
> >> >+
> >> >+    return MAX_VQS_WITHOUT_DGRAM;
> >> >+}
> >> >+
> >> >+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name, bool enable_dgram)
> >> > {
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >
> >> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >> >                 sizeof(struct virtio_vsock_config));
> >> >@@ -209,12 +241,24 @@ void vhost_vsock_common_realize(VirtIODevice
> >> >*vdev, const char *name)
> >> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >> >
> >> >+    nvqs = vhost_vsock_get_max_qps(enable_dgram);
> >> >+
> >> >+    if (nvqs < 0)
> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >>
> >> ... and here, if `enable_dgram` is true, you can set `nvqs =
> >> MAX_VQS_WITH_DGRAM``
> >>
> >sure.
> >
> >> >+
> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                              vhost_vsock_common_handle_output);
> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+
> >> >vhost_vsock_common_handle_output);
> >> >+    }
> >> >+
> >>
> >> I'm still concerned about compatibility with guests that don't
> >> support
> >> dgram, as I mentioned in the previous email.
> >>
> >> I need to do some testing, but my guess is it won't work if the host
> >> (QEMU and vhost-vsock) supports it and the guest doesn't.
> >>
> >> I still think that we should allocate an array of queues and then decide
> >> at runtime which one to use for events (third or fifth) after the guest
> >> has acked the features.
> >>
> >Agree. I will check where the guest ack the feature. If you have any
>
> I'm not sure we should delete them, I think we can allocate 5 queues and
> then use queue 3 or 5 for events in vhost_vsock_common_start(), when the
> guest already acked the features.
>

Got it.

> >pointers,
> >just let me know. Also, could we just remove the vq allocation in common_realize
> >and do it at a later time? Or need to delete and add again as I mentioned in the
> >previous email?
>
> Instead of having 'recv_vq', 'trans_vq', 'event_vq' fields, we can have
> an array of VirtQueue pointers and a field that indicates what the index
> of the event queue is. (or a boolean that indicates if we are enabling
> dgram or not).
>
> This should simplify the management.
>
I see. I will dig more. thanks.

> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: Re: [PATCH v4] virtio/vsock: add two more queues for datagram types
@ 2021-09-05 18:08 Jiang Wang .
  0 siblings, 0 replies; 6+ messages in thread
From: Jiang Wang . @ 2021-09-05 18:08 UTC (permalink / raw)
  To: Stefano Garzarella
  Cc: Arseny Krasnov, Jason Wang, qemu devel list, Stefan Hajnoczi,
	Michael S. Tsirkin

On Mon, Aug 9, 2021 at 3:58 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Thu, Aug 05, 2021 at 12:07:02PM -0700, Jiang Wang . wrote:
> >On Wed, Aug 4, 2021 at 1:13 AM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >>
> >> On Tue, Aug 03, 2021 at 11:41:32PM +0000, Jiang Wang wrote:
> >> >Datagram sockets are connectionless and unreliable.
> >> >The sender does not know the capacity of the receiver
> >> >and may send more packets than the receiver can handle.
> >> >
> >> >Add two more dedicate virtqueues for datagram sockets,
> >> >so that it will not unfairly steal resources from
> >> >stream and future connection-oriented sockets.
> >> >
> >> >Signed-off-by: Jiang Wang <jiang.wang@bytedance.com>
> >> >---
> >> >v1 -> v2: use qemu cmd option to control number of queues,
> >> >       removed configuration settings for dgram.
> >> >v2 -> v3: use ioctl to get features and decide number of
> >> >        virt queues, instead of qemu cmd option.
> >> >v3 -> v4: change DGRAM feature bit value to 2. Add an argument
> >> >       in vhost_vsock_common_realize to indicate dgram is supported or not.
> >> >
> >> > hw/virtio/vhost-user-vsock.c                  |  2 +-
> >> > hw/virtio/vhost-vsock-common.c                | 58 ++++++++++++++++++-
> >> > hw/virtio/vhost-vsock.c                       |  5 +-
> >> > include/hw/virtio/vhost-vsock-common.h        |  6 +-
> >> > include/hw/virtio/vhost-vsock.h               |  4 ++
> >> > include/standard-headers/linux/virtio_vsock.h |  1 +
> >> > 6 files changed, 69 insertions(+), 7 deletions(-)
> >> >
> >> >diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
> >> >index 6095ed7349..e9ec0e1c00 100644
> >> >--- a/hw/virtio/vhost-user-vsock.c
> >> >+++ b/hw/virtio/vhost-user-vsock.c
> >> >@@ -105,7 +105,7 @@ static void vuv_device_realize(DeviceState *dev, Error **errp)
> >> >         return;
> >> >     }
> >> >
> >> >-    vhost_vsock_common_realize(vdev, "vhost-user-vsock");
> >> >+    vhost_vsock_common_realize(vdev, "vhost-user-vsock", false);
> >> >
> >> >     vhost_dev_set_config_notifier(&vvc->vhost_dev, &vsock_ops);
> >> >
> >> >diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
> >> >index 4ad6e234ad..c78536911a 100644
> >> >--- a/hw/virtio/vhost-vsock-common.c
> >> >+++ b/hw/virtio/vhost-vsock-common.c
> >> >@@ -17,6 +17,8 @@
> >> > #include "hw/virtio/vhost-vsock.h"
> >> > #include "qemu/iov.h"
> >> > #include "monitor/monitor.h"
> >> >+#include <sys/ioctl.h>
> >> >+#include <linux/vhost.h>
> >> >
> >> > int vhost_vsock_common_start(VirtIODevice *vdev)
> >> > {
> >> >@@ -196,9 +198,39 @@ int vhost_vsock_common_post_load(void *opaque, int version_id)
> >> >     return 0;
> >> > }
> >> >
> >> >-void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name)
> >> >+static int vhost_vsock_get_max_qps(bool enable_dgram)
> >> >+{
> >> >+    uint64_t features;
> >> >+    int ret;
> >> >+    int fd = -1;
> >> >+
> >> >+    if (!enable_dgram)
> >> >+        return MAX_VQS_WITHOUT_DGRAM;
> >> >+
> >> >+    fd = qemu_open_old("/dev/vhost-vsock", O_RDONLY);
> >>
> >>
> >> As I said in the previous version, we cannot directly open
> >> /dev/vhost-vsock, for two reasons:
> >>
> >>    1. this code is common with vhost-user-vsock which does not use
> >>    /dev/vhost-vsock.
> >>
> >>    2. the fd may have been passed from the management layer and qemu may
> >>    not be able to directly open /dev/vhost-vsock.
> >>
> >> I think is better to move this function in hw/virtio/vhost-vsock.c,
> >> using the `vhostfd`, returning true or false if dgram is supported, then
> >> you can use it for `enable_dgram` param ...
> >>
> >
> >Yes, you are right. Now I remember you said that before but I forgot about that
> >when I changed the code. I will fix it. Sorry about that.
>
> No problem :-)
>
> >
> >> >+    if (fd == -1) {
> >> >+        error_report("vhost-vsock: failed to open device. %s", strerror(errno));
> >> >+        return -1;
> >> >+    }
> >> >+
> >> >+    ret = ioctl(fd, VHOST_GET_FEATURES, &features);
> >> >+    if (ret) {
> >> >+        error_report("vhost-vsock: failed to read  device. %s", strerror(errno));
> >> >+        qemu_close(fd);
> >> >+        return ret;
> >> >+    }
> >> >+
> >> >+    qemu_close(fd);
> >> >+    if (features & (1 << VIRTIO_VSOCK_F_DGRAM))
> >> >+        return MAX_VQS_WITH_DGRAM;
> >> >+
> >> >+    return MAX_VQS_WITHOUT_DGRAM;
> >> >+}
> >> >+
> >> >+void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name, bool enable_dgram)
> >> > {
> >> >     VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
> >> >+    int nvqs = MAX_VQS_WITHOUT_DGRAM;
> >> >
> >> >     virtio_init(vdev, name, VIRTIO_ID_VSOCK,
> >> >                 sizeof(struct virtio_vsock_config));
> >> >@@ -209,12 +241,24 @@ void vhost_vsock_common_realize(VirtIODevice
> >> >*vdev, const char *name)
> >> >     vvc->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >                                        vhost_vsock_common_handle_output);
> >> >
> >> >+    nvqs = vhost_vsock_get_max_qps(enable_dgram);
> >> >+
> >> >+    if (nvqs < 0)
> >> >+        nvqs = MAX_VQS_WITHOUT_DGRAM;
> >>
> >> ... and here, if `enable_dgram` is true, you can set `nvqs =
> >> MAX_VQS_WITH_DGRAM``
> >>
> >sure.
> >
> >> >+
> >> >+    if (nvqs == MAX_VQS_WITH_DGRAM) {
> >> >+        vvc->dgram_recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+                                              vhost_vsock_common_handle_output);
> >> >+        vvc->dgram_trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
> >> >+
> >> >vhost_vsock_common_handle_output);
> >> >+    }
> >> >+
> >>
> >> I'm still concerned about compatibility with guests that don't
> >> support
> >> dgram, as I mentioned in the previous email.
> >>
> >> I need to do some testing, but my guess is it won't work if the host
> >> (QEMU and vhost-vsock) supports it and the guest doesn't.
> >>
> >> I still think that we should allocate an array of queues and then decide
> >> at runtime which one to use for events (third or fifth) after the guest
> >> has acked the features.
> >>
> >Agree. I will check where the guest ack the feature. If you have any
>
> I'm not sure we should delete them, I think we can allocate 5 queues and
> then use queue 3 or 5 for events in vhost_vsock_common_start(), when the
> guest already acked the features.
>

I think I just solved most compatibility issues during migration. The
previous error I saw was due to a bug in the vhost-vsock kernel module.
After fixing that, I did not change anything for QEMU (i.e., still the
same version 4; btw, I will fix the fd issue in v5) and did a few
migration tests. Most of them are good.

There are two test cases in which migration failed with a "Features
0x130000002 unsupported" error, which is due to the SEQPACKET QEMU patch
(my dgram patch is based on the seqpacket patch). Not sure if we need to
fix it or not. I think the compatibility is good as of now. Let me
know if you have other concerns or more test cases to test.
Otherwise, I will submit V5 soon.

Test results:
The left three columns are the source set-up; the right ones are the
destination set-up. Host and Guest refer to the host and guest kernels,
respectively. These tests are not complete, and I made the src and dest
kernels mostly the same version. But I did test one case where the source
kernel has dgram support while the dest kernel does not, and it is good.
Btw, if the src kernel and dest kernel have a big difference (like 5.14
vs 4.19), then QEMU will show some MSR errors, which I guess is kind of
expected.

Host        QEMU        Guest            --> Host        QEMU            result
dgram       no-dgram    no-dgram        dgram       no-dgram        Good
dgram       no-dgram    dgram           dgram       no-dgram        Good
dgram       dgram       no-dgram        dgram       dgram           Good
dgram       dgram       no-dgram        dgram       no-dgram        Good
dgram       dgram       dgram           dgram       no-dgram
load feature error *1

no-dgram    no-dgram    dgram           no-dgram    no-dgram        Good
no-dgram    dgram       dgram           no-dgram    dgram             Good
no-dgram    dgram       no-dgram        no-dgram    dgram           Good
no-dgram    dgram       no-dgram        no-dgram    no-dgram        Good
no-dgram    dgram       dgram           no-dgram    no-dgram
load feature error *1

dgram       dgram       no-dgram        no-dgram    no-dgram        Good

*1 Qemu shows following error messages:
qemu-system-x86_64: Features 0x130000002 unsupported. Allowed
features: 0x179000000
qemu-system-x86_64: Failed to load virtio-vhost_vsock:virtio
qemu-system-x86_64: error while loading state for instance 0x0 of
device '0000:00:05.0/virtio-vhost_vsock'
qemu-system-x86_64: load of migration failed: Operation not permitted

This is due to SEQPACKET feature bit.


Stepping back and rethinking whether the event vq number should be 3 or 5,
I now think it does not matter. The tx and rx queues (whether 2 or 4 queues)
belong to vhost, but the event queue belongs to QEMU. The QEMU code
allocates an array for vhost_dev.vqs only for the tx and rx queues. So
the event queue is never in that array. That means we don't need to
worry about whether the event queue number is 3 or 5. And my tests
confirmed that. I think for the virtio spec, we need to put the event
queue somewhere, and it looks like it has a relative position to the tx
and rx queues. But for the vhost kernel implementation, the event queue
is a special case and not related to the tx or rx queues.

Regards,

Jiang


> >pointers,
> >just let me know. Also, could we just remove the vq allocation in common_realize
> >and do it at a later time? Or need to delete and add again as I mentioned in the
> >previous email?
>
> Instead of having 'recv_vq', 'trans_vq', 'event_vq' fields, we can have
> an array of VirtQueue pointers and a field that indicates what the index
> of the event queue is. (or a boolean that indicates if we are enabling
> dgram or not).
>
> This should simplify the management.
>
> Thanks,
> Stefano
>


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2021-09-05 18:09 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-03 23:41 [PATCH v4] virtio/vsock: add two more queues for datagram types Jiang Wang
2021-08-04  8:13 ` Stefano Garzarella
2021-08-05 19:07   ` Jiang Wang .
2021-08-09 10:58     ` Stefano Garzarella
2021-08-09 22:49       ` Jiang Wang .
2021-09-05 18:08 Jiang Wang .

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.