All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2 0/2] vhost-scsi: Support worker ioctls
@ 2023-11-27  0:28 Mike Christie
  2023-11-27  0:28 ` [PATCH v2 1/2] vhost: Add worker backend callouts Mike Christie
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Mike Christie @ 2023-11-27  0:28 UTC (permalink / raw)
  To: fam, stefanha, jasowang, mst, sgarzare, pbonzini, qemu-devel

The following patches allow users to configure the vhost worker threads
for vhost-scsi. With vhost-net we get a worker thread per rx/tx virtqueue
pair, but for vhost-scsi we get one worker for all virtqueues. This
becomes a bottleneck after 2 queues are used.

In the upstream linux kernel commit:

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/drivers/vhost/vhost.c?id=c1ecd8e9500797748ae4f79657971955d452d69d

we enabled the vhost layer to be able to create a worker thread and
attach it to a virtqueue.

This patchset adds support to vhost-scsi to use these ioctls so we are
no longer limited to the single worker.

v2:
- Make config option a bool instead of an int.




^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v2 1/2] vhost: Add worker backend callouts
  2023-11-27  0:28 [PATCH v2 0/2] vhost-scsi: Support worker ioctls Mike Christie
@ 2023-11-27  0:28 ` Mike Christie
  2023-11-29  9:26   ` Stefano Garzarella
  2023-11-27  0:28 ` [PATCH v2 2/2] vhost-scsi: Add support for a worker thread per virtqueue Mike Christie
  2023-11-29 13:43 ` [PATCH v2 0/2] vhost-scsi: Support worker ioctls Stefan Hajnoczi
  2 siblings, 1 reply; 7+ messages in thread
From: Mike Christie @ 2023-11-27  0:28 UTC (permalink / raw)
  To: fam, stefanha, jasowang, mst, sgarzare, pbonzini, qemu-devel
  Cc: Mike Christie

This adds the vhost backend callouts for the worker ioctls added in the
6.4 linux kernel commit:

c1ecd8e95007 ("vhost: allow userspace to create workers")

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 hw/virtio/vhost-backend.c         | 28 ++++++++++++++++++++++++++++
 include/hw/virtio/vhost-backend.h | 14 ++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 17f3fc6a0823..833804dd40f2 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -158,6 +158,30 @@ static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
     return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
 }
 
+static int vhost_kernel_new_worker(struct vhost_dev *dev,
+                                   struct vhost_worker_state *worker)
+{
+    return vhost_kernel_call(dev, VHOST_NEW_WORKER, worker);
+}
+
+static int vhost_kernel_free_worker(struct vhost_dev *dev,
+                                    struct vhost_worker_state *worker)
+{
+    return vhost_kernel_call(dev, VHOST_FREE_WORKER, worker);
+}
+
+static int vhost_kernel_attach_vring_worker(struct vhost_dev *dev,
+                                            struct vhost_vring_worker *worker)
+{
+    return vhost_kernel_call(dev, VHOST_ATTACH_VRING_WORKER, worker);
+}
+
+static int vhost_kernel_get_vring_worker(struct vhost_dev *dev,
+                                         struct vhost_vring_worker *worker)
+{
+    return vhost_kernel_call(dev, VHOST_GET_VRING_WORKER, worker);
+}
+
 static int vhost_kernel_set_features(struct vhost_dev *dev,
                                      uint64_t features)
 {
@@ -313,6 +337,10 @@ const VhostOps kernel_ops = {
         .vhost_set_vring_err = vhost_kernel_set_vring_err,
         .vhost_set_vring_busyloop_timeout =
                                 vhost_kernel_set_vring_busyloop_timeout,
+        .vhost_get_vring_worker = vhost_kernel_get_vring_worker,
+        .vhost_attach_vring_worker = vhost_kernel_attach_vring_worker,
+        .vhost_new_worker = vhost_kernel_new_worker,
+        .vhost_free_worker = vhost_kernel_free_worker,
         .vhost_set_features = vhost_kernel_set_features,
         .vhost_get_features = vhost_kernel_get_features,
         .vhost_set_backend_cap = vhost_kernel_set_backend_cap,
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 96ccc18cd33b..9f16d0884e8f 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -33,6 +33,8 @@ struct vhost_memory;
 struct vhost_vring_file;
 struct vhost_vring_state;
 struct vhost_vring_addr;
+struct vhost_vring_worker;
+struct vhost_worker_state;
 struct vhost_scsi_target;
 struct vhost_iotlb_msg;
 struct vhost_virtqueue;
@@ -73,6 +75,14 @@ typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev,
                                       struct vhost_vring_file *file);
 typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev,
                                                    struct vhost_vring_state *r);
+typedef int (*vhost_attach_vring_worker_op)(struct vhost_dev *dev,
+                                            struct vhost_vring_worker *worker);
+typedef int (*vhost_get_vring_worker_op)(struct vhost_dev *dev,
+                                         struct vhost_vring_worker *worker);
+typedef int (*vhost_new_worker_op)(struct vhost_dev *dev,
+                                   struct vhost_worker_state *worker);
+typedef int (*vhost_free_worker_op)(struct vhost_dev *dev,
+                                    struct vhost_worker_state *worker);
 typedef int (*vhost_set_features_op)(struct vhost_dev *dev,
                                      uint64_t features);
 typedef int (*vhost_get_features_op)(struct vhost_dev *dev,
@@ -151,6 +161,10 @@ typedef struct VhostOps {
     vhost_set_vring_call_op vhost_set_vring_call;
     vhost_set_vring_err_op vhost_set_vring_err;
     vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout;
+    vhost_new_worker_op vhost_new_worker;
+    vhost_free_worker_op vhost_free_worker;
+    vhost_get_vring_worker_op vhost_get_vring_worker;
+    vhost_attach_vring_worker_op vhost_attach_vring_worker;
     vhost_set_features_op vhost_set_features;
     vhost_get_features_op vhost_get_features;
     vhost_set_backend_cap_op vhost_set_backend_cap;
-- 
2.34.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH v2 2/2] vhost-scsi: Add support for a worker thread per virtqueue
  2023-11-27  0:28 [PATCH v2 0/2] vhost-scsi: Support worker ioctls Mike Christie
  2023-11-27  0:28 ` [PATCH v2 1/2] vhost: Add worker backend callouts Mike Christie
@ 2023-11-27  0:28 ` Mike Christie
  2023-11-29  9:30   ` Stefano Garzarella
  2023-11-29 13:43 ` [PATCH v2 0/2] vhost-scsi: Support worker ioctls Stefan Hajnoczi
  2 siblings, 1 reply; 7+ messages in thread
From: Mike Christie @ 2023-11-27  0:28 UTC (permalink / raw)
  To: fam, stefanha, jasowang, mst, sgarzare, pbonzini, qemu-devel
  Cc: Mike Christie

This adds support for vhost-scsi to be able to create a worker thread
per virtqueue. Right now for vhost-net we get a worker thread per
tx/rx virtqueue pair which scales nicely as we add more virtqueues and
CPUs, but for scsi we get the single worker thread that's shared by all
virtqueues. When trying to send IO to more than 2 virtqueues the single
thread becomes a bottleneck.

This patch adds a new setting, worker_per_virtqueue, which can be set
to:

false: Existing behavior where we get the single worker thread.
true: Create a worker per IO virtqueue.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 hw/scsi/vhost-scsi.c            | 60 +++++++++++++++++++++++++++++++++
 include/hw/virtio/virtio-scsi.h |  1 +
 2 files changed, 61 insertions(+)

diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 3126df9e1d9d..77eef9474c23 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -165,6 +165,57 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
     .pre_save = vhost_scsi_pre_save,
 };
 
+static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
+{
+    struct vhost_dev *dev = &vsc->dev;
+    struct vhost_vring_worker vq_worker;
+    struct vhost_worker_state worker;
+    int i, ret;
+
+    /* Use default worker */
+    if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
+        return 0;
+    }
+
+    /*
+     * ctl/evt share the first worker since it will be rare for them
+     * to send cmds while IO is running.
+     */
+    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
+        memset(&worker, 0, sizeof(worker));
+
+        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
+        if (ret == -ENOTTY) {
+            /*
+             * worker ioctls are not implemented so just ignore and
+             * continue device setup.
+             */
+            ret = 0;
+            break;
+        } else if (ret) {
+            break;
+        }
+
+        memset(&vq_worker, 0, sizeof(vq_worker));
+        vq_worker.worker_id = worker.worker_id;
+        vq_worker.index = i;
+
+        ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
+        if (ret == -ENOTTY) {
+            /*
+             * It's a bug for the kernel to have supported the worker creation
+             * ioctl but not attach.
+             */
+            dev->vhost_ops->vhost_free_worker(dev, &worker);
+            break;
+        } else if (ret) {
+            break;
+        }
+    }
+
+    return ret;
+}
+
 static void vhost_scsi_realize(DeviceState *dev, Error **errp)
 {
     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
@@ -232,6 +283,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
         goto free_vqs;
     }
 
+    ret = vhost_scsi_set_workers(vsc, vs->conf.worker_per_virtqueue);
+    if (ret < 0) {
+        error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
+                   strerror(-ret));
+        goto free_vqs;
+    }
+
     /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
     vsc->channel = 0;
     vsc->lun = 0;
@@ -297,6 +355,8 @@ static Property vhost_scsi_properties[] = {
                                                  VIRTIO_SCSI_F_T10_PI,
                                                  false),
     DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
+    DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon,
+                     conf.worker_per_virtqueue, false),
     DEFINE_PROP_END_OF_LIST(),
 };
 
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 779568ab5d28..0e9a1867665e 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
 struct VirtIOSCSIConf {
     uint32_t num_queues;
     uint32_t virtqueue_size;
+    bool worker_per_virtqueue;
     bool seg_max_adjust;
     uint32_t max_sectors;
     uint32_t cmd_per_lun;
-- 
2.34.1



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v2 1/2] vhost: Add worker backend callouts
  2023-11-27  0:28 ` [PATCH v2 1/2] vhost: Add worker backend callouts Mike Christie
@ 2023-11-29  9:26   ` Stefano Garzarella
  0 siblings, 0 replies; 7+ messages in thread
From: Stefano Garzarella @ 2023-11-29  9:26 UTC (permalink / raw)
  To: Mike Christie; +Cc: fam, stefanha, jasowang, mst, pbonzini, qemu-devel

On Sun, Nov 26, 2023 at 06:28:33PM -0600, Mike Christie wrote:
>This adds the vhost backend callouts for the worker ioctls added in the
>6.4 linux kernel commit:
>
>c1ecd8e95007 ("vhost: allow userspace to create workers")
>
>Signed-off-by: Mike Christie <michael.christie@oracle.com>
>---
> hw/virtio/vhost-backend.c         | 28 ++++++++++++++++++++++++++++
> include/hw/virtio/vhost-backend.h | 14 ++++++++++++++
> 2 files changed, 42 insertions(+)

Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>

>
>diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
>index 17f3fc6a0823..833804dd40f2 100644
>--- a/hw/virtio/vhost-backend.c
>+++ b/hw/virtio/vhost-backend.c
>@@ -158,6 +158,30 @@ static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
>     return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
> }
>
>+static int vhost_kernel_new_worker(struct vhost_dev *dev,
>+                                   struct vhost_worker_state *worker)
>+{
>+    return vhost_kernel_call(dev, VHOST_NEW_WORKER, worker);
>+}
>+
>+static int vhost_kernel_free_worker(struct vhost_dev *dev,
>+                                    struct vhost_worker_state *worker)
>+{
>+    return vhost_kernel_call(dev, VHOST_FREE_WORKER, worker);
>+}
>+
>+static int vhost_kernel_attach_vring_worker(struct vhost_dev *dev,
>+                                            struct vhost_vring_worker *worker)
>+{
>+    return vhost_kernel_call(dev, VHOST_ATTACH_VRING_WORKER, worker);
>+}
>+
>+static int vhost_kernel_get_vring_worker(struct vhost_dev *dev,
>+                                         struct vhost_vring_worker *worker)
>+{
>+    return vhost_kernel_call(dev, VHOST_GET_VRING_WORKER, worker);
>+}
>+
> static int vhost_kernel_set_features(struct vhost_dev *dev,
>                                      uint64_t features)
> {
>@@ -313,6 +337,10 @@ const VhostOps kernel_ops = {
>         .vhost_set_vring_err = vhost_kernel_set_vring_err,
>         .vhost_set_vring_busyloop_timeout =
>                                 vhost_kernel_set_vring_busyloop_timeout,
>+        .vhost_get_vring_worker = vhost_kernel_get_vring_worker,
>+        .vhost_attach_vring_worker = vhost_kernel_attach_vring_worker,
>+        .vhost_new_worker = vhost_kernel_new_worker,
>+        .vhost_free_worker = vhost_kernel_free_worker,
>         .vhost_set_features = vhost_kernel_set_features,
>         .vhost_get_features = vhost_kernel_get_features,
>         .vhost_set_backend_cap = vhost_kernel_set_backend_cap,
>diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
>index 96ccc18cd33b..9f16d0884e8f 100644
>--- a/include/hw/virtio/vhost-backend.h
>+++ b/include/hw/virtio/vhost-backend.h
>@@ -33,6 +33,8 @@ struct vhost_memory;
> struct vhost_vring_file;
> struct vhost_vring_state;
> struct vhost_vring_addr;
>+struct vhost_vring_worker;
>+struct vhost_worker_state;
> struct vhost_scsi_target;
> struct vhost_iotlb_msg;
> struct vhost_virtqueue;
>@@ -73,6 +75,14 @@ typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev,
>                                       struct vhost_vring_file *file);
> typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev,
>                                                    struct vhost_vring_state *r);
>+typedef int (*vhost_attach_vring_worker_op)(struct vhost_dev *dev,
>+                                            struct vhost_vring_worker *worker);
>+typedef int (*vhost_get_vring_worker_op)(struct vhost_dev *dev,
>+                                         struct vhost_vring_worker *worker);
>+typedef int (*vhost_new_worker_op)(struct vhost_dev *dev,
>+                                   struct vhost_worker_state *worker);
>+typedef int (*vhost_free_worker_op)(struct vhost_dev *dev,
>+                                    struct vhost_worker_state *worker);
> typedef int (*vhost_set_features_op)(struct vhost_dev *dev,
>                                      uint64_t features);
> typedef int (*vhost_get_features_op)(struct vhost_dev *dev,
>@@ -151,6 +161,10 @@ typedef struct VhostOps {
>     vhost_set_vring_call_op vhost_set_vring_call;
>     vhost_set_vring_err_op vhost_set_vring_err;
>     vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout;
>+    vhost_new_worker_op vhost_new_worker;
>+    vhost_free_worker_op vhost_free_worker;
>+    vhost_get_vring_worker_op vhost_get_vring_worker;
>+    vhost_attach_vring_worker_op vhost_attach_vring_worker;
>     vhost_set_features_op vhost_set_features;
>     vhost_get_features_op vhost_get_features;
>     vhost_set_backend_cap_op vhost_set_backend_cap;
>-- 
>2.34.1
>



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2 2/2] vhost-scsi: Add support for a worker thread per virtqueue
  2023-11-27  0:28 ` [PATCH v2 2/2] vhost-scsi: Add support for a worker thread per virtqueue Mike Christie
@ 2023-11-29  9:30   ` Stefano Garzarella
  2023-11-29 17:40     ` Mike Christie
  0 siblings, 1 reply; 7+ messages in thread
From: Stefano Garzarella @ 2023-11-29  9:30 UTC (permalink / raw)
  To: Mike Christie; +Cc: fam, stefanha, jasowang, mst, pbonzini, qemu-devel

On Sun, Nov 26, 2023 at 06:28:34PM -0600, Mike Christie wrote:
>This adds support for vhost-scsi to be able to create a worker thread
>per virtqueue. Right now for vhost-net we get a worker thread per
>tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>CPUs, but for scsi we get the single worker thread that's shared by all
>virtqueues. When trying to send IO to more than 2 virtqueues the single
>thread becomes a bottlneck.
>
>This patch adds a new setting, workers_per_virtqueue, which can be set
>to:
>
>false: Existing behavior where we get the single worker thread.
>true: Create a worker per IO virtqueue.
>
>Signed-off-by: Mike Christie <michael.christie@oracle.com>
>---
> hw/scsi/vhost-scsi.c            | 60 +++++++++++++++++++++++++++++++++
> include/hw/virtio/virtio-scsi.h |  1 +
> 2 files changed, 61 insertions(+)
>
>diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
>index 3126df9e1d9d..77eef9474c23 100644
>--- a/hw/scsi/vhost-scsi.c
>+++ b/hw/scsi/vhost-scsi.c
>@@ -165,6 +165,57 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
>     .pre_save = vhost_scsi_pre_save,
> };
>
>+static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
>+{
>+    struct vhost_dev *dev = &vsc->dev;
>+    struct vhost_vring_worker vq_worker;
>+    struct vhost_worker_state worker;
>+    int i, ret;
>+
>+    /* Use default worker */
>+    if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
>+        return 0;
>+    }
>+
>+    /*
>+     * ctl/evt share the first worker since it will be rare for them
>+     * to send cmds while IO is running.
>+     */
>+    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
>+        memset(&worker, 0, sizeof(worker));
>+
>+        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
>+        if (ret == -ENOTTY) {
>+            /*
>+             * worker ioctls are not implemented so just ignore and
>+             * and continue device setup.
>+             */

IIUC here the user has asked to use a worker for each virtqueue, but the
kernel does not support it so we ignore it.

Should we at least print a warning?

The rest LGTM!

Stefano

>+            ret = 0;
>+            break;
>+        } else if (ret) {
>+            break;
>+        }
>+
>+        memset(&vq_worker, 0, sizeof(vq_worker));
>+        vq_worker.worker_id = worker.worker_id;
>+        vq_worker.index = i;
>+
>+        ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
>+        if (ret == -ENOTTY) {
>+            /*
>+             * It's a bug for the kernel to have supported the worker creation
>+             * ioctl but not attach.
>+             */
>+            dev->vhost_ops->vhost_free_worker(dev, &worker);
>+            break;
>+        } else if (ret) {
>+            break;
>+        }
>+    }
>+
>+    return ret;
>+}
>+
> static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> {
>     VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
>@@ -232,6 +283,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
>         goto free_vqs;
>     }
>
>+    ret = vhost_scsi_set_workers(vsc, vs->conf.worker_per_virtqueue);
>+    if (ret < 0) {
>+        error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
>+                   strerror(-ret));
>+        goto free_vqs;
>+    }
>+
>     /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
>     vsc->channel = 0;
>     vsc->lun = 0;
>@@ -297,6 +355,8 @@ static Property vhost_scsi_properties[] = {
>                                                  VIRTIO_SCSI_F_T10_PI,
>                                                  false),
>     DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
>+    DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon,
>+                     conf.worker_per_virtqueue, false),
>     DEFINE_PROP_END_OF_LIST(),
> };
>
>diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
>index 779568ab5d28..0e9a1867665e 100644
>--- a/include/hw/virtio/virtio-scsi.h
>+++ b/include/hw/virtio/virtio-scsi.h
>@@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
> struct VirtIOSCSIConf {
>     uint32_t num_queues;
>     uint32_t virtqueue_size;
>+    bool worker_per_virtqueue;
>     bool seg_max_adjust;
>     uint32_t max_sectors;
>     uint32_t cmd_per_lun;
>-- 
>2.34.1
>



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2 0/2] vhost-scsi: Support worker ioctls
  2023-11-27  0:28 [PATCH v2 0/2] vhost-scsi: Support worker ioctls Mike Christie
  2023-11-27  0:28 ` [PATCH v2 1/2] vhost: Add worker backend callouts Mike Christie
  2023-11-27  0:28 ` [PATCH v2 2/2] vhost-scsi: Add support for a worker thread per virtqueue Mike Christie
@ 2023-11-29 13:43 ` Stefan Hajnoczi
  2 siblings, 0 replies; 7+ messages in thread
From: Stefan Hajnoczi @ 2023-11-29 13:43 UTC (permalink / raw)
  To: Mike Christie; +Cc: fam, jasowang, mst, sgarzare, pbonzini, qemu-devel

[-- Attachment #1: Type: text/plain, Size: 984 bytes --]

On Sun, Nov 26, 2023 at 06:28:32PM -0600, Mike Christie wrote:
> The following patches allow users to configure the vhost worker threads
> for vhost-scsi. With vhost-net we get a worker thread per rx/tx virtqueue
> pair, but for vhost-scsi we get one worker for all workqueues. This
> becomes a bottlneck after 2 queues are used.
> 
> In the upstream linux kernel commit:
> 
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/drivers/vhost/vhost.c?id=c1ecd8e9500797748ae4f79657971955d452d69d
> 
> we enabled the vhost layer to be able to create a worker thread and
> attach it to a virtqueue.
> 
> This patchset adds support to vhost-scsi to use these ioctls so we are
> no longer limited to the single worker.
> 
> v2:
> - Make config option a bool instead of an int.
> 
> 

Aside from Stefano's comment asking for a warning when the kernel
doesn't support the vhost worker ioctl:

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2 2/2] vhost-scsi: Add support for a worker thread per virtqueue
  2023-11-29  9:30   ` Stefano Garzarella
@ 2023-11-29 17:40     ` Mike Christie
  0 siblings, 0 replies; 7+ messages in thread
From: Mike Christie @ 2023-11-29 17:40 UTC (permalink / raw)
  To: Stefano Garzarella; +Cc: fam, stefanha, jasowang, mst, pbonzini, qemu-devel

On 11/29/23 3:30 AM, Stefano Garzarella wrote:
> On Sun, Nov 26, 2023 at 06:28:34PM -0600, Mike Christie wrote:
>> This adds support for vhost-scsi to be able to create a worker thread
>> per virtqueue. Right now for vhost-net we get a worker thread per
>> tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>> CPUs, but for scsi we get the single worker thread that's shared by all
>> virtqueues. When trying to send IO to more than 2 virtqueues the single
>> thread becomes a bottlneck.
>>
>> This patch adds a new setting, workers_per_virtqueue, which can be set
>> to:
>>
>> false: Existing behavior where we get the single worker thread.
>> true: Create a worker per IO virtqueue.
>>
>> Signed-off-by: Mike Christie <michael.christie@oracle.com>
>> ---
>> hw/scsi/vhost-scsi.c            | 60 +++++++++++++++++++++++++++++++++
>> include/hw/virtio/virtio-scsi.h |  1 +
>> 2 files changed, 61 insertions(+)
>>
>> diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
>> index 3126df9e1d9d..77eef9474c23 100644
>> --- a/hw/scsi/vhost-scsi.c
>> +++ b/hw/scsi/vhost-scsi.c
>> @@ -165,6 +165,57 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
>>     .pre_save = vhost_scsi_pre_save,
>> };
>>
>> +static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
>> +{
>> +    struct vhost_dev *dev = &vsc->dev;
>> +    struct vhost_vring_worker vq_worker;
>> +    struct vhost_worker_state worker;
>> +    int i, ret;
>> +
>> +    /* Use default worker */
>> +    if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
>> +        return 0;
>> +    }
>> +
>> +    /*
>> +     * ctl/evt share the first worker since it will be rare for them
>> +     * to send cmds while IO is running.
>> +     */
>> +    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
>> +        memset(&worker, 0, sizeof(worker));
>> +
>> +        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
>> +        if (ret == -ENOTTY) {
>> +            /*
>> +             * worker ioctls are not implemented so just ignore and
>> +             * and continue device setup.
>> +             */
> 
> IIUC here the user has asked to use a worker for each virtqueue, but the
> kernel does not support it so we ignore it.
> 
> Should we at least print a warning?
> 

We should. I'll add it.



^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2023-11-29 17:41 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-27  0:28 [PATCH v2 0/2] vhost-scsi: Support worker ioctls Mike Christie
2023-11-27  0:28 ` [PATCH v2 1/2] vhost: Add worker backend callouts Mike Christie
2023-11-29  9:26   ` Stefano Garzarella
2023-11-27  0:28 ` [PATCH v2 2/2] vhost-scsi: Add support for a worker thread per virtqueue Mike Christie
2023-11-29  9:30   ` Stefano Garzarella
2023-11-29 17:40     ` Mike Christie
2023-11-29 13:43 ` [PATCH v2 0/2] vhost-scsi: Support worker ioctls Stefan Hajnoczi

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.