All of lore.kernel.org
 help / color / mirror / Atom feed
From: Mike Christie <michael.christie@oracle.com>
To: target-devel@vger.kernel.org, linux-scsi@vger.kernel.org,
	stefanha@redhat.com, pbonzini@redhat.com, jasowang@redhat.com,
	mst@redhat.com, sgarzare@redhat.com,
	virtualization@lists.linux-foundation.org
Cc: Mike Christie <michael.christie@oracle.com>
Subject: [PATCH V4 12/12] vhost: allow worker attachment after initial setup
Date: Thu,  4 Nov 2021 14:05:02 -0500	[thread overview]
Message-ID: <20211104190502.7053-13-michael.christie@oracle.com> (raw)
In-Reply-To: <20211104190502.7053-1-michael.christie@oracle.com>

This patch allows userspace to change the vq-to-worker mapping while it's
in use, so tools can do this setup after device creation if needed.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---

 drivers/vhost/vhost.c      | 102 +++++++++++++++++++++++++------------
 drivers/vhost/vhost.h      |   2 +-
 include/uapi/linux/vhost.h |   2 +-
 3 files changed, 71 insertions(+), 35 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 20bf67a846f1..f47710a77853 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -232,12 +232,9 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-static void vhost_work_queue_on(struct vhost_worker *worker,
-				struct vhost_work *work)
+static void vhost_worker_work_queue(struct vhost_worker *worker,
+				    struct vhost_work *work)
 {
-	if (!worker)
-		return;
-
 	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 		/* We can only add the work to the list after we're
 		 * sure it was not in the list.
@@ -248,31 +245,45 @@ static void vhost_work_queue_on(struct vhost_worker *worker,
 	}
 }
 
-static void vhost_work_flush_on(struct vhost_worker *worker)
+void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
 {
-	struct vhost_flush_struct flush;
+	struct vhost_worker *worker;
 
-	if (!worker)
-		return;
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (worker)
+		vhost_worker_work_queue(worker, work);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
 
-	init_completion(&flush.wait_event);
-	vhost_work_init(&flush.work, vhost_flush_work);
+static void vhost_worker_flush_queue(struct vhost_worker *worker,
+				     struct vhost_flush_struct *flush)
+{
+	init_completion(&flush->wait_event);
+	vhost_work_init(&flush->work, vhost_flush_work);
 
-	vhost_work_queue_on(worker, &flush.work);
-	wait_for_completion(&flush.wait_event);
+	vhost_worker_work_queue(worker, &flush->work);
 }
 
 void vhost_vq_work_flush(struct vhost_virtqueue *vq)
 {
-	vhost_work_flush_on(vq->worker);
-}
-EXPORT_SYMBOL_GPL(vhost_vq_work_flush);
+	struct vhost_flush_struct flush;
+	struct vhost_worker *worker;
 
-void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
-{
-	vhost_work_queue_on(vq->worker, work);
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (!worker) {
+		rcu_read_unlock();
+		return;
+	}
+
+	vhost_worker_flush_queue(worker, &flush);
+	rcu_read_unlock();
+
+	wait_for_completion(&flush.wait_event);
 }
-EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+EXPORT_SYMBOL_GPL(vhost_vq_work_flush);
 
 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
@@ -285,7 +296,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
-	return vq->worker && !llist_empty(&vq->worker->work_list);
+	struct vhost_worker *worker;
+	bool has_work = false;
+
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (worker && !llist_empty(&worker->work_list))
+		has_work = true;
+	rcu_read_unlock();
+
+	return has_work;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_has_work);
 
@@ -510,7 +530,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 		vq->log = NULL;
 		vq->indirect = NULL;
 		vq->heads = NULL;
-		vq->worker = NULL;
+		rcu_assign_pointer(vq->worker, NULL);
 		vq->dev = dev;
 		mutex_init(&vq->mutex);
 		vhost_vq_reset(dev, vq);
@@ -590,11 +610,32 @@ static void vhost_worker_put(struct vhost_dev *dev, struct vhost_worker *worker)
 	kfree(worker);
 }
 
-static void vhost_vq_detach_worker(struct vhost_virtqueue *vq)
+static void vhost_vq_swap_worker(struct vhost_virtqueue *vq,
+				 struct vhost_worker *new_worker, bool flush)
 {
-	if (vq->worker)
-		vhost_worker_put(vq->dev, vq->worker);
-	vq->worker = NULL;
+	struct vhost_flush_struct flush_work;
+	struct vhost_worker *old_worker;
+
+	old_worker = rcu_dereference_check(vq->worker,
+					   lockdep_is_held(&vq->dev->mutex));
+	rcu_assign_pointer(vq->worker, new_worker);
+
+	if (!old_worker)
+		return;
+
+	if (flush) {
+		/*
+		 * For dev cleanup we won't have work running, but for the
+		 * dynamic attach case we might so make sure we see the new
+		 * worker and there is no work in the old worker.
+		 */
+		synchronize_rcu();
+
+		vhost_worker_flush_queue(old_worker, &flush_work);
+		wait_for_completion(&flush_work.wait_event);
+	}
+
+	vhost_worker_put(vq->dev, old_worker);
 }
 
 static int vhost_workers_idr_iter(int id, void *worker, void *dev)
@@ -611,7 +652,7 @@ static void vhost_workers_free(struct vhost_dev *dev)
 		return;
 
 	for (i = 0; i < dev->nvqs; i++)
-		vhost_vq_detach_worker(dev->vqs[i]);
+		vhost_vq_swap_worker(dev->vqs[i], NULL, false);
 
 	idr_for_each(&dev->worker_idr, vhost_workers_idr_iter, dev);
 }
@@ -667,18 +708,13 @@ static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 	if (!dev->use_worker)
 		return -EINVAL;
 
-	/* We don't support setting a worker on an active vq */
-	if (vq->private_data)
-		return -EBUSY;
-
 	worker = idr_find(&dev->worker_idr, info->worker_id);
 	if (!worker)
 		return -ENODEV;
 
 	refcount_inc(&worker->refcount);
 
-	vhost_vq_detach_worker(vq);
-	vq->worker = worker;
+	vhost_vq_swap_worker(vq, worker, true);
 	return 0;
 }
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3786e3537f1..607e95887942 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -81,7 +81,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 	struct vhost_dev *dev;
-	struct vhost_worker *worker;
+	struct vhost_worker __rcu *worker;
 
 	/* The actual ring of buffers. */
 	struct mutex mutex;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 117ea92b3925..e0221c8ce877 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -88,7 +88,7 @@
 #define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
 #define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
 /* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
- * virtqueues. This must be done before the virtqueue is active.
+ * virtqueues.
  */
 #define VHOST_ATTACH_VRING_WORKER _IOR(VHOST_VIRTIO, 0x15,		\
 				       struct vhost_vring_worker)
-- 
2.25.1


WARNING: multiple messages have this Message-ID (diff)
From: Mike Christie <michael.christie@oracle.com>
To: target-devel@vger.kernel.org, linux-scsi@vger.kernel.org,
	stefanha@redhat.com, pbonzini@redhat.com, jasowang@redhat.com,
	mst@redhat.com, sgarzare@redhat.com,
	virtualization@lists.linux-foundation.org
Subject: [PATCH V4 12/12] vhost: allow worker attachment after initial setup
Date: Thu,  4 Nov 2021 14:05:02 -0500	[thread overview]
Message-ID: <20211104190502.7053-13-michael.christie@oracle.com> (raw)
In-Reply-To: <20211104190502.7053-1-michael.christie@oracle.com>

This patch allows userspace to change the vq-to-worker mapping while it's
in use, so tools can do this setup after device creation if needed.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---

 drivers/vhost/vhost.c      | 102 +++++++++++++++++++++++++------------
 drivers/vhost/vhost.h      |   2 +-
 include/uapi/linux/vhost.h |   2 +-
 3 files changed, 71 insertions(+), 35 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 20bf67a846f1..f47710a77853 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -232,12 +232,9 @@ void vhost_poll_stop(struct vhost_poll *poll)
 }
 EXPORT_SYMBOL_GPL(vhost_poll_stop);
 
-static void vhost_work_queue_on(struct vhost_worker *worker,
-				struct vhost_work *work)
+static void vhost_worker_work_queue(struct vhost_worker *worker,
+				    struct vhost_work *work)
 {
-	if (!worker)
-		return;
-
 	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
 		/* We can only add the work to the list after we're
 		 * sure it was not in the list.
@@ -248,31 +245,45 @@ static void vhost_work_queue_on(struct vhost_worker *worker,
 	}
 }
 
-static void vhost_work_flush_on(struct vhost_worker *worker)
+void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
 {
-	struct vhost_flush_struct flush;
+	struct vhost_worker *worker;
 
-	if (!worker)
-		return;
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (worker)
+		vhost_worker_work_queue(worker, work);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
 
-	init_completion(&flush.wait_event);
-	vhost_work_init(&flush.work, vhost_flush_work);
+static void vhost_worker_flush_queue(struct vhost_worker *worker,
+				     struct vhost_flush_struct *flush)
+{
+	init_completion(&flush->wait_event);
+	vhost_work_init(&flush->work, vhost_flush_work);
 
-	vhost_work_queue_on(worker, &flush.work);
-	wait_for_completion(&flush.wait_event);
+	vhost_worker_work_queue(worker, &flush->work);
 }
 
 void vhost_vq_work_flush(struct vhost_virtqueue *vq)
 {
-	vhost_work_flush_on(vq->worker);
-}
-EXPORT_SYMBOL_GPL(vhost_vq_work_flush);
+	struct vhost_flush_struct flush;
+	struct vhost_worker *worker;
 
-void vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
-{
-	vhost_work_queue_on(vq->worker, work);
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (!worker) {
+		rcu_read_unlock();
+		return;
+	}
+
+	vhost_worker_flush_queue(worker, &flush);
+	rcu_read_unlock();
+
+	wait_for_completion(&flush.wait_event);
 }
-EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
+EXPORT_SYMBOL_GPL(vhost_vq_work_flush);
 
 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
@@ -285,7 +296,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
 /* A lockless hint for busy polling code to exit the loop */
 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
 {
-	return vq->worker && !llist_empty(&vq->worker->work_list);
+	struct vhost_worker *worker;
+	bool has_work = false;
+
+	rcu_read_lock();
+	worker = rcu_dereference(vq->worker);
+	if (worker && !llist_empty(&worker->work_list))
+		has_work = true;
+	rcu_read_unlock();
+
+	return has_work;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_has_work);
 
@@ -510,7 +530,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 		vq->log = NULL;
 		vq->indirect = NULL;
 		vq->heads = NULL;
-		vq->worker = NULL;
+		rcu_assign_pointer(vq->worker, NULL);
 		vq->dev = dev;
 		mutex_init(&vq->mutex);
 		vhost_vq_reset(dev, vq);
@@ -590,11 +610,32 @@ static void vhost_worker_put(struct vhost_dev *dev, struct vhost_worker *worker)
 	kfree(worker);
 }
 
-static void vhost_vq_detach_worker(struct vhost_virtqueue *vq)
+static void vhost_vq_swap_worker(struct vhost_virtqueue *vq,
+				 struct vhost_worker *new_worker, bool flush)
 {
-	if (vq->worker)
-		vhost_worker_put(vq->dev, vq->worker);
-	vq->worker = NULL;
+	struct vhost_flush_struct flush_work;
+	struct vhost_worker *old_worker;
+
+	old_worker = rcu_dereference_check(vq->worker,
+					   lockdep_is_held(&vq->dev->mutex));
+	rcu_assign_pointer(vq->worker, new_worker);
+
+	if (!old_worker)
+		return;
+
+	if (flush) {
+		/*
+		 * For dev cleanup we won't have work running, but for the
+		 * dynamic attach case we might so make sure we see the new
+		 * worker and there is no work in the old worker.
+		 */
+		synchronize_rcu();
+
+		vhost_worker_flush_queue(old_worker, &flush_work);
+		wait_for_completion(&flush_work.wait_event);
+	}
+
+	vhost_worker_put(vq->dev, old_worker);
 }
 
 static int vhost_workers_idr_iter(int id, void *worker, void *dev)
@@ -611,7 +652,7 @@ static void vhost_workers_free(struct vhost_dev *dev)
 		return;
 
 	for (i = 0; i < dev->nvqs; i++)
-		vhost_vq_detach_worker(dev->vqs[i]);
+		vhost_vq_swap_worker(dev->vqs[i], NULL, false);
 
 	idr_for_each(&dev->worker_idr, vhost_workers_idr_iter, dev);
 }
@@ -667,18 +708,13 @@ static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
 	if (!dev->use_worker)
 		return -EINVAL;
 
-	/* We don't support setting a worker on an active vq */
-	if (vq->private_data)
-		return -EBUSY;
-
 	worker = idr_find(&dev->worker_idr, info->worker_id);
 	if (!worker)
 		return -ENODEV;
 
 	refcount_inc(&worker->refcount);
 
-	vhost_vq_detach_worker(vq);
-	vq->worker = worker;
+	vhost_vq_swap_worker(vq, worker, true);
 	return 0;
 }
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b3786e3537f1..607e95887942 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -81,7 +81,7 @@ struct vhost_vring_call {
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 	struct vhost_dev *dev;
-	struct vhost_worker *worker;
+	struct vhost_worker __rcu *worker;
 
 	/* The actual ring of buffers. */
 	struct mutex mutex;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 117ea92b3925..e0221c8ce877 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -88,7 +88,7 @@
 #define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
 #define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
 /* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
- * virtqueues. This must be done before the virtqueue is active.
+ * virtqueues.
  */
 #define VHOST_ATTACH_VRING_WORKER _IOR(VHOST_VIRTIO, 0x15,		\
 				       struct vhost_vring_worker)
-- 
2.25.1

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

  parent reply	other threads:[~2021-11-04 19:05 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-11-04 19:04 [PATCH V4 00/12] vhost: multiple worker support Mike Christie
2021-11-04 19:04 ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 01/12] vhost: add vhost_worker pointer to vhost_virtqueue Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 02/12] vhost, vhost-net: add helper to check if vq has work Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 03/12] vhost: take worker or vq instead of dev for queueing Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 04/12] vhost: take worker or vq instead of dev for flushing Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 05/12] vhost: convert poll work to be vq based Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 06/12] vhost-sock: convert to vq helpers Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 07/12] vhost-scsi: make SCSI cmd completion per vq Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 08/12] vhost-scsi: convert to vq helpers Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:04 ` [PATCH V4 09/12] vhost-scsi: flush IO vqs then send TMF rsp Mike Christie
2021-11-04 19:04   ` Mike Christie
2021-11-04 19:05 ` [PATCH V4 10/12] vhost: remove device wide queue/flushing helpers Mike Christie
2021-11-04 19:05   ` Mike Christie
2021-11-04 19:05 ` [PATCH V4 11/12] vhost: allow userspace to create workers Mike Christie
2021-11-04 19:05   ` Mike Christie
2021-11-04 19:05 ` Mike Christie [this message]
2021-11-04 19:05   ` [PATCH V4 12/12] vhost: allow worker attachment after initial setup Mike Christie

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20211104190502.7053-13-michael.christie@oracle.com \
    --to=michael.christie@oracle.com \
    --cc=jasowang@redhat.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=mst@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=sgarzare@redhat.com \
    --cc=stefanha@redhat.com \
    --cc=target-devel@vger.kernel.org \
    --cc=virtualization@lists.linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.