From: Mike Christie <michael.christie@oracle.com>
To: hch@infradead.org, stefanha@redhat.com, jasowang@redhat.com,
	mst@redhat.com, sgarzare@redhat.com,
	virtualization@lists.linux-foundation.org, brauner@kernel.org,
	ebiederm@xmission.com, torvalds@linux-foundation.org,
	konrad.wilk@oracle.com, linux-kernel@vger.kernel.org
Cc: Mike Christie <michael.christie@oracle.com>
Subject: [PATCH v11 8/8] vhost: use vhost_tasks for worker threads
Date: Thu,  2 Feb 2023 17:25:17 -0600
Message-ID: <20230202232517.8695-9-michael.christie@oracle.com>
In-Reply-To: <20230202232517.8695-1-michael.christie@oracle.com>

For vhost workers we use the kthread API, which inherits its values
from and checks limits against the kthreadd thread. This results in
the wrong RLIMITs being checked: tools like libvirt try to control
the number of threads via the nproc rlimit setting, but we can end up
creating more threads than the user wanted.

This patch has us use the vhost_task helpers, which inherit their
values/checks from the thread that owns the device, similar to doing
a clone in userspace. The vhost threads will now be counted against
the nproc rlimit, and we get features like cgroup and mm sharing
automatically, so we can remove those calls.
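
For reference, the lifecycle this converts to, condensed from the
diff below into a sketch (signatures inferred from their use in this
patch; linux/sched/vhost_task.h is authoritative):

	struct vhost_task *vtsk;

	/* Clones from current (the device owner), so the new thread is
	 * counted against the owner's nproc rlimit and shares its
	 * cgroups and mm without explicit attach calls. */
	vtsk = vhost_task_create(vhost_worker, worker, NUMA_NO_NODE);
	if (!vtsk)			/* returns NULL, not an ERR_PTR */
		return -ENOMEM;
	worker->vtsk = vtsk;
	vhost_task_start(vtsk, "vhost-%d", current->pid);

	/* In the worker function, poll for exit instead of calling
	 * kthread_should_stop(): */
	if (vhost_task_should_stop(vtsk))
		/* clean up and return */;

	/* On teardown, replaces kthread_stop(): */
	vhost_task_stop(vtsk);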

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
---
 drivers/vhost/vhost.c | 58 ++++++++-----------------------------------
 drivers/vhost/vhost.h |  4 +--
 2 files changed, 13 insertions(+), 49 deletions(-)

diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 74378d241f8d..d3c7c37b69a7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,11 +22,11 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/kthread.h>
-#include <linux/cgroup.h>
 #include <linux/module.h>
 #include <linux/sort.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/vhost_task.h>
 #include <linux/interval_tree_generic.h>
 #include <linux/nospec.h>
 #include <linux/kcov.h>
@@ -256,7 +256,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 		 * test_and_set_bit() implies a memory barrier.
 		 */
 		llist_add(&work->node, &dev->worker->work_list);
-		wake_up_process(dev->worker->task);
+		wake_up_process(dev->worker->vtsk->task);
 	}
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
@@ -336,17 +336,14 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 static int vhost_worker(void *data)
 {
 	struct vhost_worker *worker = data;
-	struct vhost_dev *dev = worker->dev;
 	struct vhost_work *work, *work_next;
 	struct llist_node *node;
 
-	kthread_use_mm(dev->mm);
-
 	for (;;) {
 		/* mb paired w/ kthread_stop */
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		if (kthread_should_stop()) {
+		if (vhost_task_should_stop(worker->vtsk)) {
 			__set_current_state(TASK_RUNNING);
 			break;
 		}
@@ -368,7 +365,7 @@ static int vhost_worker(void *data)
 				schedule();
 		}
 	}
-	kthread_unuse_mm(dev->mm);
+
 	return 0;
 }
 
@@ -509,31 +506,6 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 
-struct vhost_attach_cgroups_struct {
-	struct vhost_work work;
-	struct task_struct *owner;
-	int ret;
-};
-
-static void vhost_attach_cgroups_work(struct vhost_work *work)
-{
-	struct vhost_attach_cgroups_struct *s;
-
-	s = container_of(work, struct vhost_attach_cgroups_struct, work);
-	s->ret = cgroup_attach_task_all(s->owner, current);
-}
-
-static int vhost_attach_cgroups(struct vhost_dev *dev)
-{
-	struct vhost_attach_cgroups_struct attach;
-
-	attach.owner = current;
-	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
-	vhost_work_queue(dev, &attach.work);
-	vhost_dev_flush(dev);
-	return attach.ret;
-}
-
 /* Caller should have device mutex */
 bool vhost_dev_has_owner(struct vhost_dev *dev)
 {
@@ -580,14 +552,14 @@ static void vhost_worker_free(struct vhost_dev *dev)
 
 	dev->worker = NULL;
 	WARN_ON(!llist_empty(&worker->work_list));
-	kthread_stop(worker->task);
+	vhost_task_stop(worker->vtsk);
 	kfree(worker);
 }
 
 static int vhost_worker_create(struct vhost_dev *dev)
 {
 	struct vhost_worker *worker;
-	struct task_struct *task;
+	struct vhost_task *vtsk;
 	int ret;
 
 	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
@@ -595,27 +567,19 @@ static int vhost_worker_create(struct vhost_dev *dev)
 		return -ENOMEM;
 
 	dev->worker = worker;
-	worker->dev = dev;
 	worker->kcov_handle = kcov_common_handle();
 	init_llist_head(&worker->work_list);
 
-	task = kthread_create(vhost_worker, worker, "vhost-%d", current->pid);
-	if (IS_ERR(task)) {
-		ret = PTR_ERR(task);
+	vtsk = vhost_task_create(vhost_worker, worker, NUMA_NO_NODE);
+	if (!vtsk) {
+		ret = -ENOMEM;
 		goto free_worker;
 	}
 
-	worker->task = task;
-	wake_up_process(task); /* avoid contributing to loadavg */
-
-	ret = vhost_attach_cgroups(dev);
-	if (ret)
-		goto stop_worker;
-
+	worker->vtsk = vtsk;
+	vhost_task_start(vtsk, "vhost-%d", current->pid);
 	return 0;
 
-stop_worker:
-	kthread_stop(worker->task);
 free_worker:
 	kfree(worker);
 	dev->worker = NULL;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2f6beab93784..3af59c65025e 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -16,6 +16,7 @@
 #include <linux/irqbypass.h>
 
 struct vhost_work;
+struct vhost_task;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);
 
 #define VHOST_WORK_QUEUED 1
@@ -26,9 +27,8 @@ struct vhost_work {
 };
 
 struct vhost_worker {
-	struct task_struct	*task;
+	struct vhost_task	*vtsk;
 	struct llist_head	work_list;
-	struct vhost_dev	*dev;
 	u64			kcov_handle;
 };
 
-- 
2.25.1

