From: Mike Christie <michael.christie@oracle.com> To: hch@infradead.org, stefanha@redhat.com, jasowang@redhat.com, mst@redhat.com, sgarzare@redhat.com, virtualization@lists.linux-foundation.org, brauner@kernel.org, ebiederm@xmission.com, torvalds@linux-foundation.org, konrad.wilk@oracle.com, linux-kernel@vger.kernel.org Cc: Mike Christie <michael.christie@oracle.com> Subject: [PATCH v11 6/8] vhost_task: Allow vhost layer to use copy_process Date: Thu, 2 Feb 2023 17:25:15 -0600 [thread overview] Message-ID: <20230202232517.8695-7-michael.christie@oracle.com> (raw) In-Reply-To: <20230202232517.8695-1-michael.christie@oracle.com> Qemu will create vhost devices in the kernel which perform network, SCSI, etc IO and management operations from worker threads created by the kthread API. Because the kthread API does a copy_process on the kthreadd thread, the vhost layer has to use kthread_use_mm to access the Qemu thread's memory and cgroup_attach_task_all to add itself to the Qemu thread's cgroups, and it bypasses the RLIMIT_NPROC limit which can result in VMs creating more threads than the admin expected. This patch adds a new struct vhost_task which can be used instead of kthreads. They allow the vhost layer to use copy_process and inherit the userspace process's mm and cgroups, the task is accounted for under the userspace's nproc count and can be seen in its process tree, and other features like namespaces work and are inherited by default. Signed-off-by: Mike Christie <michael.christie@oracle.com> Acked-by: Michael S. 
Tsirkin <mst@redhat.com> --- MAINTAINERS | 2 + drivers/vhost/Kconfig | 5 ++ include/linux/sched/vhost_task.h | 23 ++++++ kernel/Makefile | 1 + kernel/vhost_task.c | 122 +++++++++++++++++++++++++++++++ 5 files changed, 153 insertions(+) create mode 100644 include/linux/sched/vhost_task.h create mode 100644 kernel/vhost_task.c diff --git a/MAINTAINERS b/MAINTAINERS index 8a5c25c20d00..5f7a3b3af7aa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22125,7 +22125,9 @@ L: virtualization@lists.linux-foundation.org L: netdev@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git +F: kernel/vhost_task.c F: drivers/vhost/ +F: include/linux/sched/vhost_task.h F: include/linux/vhost_iotlb.h F: include/uapi/linux/vhost.h diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index 587fbae06182..b455d9ab6f3d 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -13,9 +13,14 @@ config VHOST_RING This option is selected by any driver which needs to access the host side of a virtio ring. +config VHOST_TASK + bool + default n + config VHOST tristate select VHOST_IOTLB + select VHOST_TASK help This option is selected by any driver which needs to access the core of vhost. 
diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h new file mode 100644 index 000000000000..50d02a25d37b --- /dev/null +++ b/include/linux/sched/vhost_task.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VHOST_TASK_H +#define _LINUX_VHOST_TASK_H + +#include <linux/completion.h> + +struct task_struct; + +struct vhost_task { + int (*fn)(void *data); + void *data; + struct completion exited; + unsigned long flags; + struct task_struct *task; +}; + +struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg, int node); +__printf(2, 3) +void vhost_task_start(struct vhost_task *vtsk, const char namefmt[], ...); +void vhost_task_stop(struct vhost_task *vtsk); +bool vhost_task_should_stop(struct vhost_task *vtsk); + +#endif diff --git a/kernel/Makefile b/kernel/Makefile index 10ef068f598d..6fc72b3afbde 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -15,6 +15,7 @@ obj-y = fork.o exec_domain.o panic.o \ obj-$(CONFIG_USERMODE_DRIVER) += usermode_driver.o obj-$(CONFIG_MODULES) += kmod.o obj-$(CONFIG_MULTIUSER) += groups.o +obj-$(CONFIG_VHOST_TASK) += vhost_task.o ifdef CONFIG_FUNCTION_TRACER # Do not trace internal ftrace files diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c new file mode 100644 index 000000000000..517dd166bb2b --- /dev/null +++ b/kernel/vhost_task.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Oracle Corporation + */ +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/sched/task.h> +#include <linux/sched/vhost_task.h> +#include <linux/sched/signal.h> + +enum vhost_task_flags { + VHOST_TASK_FLAGS_STOP, +}; + +static int vhost_task_fn(void *data) +{ + struct vhost_task *vtsk = data; + int ret; + + ret = vtsk->fn(vtsk->data); + complete(&vtsk->exited); + do_exit(ret); +} + +/** + * vhost_task_stop - stop a vhost_task + * @vtsk: vhost_task to stop + * + * Callers must call vhost_task_should_stop and return from their worker 
+ * function when it returns true; + */ +void vhost_task_stop(struct vhost_task *vtsk) +{ + pid_t pid = vtsk->task->pid; + + set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); + wake_up_process(vtsk->task); + /* + * Make sure vhost_task_fn is no longer accessing the vhost_task before + * freeing it below. If userspace crashed or exited without closing, + * then the vhost_task->task could already be marked dead so + * kernel_wait will return early. + */ + wait_for_completion(&vtsk->exited); + /* + * If we are just closing/removing a device and the parent process is + * not exiting then reap the task. + */ + kernel_wait4(pid, NULL, __WCLONE, NULL); + kfree(vtsk); +} +EXPORT_SYMBOL_GPL(vhost_task_stop); + +/** + * vhost_task_should_stop - should the vhost task return from the work function + */ +bool vhost_task_should_stop(struct vhost_task *vtsk) +{ + return test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); +} +EXPORT_SYMBOL_GPL(vhost_task_should_stop); + +/** + * vhost_task_create - create a copy of a process to be used by the kernel + * @fn: thread stack + * @arg: data to be passed to fn + * @node: numa node to allocate task from + * + * This returns a specialized task for use by the vhost layer or NULL on + * failure. The returned task is inactive, and the caller must fire it up + * through vhost_task_start(). 
+ */ +struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg, int node) +{ + struct kernel_clone_args args = { + .flags = CLONE_FS | CLONE_UNTRACED | CLONE_VM, + .exit_signal = 0, + .worker_flags = USER_WORKER | USER_WORKER_NO_FILES | + USER_WORKER_SIG_IGN, + .fn = vhost_task_fn, + }; + struct vhost_task *vtsk; + struct task_struct *tsk; + + vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL); + if (!vtsk) + return ERR_PTR(-ENOMEM); + init_completion(&vtsk->exited); + vtsk->data = arg; + vtsk->fn = fn; + + args.fn_arg = vtsk; + + tsk = copy_process(NULL, 0, node, &args); + if (IS_ERR(tsk)) { + kfree(vtsk); + return NULL; + } + + vtsk->task = tsk; + return vtsk; +} +EXPORT_SYMBOL_GPL(vhost_task_create); + +/** + * vhost_task_start - start a vhost_task created with vhost_task_create + * @vtsk: vhost_task to wake up + * @namefmt: printf-style format string for the thread name + */ +void vhost_task_start(struct vhost_task *vtsk, const char namefmt[], ...) +{ + char name[TASK_COMM_LEN]; + va_list args; + + va_start(args, namefmt); + vsnprintf(name, sizeof(name), namefmt, args); + set_task_comm(vtsk->task, name); + va_end(args); + + wake_up_new_task(vtsk->task); +} +EXPORT_SYMBOL_GPL(vhost_task_start); -- 2.25.1
WARNING: multiple messages have this Message-ID (diff)
From: Mike Christie <michael.christie@oracle.com> To: hch@infradead.org, stefanha@redhat.com, jasowang@redhat.com, mst@redhat.com, sgarzare@redhat.com, virtualization@lists.linux-foundation.org, brauner@kernel.org, ebiederm@xmission.com, torvalds@linux-foundation.org, konrad.wilk@oracle.com, linux-kernel@vger.kernel.org Subject: [PATCH v11 6/8] vhost_task: Allow vhost layer to use copy_process Date: Thu, 2 Feb 2023 17:25:15 -0600 [thread overview] Message-ID: <20230202232517.8695-7-michael.christie@oracle.com> (raw) In-Reply-To: <20230202232517.8695-1-michael.christie@oracle.com> Qemu will create vhost devices in the kernel which perform network, SCSI, etc IO and management operations from worker threads created by the kthread API. Because the kthread API does a copy_process on the kthreadd thread, the vhost layer has to use kthread_use_mm to access the Qemu thread's memory and cgroup_attach_task_all to add itself to the Qemu thread's cgroups, and it bypasses the RLIMIT_NPROC limit which can result in VMs creating more threads than the admin expected. This patch adds a new struct vhost_task which can be used instead of kthreads. They allow the vhost layer to use copy_process and inherit the userspace process's mm and cgroups, the task is accounted for under the userspace's nproc count and can be seen in its process tree, and other features like namespaces work and are inherited by default. Signed-off-by: Mike Christie <michael.christie@oracle.com> Acked-by: Michael S. 
Tsirkin <mst@redhat.com> --- MAINTAINERS | 2 + drivers/vhost/Kconfig | 5 ++ include/linux/sched/vhost_task.h | 23 ++++++ kernel/Makefile | 1 + kernel/vhost_task.c | 122 +++++++++++++++++++++++++++++++ 5 files changed, 153 insertions(+) create mode 100644 include/linux/sched/vhost_task.h create mode 100644 kernel/vhost_task.c diff --git a/MAINTAINERS b/MAINTAINERS index 8a5c25c20d00..5f7a3b3af7aa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22125,7 +22125,9 @@ L: virtualization@lists.linux-foundation.org L: netdev@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git +F: kernel/vhost_task.c F: drivers/vhost/ +F: include/linux/sched/vhost_task.h F: include/linux/vhost_iotlb.h F: include/uapi/linux/vhost.h diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index 587fbae06182..b455d9ab6f3d 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -13,9 +13,14 @@ config VHOST_RING This option is selected by any driver which needs to access the host side of a virtio ring. +config VHOST_TASK + bool + default n + config VHOST tristate select VHOST_IOTLB + select VHOST_TASK help This option is selected by any driver which needs to access the core of vhost. 
diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h new file mode 100644 index 000000000000..50d02a25d37b --- /dev/null +++ b/include/linux/sched/vhost_task.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VHOST_TASK_H +#define _LINUX_VHOST_TASK_H + +#include <linux/completion.h> + +struct task_struct; + +struct vhost_task { + int (*fn)(void *data); + void *data; + struct completion exited; + unsigned long flags; + struct task_struct *task; +}; + +struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg, int node); +__printf(2, 3) +void vhost_task_start(struct vhost_task *vtsk, const char namefmt[], ...); +void vhost_task_stop(struct vhost_task *vtsk); +bool vhost_task_should_stop(struct vhost_task *vtsk); + +#endif diff --git a/kernel/Makefile b/kernel/Makefile index 10ef068f598d..6fc72b3afbde 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -15,6 +15,7 @@ obj-y = fork.o exec_domain.o panic.o \ obj-$(CONFIG_USERMODE_DRIVER) += usermode_driver.o obj-$(CONFIG_MODULES) += kmod.o obj-$(CONFIG_MULTIUSER) += groups.o +obj-$(CONFIG_VHOST_TASK) += vhost_task.o ifdef CONFIG_FUNCTION_TRACER # Do not trace internal ftrace files diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c new file mode 100644 index 000000000000..517dd166bb2b --- /dev/null +++ b/kernel/vhost_task.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Oracle Corporation + */ +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/sched/task.h> +#include <linux/sched/vhost_task.h> +#include <linux/sched/signal.h> + +enum vhost_task_flags { + VHOST_TASK_FLAGS_STOP, +}; + +static int vhost_task_fn(void *data) +{ + struct vhost_task *vtsk = data; + int ret; + + ret = vtsk->fn(vtsk->data); + complete(&vtsk->exited); + do_exit(ret); +} + +/** + * vhost_task_stop - stop a vhost_task + * @vtsk: vhost_task to stop + * + * Callers must call vhost_task_should_stop and return from their worker 
+ * function when it returns true; + */ +void vhost_task_stop(struct vhost_task *vtsk) +{ + pid_t pid = vtsk->task->pid; + + set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); + wake_up_process(vtsk->task); + /* + * Make sure vhost_task_fn is no longer accessing the vhost_task before + * freeing it below. If userspace crashed or exited without closing, + * then the vhost_task->task could already be marked dead so + * kernel_wait will return early. + */ + wait_for_completion(&vtsk->exited); + /* + * If we are just closing/removing a device and the parent process is + * not exiting then reap the task. + */ + kernel_wait4(pid, NULL, __WCLONE, NULL); + kfree(vtsk); +} +EXPORT_SYMBOL_GPL(vhost_task_stop); + +/** + * vhost_task_should_stop - should the vhost task return from the work function + */ +bool vhost_task_should_stop(struct vhost_task *vtsk) +{ + return test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); +} +EXPORT_SYMBOL_GPL(vhost_task_should_stop); + +/** + * vhost_task_create - create a copy of a process to be used by the kernel + * @fn: thread stack + * @arg: data to be passed to fn + * @node: numa node to allocate task from + * + * This returns a specialized task for use by the vhost layer or NULL on + * failure. The returned task is inactive, and the caller must fire it up + * through vhost_task_start(). 
+ */ +struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg, int node) +{ + struct kernel_clone_args args = { + .flags = CLONE_FS | CLONE_UNTRACED | CLONE_VM, + .exit_signal = 0, + .worker_flags = USER_WORKER | USER_WORKER_NO_FILES | + USER_WORKER_SIG_IGN, + .fn = vhost_task_fn, + }; + struct vhost_task *vtsk; + struct task_struct *tsk; + + vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL); + if (!vtsk) + return ERR_PTR(-ENOMEM); + init_completion(&vtsk->exited); + vtsk->data = arg; + vtsk->fn = fn; + + args.fn_arg = vtsk; + + tsk = copy_process(NULL, 0, node, &args); + if (IS_ERR(tsk)) { + kfree(vtsk); + return NULL; + } + + vtsk->task = tsk; + return vtsk; +} +EXPORT_SYMBOL_GPL(vhost_task_create); + +/** + * vhost_task_start - start a vhost_task created with vhost_task_create + * @vtsk: vhost_task to wake up + * @namefmt: printf-style format string for the thread name + */ +void vhost_task_start(struct vhost_task *vtsk, const char namefmt[], ...) +{ + char name[TASK_COMM_LEN]; + va_list args; + + va_start(args, namefmt); + vsnprintf(name, sizeof(name), namefmt, args); + set_task_comm(vtsk->task, name); + va_end(args); + + wake_up_new_task(vtsk->task); +} +EXPORT_SYMBOL_GPL(vhost_task_start); -- 2.25.1 _______________________________________________ Virtualization mailing list Virtualization@lists.linux-foundation.org https://lists.linuxfoundation.org/mailman/listinfo/virtualization
next prev parent reply other threads:[~2023-02-02 23:26 UTC|newest] Thread overview: 176+ messages / expand[flat|nested] mbox.gz Atom feed top 2023-02-02 23:25 [PATCH v11 0/8] Use copy_process in vhost layer Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-02-02 23:25 ` [PATCH v11 1/8] fork: Make IO worker options flag based Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-02-03 0:14 ` Linus Torvalds 2023-02-03 0:14 ` Linus Torvalds 2023-02-02 23:25 ` [PATCH v11 2/8] fork/vm: Move common PF_IO_WORKER behavior to new flag Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-02-02 23:25 ` [PATCH v11 3/8] fork: add USER_WORKER flag to not dup/clone files Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-02-03 0:16 ` Linus Torvalds 2023-02-03 0:16 ` Linus Torvalds 2023-02-02 23:25 ` [PATCH v11 4/8] fork: Add USER_WORKER flag to ignore signals Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-02-03 0:19 ` Linus Torvalds 2023-02-03 0:19 ` Linus Torvalds 2023-02-05 16:06 ` Mike Christie 2023-02-05 16:06 ` Mike Christie 2023-02-02 23:25 ` [PATCH v11 5/8] fork: allow kernel code to call copy_process Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-02-02 23:25 ` Mike Christie [this message] 2023-02-02 23:25 ` [PATCH v11 6/8] vhost_task: Allow vhost layer to use copy_process Mike Christie 2023-02-03 0:43 ` Linus Torvalds 2023-02-03 0:43 ` Linus Torvalds 2023-02-02 23:25 ` [PATCH v11 7/8] vhost: move worker thread fields to new struct Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-02-02 23:25 ` [PATCH v11 8/8] vhost: use vhost_tasks for worker threads Mike Christie 2023-02-02 23:25 ` Mike Christie 2023-05-05 13:40 ` Nicolas Dichtel 2023-05-05 18:22 ` Linus Torvalds 2023-05-05 18:22 ` Linus Torvalds 2023-05-05 22:37 ` Mike Christie 2023-05-05 22:37 ` Mike Christie 2023-05-06 1:53 ` Linus Torvalds 2023-05-06 1:53 ` Linus Torvalds 2023-05-08 17:13 ` Christian Brauner 2023-05-09 8:09 ` Nicolas Dichtel 2023-05-09 8:17 ` Nicolas Dichtel 2023-05-13 12:39 ` Thorsten 
Leemhuis 2023-05-13 12:39 ` Thorsten Leemhuis 2023-05-13 15:08 ` Linus Torvalds 2023-05-13 15:08 ` Linus Torvalds 2023-05-15 14:23 ` Christian Brauner 2023-05-15 15:44 ` Linus Torvalds 2023-05-15 15:44 ` Linus Torvalds 2023-05-15 15:52 ` Jens Axboe 2023-05-15 15:52 ` Jens Axboe 2023-05-15 15:54 ` Linus Torvalds 2023-05-15 15:54 ` Linus Torvalds 2023-05-15 17:23 ` Linus Torvalds 2023-05-15 17:23 ` Linus Torvalds 2023-05-15 15:56 ` Linus Torvalds 2023-05-15 15:56 ` Linus Torvalds 2023-05-15 22:23 ` Mike Christie 2023-05-15 22:23 ` Mike Christie 2023-05-15 22:54 ` Linus Torvalds 2023-05-15 22:54 ` Linus Torvalds 2023-05-16 3:53 ` Mike Christie 2023-05-16 3:53 ` Mike Christie 2023-05-16 13:18 ` Oleg Nesterov 2023-05-16 13:18 ` Oleg Nesterov 2023-05-16 13:40 ` Oleg Nesterov 2023-05-16 13:40 ` Oleg Nesterov 2023-05-16 15:56 ` Eric W. Biederman 2023-05-16 15:56 ` Eric W. Biederman 2023-05-16 18:37 ` Oleg Nesterov 2023-05-16 18:37 ` Oleg Nesterov 2023-05-16 20:12 ` Eric W. Biederman 2023-05-16 20:12 ` Eric W. 
Biederman 2023-05-17 17:09 ` Oleg Nesterov 2023-05-17 17:09 ` Oleg Nesterov 2023-05-17 18:22 ` Mike Christie 2023-05-17 18:22 ` Mike Christie 2023-05-16 8:39 ` Christian Brauner 2023-05-16 16:24 ` Mike Christie 2023-05-16 16:24 ` Mike Christie 2023-05-16 16:44 ` Christian Brauner 2023-05-19 12:15 ` [RFC PATCH 0/8] vhost_tasks: Use CLONE_THREAD/SIGHAND Christian Brauner 2023-06-01 7:58 ` Thorsten Leemhuis 2023-06-01 7:58 ` Thorsten Leemhuis 2023-06-01 10:18 ` Nicolas Dichtel 2023-06-01 10:47 ` Christian Brauner 2023-06-01 11:29 ` Thorsten Leemhuis 2023-06-01 11:29 ` Thorsten Leemhuis 2023-06-01 12:26 ` Linus Torvalds 2023-06-01 12:26 ` Linus Torvalds 2023-06-01 16:10 ` Mike Christie 2023-06-01 16:10 ` Mike Christie 2023-05-16 14:06 ` [PATCH v11 8/8] vhost: use vhost_tasks for worker threads Linux regression tracking #adding (Thorsten Leemhuis) 2023-05-26 9:03 ` Linux regression tracking #update (Thorsten Leemhuis) 2023-06-02 11:38 ` Thorsten Leemhuis 2023-07-20 13:06 ` Michael S. Tsirkin 2023-07-20 13:06 ` Michael S. Tsirkin 2023-07-23 4:03 ` michael.christie 2023-07-23 4:03 ` michael.christie 2023-07-23 9:31 ` Michael S. Tsirkin 2023-07-23 9:31 ` Michael S. Tsirkin 2023-08-10 18:57 ` Michael S. Tsirkin 2023-08-10 18:57 ` Michael S. Tsirkin 2023-08-11 18:51 ` Mike Christie 2023-08-11 18:51 ` Mike Christie 2023-08-13 19:01 ` Michael S. Tsirkin 2023-08-13 19:01 ` Michael S. Tsirkin 2023-08-14 3:13 ` michael.christie 2023-08-14 3:13 ` michael.christie 2023-02-07 8:19 ` [PATCH v11 0/8] Use copy_process in vhost layer Christian Brauner 2023-05-18 0:09 [RFC PATCH 0/8] vhost_tasks: Use CLONE_THREAD/SIGHAND Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 0:09 ` [RFC PATCH 1/8] signal: Dequeue SIGKILL even if SIGNAL_GROUP_EXIT/group_exec_task is set Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 2:34 ` Eric W. Biederman 2023-05-18 2:34 ` Eric W. Biederman 2023-05-18 3:49 ` Eric W. Biederman 2023-05-18 3:49 ` Eric W. 
Biederman 2023-05-18 15:21 ` Mike Christie 2023-05-18 15:21 ` Mike Christie 2023-05-18 16:25 ` Oleg Nesterov 2023-05-18 16:25 ` Oleg Nesterov 2023-05-18 16:42 ` Mike Christie 2023-05-18 16:42 ` Mike Christie 2023-05-18 17:04 ` Oleg Nesterov 2023-05-18 17:04 ` Oleg Nesterov 2023-05-18 18:28 ` Eric W. Biederman 2023-05-18 18:28 ` Eric W. Biederman 2023-05-18 22:57 ` Mike Christie 2023-05-18 22:57 ` Mike Christie 2023-05-19 4:16 ` Eric W. Biederman 2023-05-19 4:16 ` Eric W. Biederman 2023-05-19 23:24 ` Mike Christie 2023-05-19 23:24 ` Mike Christie 2023-05-22 13:30 ` Oleg Nesterov 2023-05-22 13:30 ` Oleg Nesterov 2023-05-18 8:08 ` Christian Brauner 2023-05-18 15:27 ` Mike Christie 2023-05-18 15:27 ` Mike Christie 2023-05-18 17:07 ` Christian Brauner 2023-05-18 18:08 ` Oleg Nesterov 2023-05-18 18:08 ` Oleg Nesterov 2023-05-18 18:12 ` Christian Brauner 2023-05-18 18:23 ` Oleg Nesterov 2023-05-18 18:23 ` Oleg Nesterov 2023-05-18 0:09 ` [RFC PATCH 2/8] vhost/vhost_task: Hook vhost layer into signal handler Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 0:16 ` Linus Torvalds 2023-05-18 0:16 ` Linus Torvalds 2023-05-18 1:01 ` Mike Christie 2023-05-18 1:01 ` Mike Christie 2023-05-18 8:16 ` Christian Brauner 2023-05-18 0:09 ` [RFC PATCH 3/8] fork/vhost_task: Switch to CLONE_THREAD and CLONE_SIGHAND Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 8:18 ` Christian Brauner 2023-05-18 0:09 ` [RFC PATCH 4/8] vhost-net: Move vhost_net_open Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 0:09 ` [RFC PATCH 5/8] vhost: Add callback that stops new work and waits on running ones Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 14:18 ` Christian Brauner 2023-05-18 15:03 ` Mike Christie 2023-05-18 15:03 ` Mike Christie 2023-05-18 15:09 ` Christian Brauner 2023-05-18 18:38 ` Eric W. Biederman 2023-05-18 18:38 ` Eric W. 
Biederman 2023-05-18 0:09 ` [RFC PATCH 6/8] vhost-scsi: Add callback to stop and wait on works Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 0:09 ` [RFC PATCH 7/8] vhost-net: " Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 0:09 ` [RFC PATCH 8/8] fork/vhost_task: remove no_files Mike Christie 2023-05-18 0:09 ` Mike Christie 2023-05-18 1:04 ` Mike Christie 2023-05-18 1:04 ` Mike Christie 2023-05-18 12:31 ` kernel test robot 2023-05-18 15:30 ` kernel test robot 2023-05-18 23:14 ` kernel test robot 2023-05-19 7:26 ` kernel test robot 2023-05-18 8:25 ` [RFC PATCH 0/8] vhost_tasks: Use CLONE_THREAD/SIGHAND Christian Brauner 2023-05-18 8:40 ` Christian Brauner 2023-05-18 14:30 ` Christian Brauner
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20230202232517.8695-7-michael.christie@oracle.com \ --to=michael.christie@oracle.com \ --cc=brauner@kernel.org \ --cc=ebiederm@xmission.com \ --cc=hch@infradead.org \ --cc=jasowang@redhat.com \ --cc=konrad.wilk@oracle.com \ --cc=linux-kernel@vger.kernel.org \ --cc=mst@redhat.com \ --cc=sgarzare@redhat.com \ --cc=stefanha@redhat.com \ --cc=torvalds@linux-foundation.org \ --cc=virtualization@lists.linux-foundation.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.