linux-fsdevel.vger.kernel.org archive mirror
* [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
@ 2020-04-10 11:47 zhe.he
  2020-05-12  7:00 ` He Zhe
                   ` (2 more replies)
  0 siblings, 3 replies; 13+ messages in thread
From: zhe.he @ 2020-04-10 11:47 UTC
  To: viro, axboe, linux-fsdevel, linux-kernel, zhe.he

From: He Zhe <zhe.he@windriver.com>

commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
introduces a percpu counter that tracks the per-CPU recursion depth and
warns if it is greater than zero, to avoid potential deadlock and stack
overflow.
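
(For context, a simplified sketch of what that commit added to
fs/eventfd.c -- abridged, so treat the details as approximate. The
percpu counter is raised around the wakeup, so a nested
eventfd_signal() on the same CPU sees a non-zero count:)

DEFINE_PER_CPU(int, eventfd_wake_count);

__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/* A non-zero count means another eventfd_signal() is already
	 * running on this CPU: warn once and drop the event. */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;	/* saturate, don't wrap */
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}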

However, sometimes different eventfds may be used in parallel. Specifically,
when heavy network load goes through kvm and vhost, as in the perf profile
below, the following call trace is triggered.

-  100.00%
   - 66.51%
        ret_from_fork
        kthread
      - vhost_worker
         - 33.47% handle_tx_kick
              handle_tx
              handle_tx_copy
              vhost_tx_batch.isra.0
              vhost_add_used_and_signal_n
              eventfd_signal
         - 33.05% handle_rx_net
              handle_rx
              vhost_add_used_and_signal_n
              eventfd_signal
   - 33.49%
        ioctl
        entry_SYSCALL_64_after_hwframe
        do_syscall_64
        __x64_sys_ioctl
        ksys_ioctl
        do_vfs_ioctl
        kvm_vcpu_ioctl
        kvm_arch_vcpu_ioctl_run
        vmx_handle_exit
        handle_ept_misconfig
        kvm_io_bus_write
        __kvm_io_bus_write
        eventfd_signal

001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
---- snip ----
001: Call Trace:
001:  vhost_signal+0x15e/0x1b0 [vhost]
001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
001:  handle_rx+0xb9/0x900 [vhost_net]
001:  handle_rx_net+0x15/0x20 [vhost_net]
001:  vhost_worker+0xbe/0x120 [vhost]
001:  kthread+0x106/0x140
001:  ? log_used.part.0+0x20/0x20 [vhost]
001:  ? kthread_park+0x90/0x90
001:  ret_from_fork+0x35/0x40
001: ---[ end trace 0000000000000003 ]---

This patch enlarges the limit to 1, which is the maximum recursion depth we
have found so far.
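
One plausible interleaving behind the warning (illustrative only, not
taken from the report; on PREEMPT_RT the wqh lock is a sleeping lock,
so a task can be preempted while the percpu count is still raised):

  CPU 1:
    vCPU thread:  eventfd_signal(ioeventfd)  count: 0 -> 1
      ... preempted inside the wakeup ...
    vhost worker: eventfd_signal(call fd)    reads count == 1 -> WARN

Two unrelated eventfds on one CPU are thus enough to trip the check
with no real recursion.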

Signed-off-by: He Zhe <zhe.he@windriver.com>
---
 fs/eventfd.c            | 3 ++-
 include/linux/eventfd.h | 3 +++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/fs/eventfd.c b/fs/eventfd.c
index 78e41c7c3d05..8b9bd6fb08cd 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -70,7 +70,8 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 	 * it returns true, the eventfd_signal() call should be deferred to a
 	 * safe context.
 	 */
-	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count) >
+	    EFD_WAKE_COUNT_MAX))
 		return 0;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index dc4fd8a6644d..e7684d768e3f 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -29,6 +29,9 @@
 #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
 
+/* This is the maximum recursion depth we have found so far */
+#define EFD_WAKE_COUNT_MAX 1
+
 struct eventfd_ctx;
 struct file;
 
-- 
2.17.1
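
(An aside on the comment shown in the first hunk above: callers that
may run nested are expected to defer eventfd_signal() to a safe
context. A sketch of that deferral pattern follows -- hypothetical
driver code, all names invented for illustration, not taken from any
in-tree subsystem:)

#include <linux/eventfd.h>
#include <linux/workqueue.h>

struct my_dev {
	struct eventfd_ctx *done_ctx;	/* signalled on completion */
	struct work_struct signal_work;	/* INIT_WORK'd to my_signal_work */
};

/* Runs in process context, where signalling is always safe. */
static void my_signal_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, signal_work);

	eventfd_signal(dev->done_ctx, 1);
}

static void my_notify(struct my_dev *dev)
{
	/* If another eventfd_signal() is already in flight on this CPU,
	 * signalling inline would trip the recursion check, so punt to
	 * a workqueue instead. */
	if (eventfd_signal_count())
		schedule_work(&dev->signal_work);
	else
		eventfd_signal(dev->done_ctx, 1);
}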



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-04-10 11:47 [PATCH] eventfd: Enlarge recursion limit to allow vhost to work zhe.he
@ 2020-05-12  7:00 ` He Zhe
  2020-06-22  9:09 ` He Zhe
  2020-07-03  8:12 ` Juri Lelli
  2 siblings, 0 replies; 13+ messages in thread
From: He Zhe @ 2020-05-12  7:00 UTC
  To: viro, axboe, linux-fsdevel, linux-kernel

Could this be considered at this point?
This is actually v2 of
"[PATCH 1/2] eventfd: Make wake counter work for single fd instead of all".

Thanks,
Zhe

On 4/10/20 7:47 PM, zhe.he@windriver.com wrote:
> From: He Zhe <zhe.he@windriver.com>
>
> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
> introduces a percpu counter that tracks the per-CPU recursion depth and
> warns if it is greater than zero, to avoid potential deadlock and stack
> overflow.
>
> However, sometimes different eventfds may be used in parallel. Specifically,
> when heavy network load goes through kvm and vhost, as in the perf profile
> below, the following call trace is triggered.
>
> -  100.00%
>    - 66.51%
>         ret_from_fork
>         kthread
>       - vhost_worker
>          - 33.47% handle_tx_kick
>               handle_tx
>               handle_tx_copy
>               vhost_tx_batch.isra.0
>               vhost_add_used_and_signal_n
>               eventfd_signal
>          - 33.05% handle_rx_net
>               handle_rx
>               vhost_add_used_and_signal_n
>               eventfd_signal
>    - 33.49%
>         ioctl
>         entry_SYSCALL_64_after_hwframe
>         do_syscall_64
>         __x64_sys_ioctl
>         ksys_ioctl
>         do_vfs_ioctl
>         kvm_vcpu_ioctl
>         kvm_arch_vcpu_ioctl_run
>         vmx_handle_exit
>         handle_ept_misconfig
>         kvm_io_bus_write
>         __kvm_io_bus_write
>         eventfd_signal
>
> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
> ---- snip ----
> 001: Call Trace:
> 001:  vhost_signal+0x15e/0x1b0 [vhost]
> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
> 001:  handle_rx+0xb9/0x900 [vhost_net]
> 001:  handle_rx_net+0x15/0x20 [vhost_net]
> 001:  vhost_worker+0xbe/0x120 [vhost]
> 001:  kthread+0x106/0x140
> 001:  ? log_used.part.0+0x20/0x20 [vhost]
> 001:  ? kthread_park+0x90/0x90
> 001:  ret_from_fork+0x35/0x40
> 001: ---[ end trace 0000000000000003 ]---
>
> This patch enlarges the limit to 1, which is the maximum recursion depth we
> have found so far.
>
> Signed-off-by: He Zhe <zhe.he@windriver.com>
> ---
>  fs/eventfd.c            | 3 ++-
>  include/linux/eventfd.h | 3 +++
>  2 files changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/fs/eventfd.c b/fs/eventfd.c
> index 78e41c7c3d05..8b9bd6fb08cd 100644
> --- a/fs/eventfd.c
> +++ b/fs/eventfd.c
> @@ -70,7 +70,8 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
>  	 * it returns true, the eventfd_signal() call should be deferred to a
>  	 * safe context.
>  	 */
> -	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
> +	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count) >
> +	    EFD_WAKE_COUNT_MAX))
>  		return 0;
>  
>  	spin_lock_irqsave(&ctx->wqh.lock, flags);
> diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
> index dc4fd8a6644d..e7684d768e3f 100644
> --- a/include/linux/eventfd.h
> +++ b/include/linux/eventfd.h
> @@ -29,6 +29,9 @@
>  #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
>  #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
>  
> +/* This is the maximum recursion depth we have found so far */
> +#define EFD_WAKE_COUNT_MAX 1
> +
>  struct eventfd_ctx;
>  struct file;
>  



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-04-10 11:47 [PATCH] eventfd: Enlarge recursion limit to allow vhost to work zhe.he
  2020-05-12  7:00 ` He Zhe
@ 2020-06-22  9:09 ` He Zhe
  2020-07-03  8:12 ` Juri Lelli
  2 siblings, 0 replies; 13+ messages in thread
From: He Zhe @ 2020-06-22  9:09 UTC
  To: axboe, linux-fsdevel, linux-kernel

Could this be considered at this point?
This is actually v2 of
"[PATCH 1/2] eventfd: Make wake counter work for single fd instead of all".

Thanks,
Zhe



On 4/10/20 7:47 PM, zhe.he@windriver.com wrote:
> From: He Zhe <zhe.he@windriver.com>
>
> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
> introduces a percpu counter that tracks the per-CPU recursion depth and
> warns if it is greater than zero, to avoid potential deadlock and stack
> overflow.
>
> However, sometimes different eventfds may be used in parallel. Specifically,
> when heavy network load goes through kvm and vhost, as in the perf profile
> below, the following call trace is triggered.
>
> -  100.00%
>    - 66.51%
>         ret_from_fork
>         kthread
>       - vhost_worker
>          - 33.47% handle_tx_kick
>               handle_tx
>               handle_tx_copy
>               vhost_tx_batch.isra.0
>               vhost_add_used_and_signal_n
>               eventfd_signal
>          - 33.05% handle_rx_net
>               handle_rx
>               vhost_add_used_and_signal_n
>               eventfd_signal
>    - 33.49%
>         ioctl
>         entry_SYSCALL_64_after_hwframe
>         do_syscall_64
>         __x64_sys_ioctl
>         ksys_ioctl
>         do_vfs_ioctl
>         kvm_vcpu_ioctl
>         kvm_arch_vcpu_ioctl_run
>         vmx_handle_exit
>         handle_ept_misconfig
>         kvm_io_bus_write
>         __kvm_io_bus_write
>         eventfd_signal
>
> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
> ---- snip ----
> 001: Call Trace:
> 001:  vhost_signal+0x15e/0x1b0 [vhost]
> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
> 001:  handle_rx+0xb9/0x900 [vhost_net]
> 001:  handle_rx_net+0x15/0x20 [vhost_net]
> 001:  vhost_worker+0xbe/0x120 [vhost]
> 001:  kthread+0x106/0x140
> 001:  ? log_used.part.0+0x20/0x20 [vhost]
> 001:  ? kthread_park+0x90/0x90
> 001:  ret_from_fork+0x35/0x40
> 001: ---[ end trace 0000000000000003 ]---
>
> This patch enlarges the limit to 1, which is the maximum recursion depth we
> have found so far.
>
> Signed-off-by: He Zhe <zhe.he@windriver.com>
> ---
>  fs/eventfd.c            | 3 ++-
>  include/linux/eventfd.h | 3 +++
>  2 files changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/fs/eventfd.c b/fs/eventfd.c
> index 78e41c7c3d05..8b9bd6fb08cd 100644
> --- a/fs/eventfd.c
> +++ b/fs/eventfd.c
> @@ -70,7 +70,8 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
>  	 * it returns true, the eventfd_signal() call should be deferred to a
>  	 * safe context.
>  	 */
> -	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
> +	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count) >
> +	    EFD_WAKE_COUNT_MAX))
>  		return 0;
>  
>  	spin_lock_irqsave(&ctx->wqh.lock, flags);
> diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
> index dc4fd8a6644d..e7684d768e3f 100644
> --- a/include/linux/eventfd.h
> +++ b/include/linux/eventfd.h
> @@ -29,6 +29,9 @@
>  #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
>  #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
>  
> +/* This is the maximum recursion depth we have found so far */
> +#define EFD_WAKE_COUNT_MAX 1
> +
>  struct eventfd_ctx;
>  struct file;
>  



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-04-10 11:47 [PATCH] eventfd: Enlarge recursion limit to allow vhost to work zhe.he
  2020-05-12  7:00 ` He Zhe
  2020-06-22  9:09 ` He Zhe
@ 2020-07-03  8:12 ` Juri Lelli
  2020-07-03 11:11   ` He Zhe
  2 siblings, 1 reply; 13+ messages in thread
From: Juri Lelli @ 2020-07-03  8:12 UTC
  To: zhe.he; +Cc: viro, axboe, linux-fsdevel, linux-kernel

Hi,

On 10/04/20 19:47, zhe.he@windriver.com wrote:
> From: He Zhe <zhe.he@windriver.com>
> 
> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
> introduces a percpu counter that tracks the per-CPU recursion depth and
> warns if it is greater than zero, to avoid potential deadlock and stack
> overflow.
> 
> However, sometimes different eventfds may be used in parallel. Specifically,
> when heavy network load goes through kvm and vhost, as in the perf profile
> below, the following call trace is triggered.
> 
> -  100.00%
>    - 66.51%
>         ret_from_fork
>         kthread
>       - vhost_worker
>          - 33.47% handle_tx_kick
>               handle_tx
>               handle_tx_copy
>               vhost_tx_batch.isra.0
>               vhost_add_used_and_signal_n
>               eventfd_signal
>          - 33.05% handle_rx_net
>               handle_rx
>               vhost_add_used_and_signal_n
>               eventfd_signal
>    - 33.49%
>         ioctl
>         entry_SYSCALL_64_after_hwframe
>         do_syscall_64
>         __x64_sys_ioctl
>         ksys_ioctl
>         do_vfs_ioctl
>         kvm_vcpu_ioctl
>         kvm_arch_vcpu_ioctl_run
>         vmx_handle_exit
>         handle_ept_misconfig
>         kvm_io_bus_write
>         __kvm_io_bus_write
>         eventfd_signal
> 
> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
> ---- snip ----
> 001: Call Trace:
> 001:  vhost_signal+0x15e/0x1b0 [vhost]
> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
> 001:  handle_rx+0xb9/0x900 [vhost_net]
> 001:  handle_rx_net+0x15/0x20 [vhost_net]
> 001:  vhost_worker+0xbe/0x120 [vhost]
> 001:  kthread+0x106/0x140
> 001:  ? log_used.part.0+0x20/0x20 [vhost]
> 001:  ? kthread_park+0x90/0x90
> 001:  ret_from_fork+0x35/0x40
> 001: ---[ end trace 0000000000000003 ]---
> 
> This patch enlarges the limit to 1, which is the maximum recursion depth we
> have found so far.
> 
> Signed-off-by: He Zhe <zhe.he@windriver.com>
> ---

Not sure if this approach can fly, but I also encountered the same
warning (which further caused hangs during VM install) and this change
addresses that.

I'd be interested in understanding the status of this problem/fix.

On a side note, while looking at the code, I noticed that (apart from
samples) no caller actually checks the eventfd_signal() return value,
and I'm wondering why that is the case and whether it is safe to do so.

Thanks,

Juri



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-07-03  8:12 ` Juri Lelli
@ 2020-07-03 11:11   ` He Zhe
  2020-07-06  6:45     ` Juri Lelli
  0 siblings, 1 reply; 13+ messages in thread
From: He Zhe @ 2020-07-03 11:11 UTC
  To: Juri Lelli; +Cc: viro, axboe, linux-fsdevel, linux-kernel



On 7/3/20 4:12 PM, Juri Lelli wrote:
> Hi,
>
> On 10/04/20 19:47, zhe.he@windriver.com wrote:
>> From: He Zhe <zhe.he@windriver.com>
>>
>> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
>> introduces a percpu counter that tracks the per-CPU recursion depth and
>> warns if it is greater than zero, to avoid potential deadlock and stack
>> overflow.
>>
>> However, sometimes different eventfds may be used in parallel. Specifically,
>> when heavy network load goes through kvm and vhost, as in the perf profile
>> below, the following call trace is triggered.
>>
>> -  100.00%
>>    - 66.51%
>>         ret_from_fork
>>         kthread
>>       - vhost_worker
>>          - 33.47% handle_tx_kick
>>               handle_tx
>>               handle_tx_copy
>>               vhost_tx_batch.isra.0
>>               vhost_add_used_and_signal_n
>>               eventfd_signal
>>          - 33.05% handle_rx_net
>>               handle_rx
>>               vhost_add_used_and_signal_n
>>               eventfd_signal
>>    - 33.49%
>>         ioctl
>>         entry_SYSCALL_64_after_hwframe
>>         do_syscall_64
>>         __x64_sys_ioctl
>>         ksys_ioctl
>>         do_vfs_ioctl
>>         kvm_vcpu_ioctl
>>         kvm_arch_vcpu_ioctl_run
>>         vmx_handle_exit
>>         handle_ept_misconfig
>>         kvm_io_bus_write
>>         __kvm_io_bus_write
>>         eventfd_signal
>>
>> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
>> ---- snip ----
>> 001: Call Trace:
>> 001:  vhost_signal+0x15e/0x1b0 [vhost]
>> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
>> 001:  handle_rx+0xb9/0x900 [vhost_net]
>> 001:  handle_rx_net+0x15/0x20 [vhost_net]
>> 001:  vhost_worker+0xbe/0x120 [vhost]
>> 001:  kthread+0x106/0x140
>> 001:  ? log_used.part.0+0x20/0x20 [vhost]
>> 001:  ? kthread_park+0x90/0x90
>> 001:  ret_from_fork+0x35/0x40
>> 001: ---[ end trace 0000000000000003 ]---
>>
>> This patch enlarges the limit to 1, which is the maximum recursion depth we
>> have found so far.
>>
>> Signed-off-by: He Zhe <zhe.he@windriver.com>
>> ---
> Not sure if this approach can fly, but I also encountered the same
> warning (which further caused hangs during VM install) and this change
> addresses that.
>
> I'd be interested in understanding the status of this problem/fix.

This is actually v2 of the patch and has not received any reply yet. Here is v1, FYI.
https://lore.kernel.org/lkml/1586257192-58369-1-git-send-email-zhe.he@windriver.com/

> On a side note, while looking at the code, I noticed that (apart from
> samples) no caller actually checks the eventfd_signal() return value,
> and I'm wondering why that is the case and whether it is safe to do so.

Checking the return value right after sending the signal can tell us whether
the event counter has just overflowed, that is, hit ULLONG_MAX. I guess the
authors of the callers listed in the commit log just don't worry about that,
since they only add one to a dedicated eventfd.
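
To make that concrete, a simplified excerpt of the saturation logic in
eventfd_signal() (abridged, details approximate). The amount actually
added is clamped and returned, so a caller adding 1 gets 0 back when
the counter already sits at ULLONG_MAX:

	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;	/* clamp, don't wrap */
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	...
	return n;	/* 0 means nothing could be added */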

Zhe

>
> Thanks,
>
> Juri
>



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-07-03 11:11   ` He Zhe
@ 2020-07-06  6:45     ` Juri Lelli
  2020-07-13 13:22       ` Juri Lelli
  0 siblings, 1 reply; 13+ messages in thread
From: Juri Lelli @ 2020-07-06  6:45 UTC
  To: He Zhe; +Cc: viro, axboe, linux-fsdevel, linux-kernel

On 03/07/20 19:11, He Zhe wrote:
> 
> 
> On 7/3/20 4:12 PM, Juri Lelli wrote:
> > Hi,
> >
> > On 10/04/20 19:47, zhe.he@windriver.com wrote:
> >> From: He Zhe <zhe.he@windriver.com>
> >>
> >> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
> >> introduces a percpu counter that tracks the per-CPU recursion depth and
> >> warns if it is greater than zero, to avoid potential deadlock and stack
> >> overflow.
> >>
> >> However, sometimes different eventfds may be used in parallel. Specifically,
> >> when heavy network load goes through kvm and vhost, as in the perf profile
> >> below, the following call trace is triggered.
> >>
> >> -  100.00%
> >>    - 66.51%
> >>         ret_from_fork
> >>         kthread
> >>       - vhost_worker
> >>          - 33.47% handle_tx_kick
> >>               handle_tx
> >>               handle_tx_copy
> >>               vhost_tx_batch.isra.0
> >>               vhost_add_used_and_signal_n
> >>               eventfd_signal
> >>          - 33.05% handle_rx_net
> >>               handle_rx
> >>               vhost_add_used_and_signal_n
> >>               eventfd_signal
> >>    - 33.49%
> >>         ioctl
> >>         entry_SYSCALL_64_after_hwframe
> >>         do_syscall_64
> >>         __x64_sys_ioctl
> >>         ksys_ioctl
> >>         do_vfs_ioctl
> >>         kvm_vcpu_ioctl
> >>         kvm_arch_vcpu_ioctl_run
> >>         vmx_handle_exit
> >>         handle_ept_misconfig
> >>         kvm_io_bus_write
> >>         __kvm_io_bus_write
> >>         eventfd_signal
> >>
> >> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
> >> ---- snip ----
> >> 001: Call Trace:
> >> 001:  vhost_signal+0x15e/0x1b0 [vhost]
> >> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
> >> 001:  handle_rx+0xb9/0x900 [vhost_net]
> >> 001:  handle_rx_net+0x15/0x20 [vhost_net]
> >> 001:  vhost_worker+0xbe/0x120 [vhost]
> >> 001:  kthread+0x106/0x140
> >> 001:  ? log_used.part.0+0x20/0x20 [vhost]
> >> 001:  ? kthread_park+0x90/0x90
> >> 001:  ret_from_fork+0x35/0x40
> >> 001: ---[ end trace 0000000000000003 ]---
> >>
> >> This patch enlarges the limit to 1, which is the maximum recursion depth we
> >> have found so far.
> >>
> >> Signed-off-by: He Zhe <zhe.he@windriver.com>
> >> ---
> > Not sure if this approach can fly, but I also encountered the same
> > warning (which further caused hangs during VM install) and this change
> > addresses that.
> >
> > I'd be interested in understanding the status of this problem/fix.
> 
> This is actually v2 of the patch and has not received any reply yet. Here is v1, FYI.
> https://lore.kernel.org/lkml/1586257192-58369-1-git-send-email-zhe.he@windriver.com/

I see, thanks. Hope this gets reviewed soon! :-)

> > On a side note, while looking at the code, I noticed that (apart from
> > samples) no caller actually checks the eventfd_signal() return value,
> > and I'm wondering why that is the case and whether it is safe to do so.
> 
> Checking the return value right after sending the signal can tell us whether
> the event counter has just overflowed, that is, hit ULLONG_MAX. I guess the
> authors of the callers listed in the commit log just don't worry about that,
> since they only add one to a dedicated eventfd.

OK. I was mostly wondering whether returning early when the WARN_ON_ONCE
fires would cause a missed wakeup on the eventfd_ctx wait queue.

Best,

Juri



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-07-06  6:45     ` Juri Lelli
@ 2020-07-13 13:22       ` Juri Lelli
  2020-07-22  9:01         ` Juri Lelli
  0 siblings, 1 reply; 13+ messages in thread
From: Juri Lelli @ 2020-07-13 13:22 UTC
  To: He Zhe; +Cc: viro, axboe, linux-fsdevel, linux-kernel

Hi,

On 06/07/20 08:45, Juri Lelli wrote:
> On 03/07/20 19:11, He Zhe wrote:
> > 
> > 
> > On 7/3/20 4:12 PM, Juri Lelli wrote:
> > > Hi,
> > >
> > > On 10/04/20 19:47, zhe.he@windriver.com wrote:
> > >> From: He Zhe <zhe.he@windriver.com>
> > >>
> > >> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
> > >> introduces a percpu counter that tracks the per-CPU recursion depth and
> > >> warns if it is greater than zero, to avoid potential deadlock and stack
> > >> overflow.
> > >>
> > >> However, sometimes different eventfds may be used in parallel. Specifically,
> > >> when heavy network load goes through kvm and vhost, as in the perf profile
> > >> below, the following call trace is triggered.
> > >>
> > >> -  100.00%
> > >>    - 66.51%
> > >>         ret_from_fork
> > >>         kthread
> > >>       - vhost_worker
> > >>          - 33.47% handle_tx_kick
> > >>               handle_tx
> > >>               handle_tx_copy
> > >>               vhost_tx_batch.isra.0
> > >>               vhost_add_used_and_signal_n
> > >>               eventfd_signal
> > >>          - 33.05% handle_rx_net
> > >>               handle_rx
> > >>               vhost_add_used_and_signal_n
> > >>               eventfd_signal
> > >>    - 33.49%
> > >>         ioctl
> > >>         entry_SYSCALL_64_after_hwframe
> > >>         do_syscall_64
> > >>         __x64_sys_ioctl
> > >>         ksys_ioctl
> > >>         do_vfs_ioctl
> > >>         kvm_vcpu_ioctl
> > >>         kvm_arch_vcpu_ioctl_run
> > >>         vmx_handle_exit
> > >>         handle_ept_misconfig
> > >>         kvm_io_bus_write
> > >>         __kvm_io_bus_write
> > >>         eventfd_signal
> > >>
> > >> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
> > >> ---- snip ----
> > >> 001: Call Trace:
> > >> 001:  vhost_signal+0x15e/0x1b0 [vhost]
> > >> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
> > >> 001:  handle_rx+0xb9/0x900 [vhost_net]
> > >> 001:  handle_rx_net+0x15/0x20 [vhost_net]
> > >> 001:  vhost_worker+0xbe/0x120 [vhost]
> > >> 001:  kthread+0x106/0x140
> > >> 001:  ? log_used.part.0+0x20/0x20 [vhost]
> > >> 001:  ? kthread_park+0x90/0x90
> > >> 001:  ret_from_fork+0x35/0x40
> > >> 001: ---[ end trace 0000000000000003 ]---
> > >>
> > >> This patch enlarges the limit to 1, which is the maximum recursion depth we
> > >> have found so far.
> > >>
> > >> Signed-off-by: He Zhe <zhe.he@windriver.com>
> > >> ---
> > > Not sure if this approach can fly, but I also encountered the same
> > > warning (which further caused hangs during VM install) and this change
> > > addresses that.
> > >
> > > I'd be interested in understanding the status of this problem/fix.
> > 
> > This is actually v2 of the patch and has not received any reply yet. Here is v1, FYI.
> > https://lore.kernel.org/lkml/1586257192-58369-1-git-send-email-zhe.he@windriver.com/
> 
> I see, thanks. Hope this gets reviewed soon! :-)
> 
> > > On a side note, while looking at the code, I noticed that (apart from
> > > samples) no caller actually checks the eventfd_signal() return value,
> > > and I'm wondering why that is the case and whether it is safe to do so.
> > 
> > Checking the return value right after sending the signal can tell us whether
> > the event counter has just overflowed, that is, hit ULLONG_MAX. I guess the
> > authors of the callers listed in the commit log just don't worry about that,
> > since they only add one to a dedicated eventfd.
> 
> OK. I was mostly wondering whether returning early when the WARN_ON_ONCE
> fires would cause a missed wakeup on the eventfd_ctx wait queue.

Gentle ping about this issue (mainly addressing relevant maintainers and
potential reviewers). It's easily reproducible with PREEMPT_RT.

Thanks,

Juri



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-07-13 13:22       ` Juri Lelli
@ 2020-07-22  9:01         ` Juri Lelli
  2020-08-20 10:41           ` He Zhe
  0 siblings, 1 reply; 13+ messages in thread
From: Juri Lelli @ 2020-07-22  9:01 UTC
  To: He Zhe; +Cc: viro, axboe, linux-fsdevel, linux-kernel

On 13/07/20 15:22, Juri Lelli wrote:

[...]

> Gentle ping about this issue (mainly addressing relevant maintainers and
> potential reviewers). It's easily reproducible with PREEMPT_RT.

Ping. Any comment at all? :-)

Thanks,

Juri



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-07-22  9:01         ` Juri Lelli
@ 2020-08-20 10:41           ` He Zhe
  2021-05-27 15:52             ` Nitesh Narayan Lal
  0 siblings, 1 reply; 13+ messages in thread
From: He Zhe @ 2020-08-20 10:41 UTC
  To: Juri Lelli; +Cc: viro, axboe, linux-fsdevel, linux-kernel



On 7/22/20 5:01 PM, Juri Lelli wrote:
> On 13/07/20 15:22, Juri Lelli wrote:
>
> [...]
>
>> Gentle ping about this issue (mainly addressing relevant maintainers and
>> potential reviewers). It's easily reproducible with PREEMPT_RT.
> Ping. Any comment at all? :-)

Hi Maintainer(s),

It's been 4 months. Could this be considered this round?

Thanks,
Zhe

>
> Thanks,
>
> Juri
>



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2020-08-20 10:41           ` He Zhe
@ 2021-05-27 15:52             ` Nitesh Narayan Lal
  0 siblings, 0 replies; 13+ messages in thread
From: Nitesh Narayan Lal @ 2021-05-27 15:52 UTC
  To: He Zhe, Juri Lelli, viro, axboe; +Cc: linux-fsdevel, linux-kernel, nilal


On 8/20/20 6:41 AM, He Zhe wrote:
>
> On 7/22/20 5:01 PM, Juri Lelli wrote:
>> On 13/07/20 15:22, Juri Lelli wrote:
>>
>> [...]
>>
>>> Gentle ping about this issue (mainly addressing relevant maintainers and
>>> potential reviewers). It's easily reproducible with PREEMPT_RT.
>> Ping. Any comment at all? :-)
> Hi Maintainer(s),
>
> It's been 4 months. Could this be considered this round?

Gentle ping: are there any updates or comments here?

As Juri mentioned, the issue is still easily reproducible with PREEMPT_RT.

--
Thanks
Nitesh



* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2021-06-18  8:44 ` [PATCH] eventfd: Enlarge recursion limit to allow vhost to work He Zhe
  2021-07-03  8:31   ` Michael S. Tsirkin
@ 2021-08-25  7:57   ` Yongji Xie
  1 sibling, 0 replies; 13+ messages in thread
From: Yongji Xie @ 2021-08-25  7:57 UTC
  To: He Zhe
  Cc: Michael S. Tsirkin, Jason Wang, Stefan Hajnoczi,
	Stefano Garzarella, Parav Pandit, Christoph Hellwig,
	Christian Brauner, Randy Dunlap, Matthew Wilcox, Al Viro,
	Jens Axboe, bcrl, Jonathan Corbet, Mika Penttilä,
	Dan Carpenter, Greg KH, songmuchun, virtualization, kvm,
	linux-fsdevel, iommu, linux-kernel, qiang.zhang

Hi guys,

Are there any comments or updates on this patch?

Thanks,
Yongji

On Fri, Jun 18, 2021 at 4:47 PM He Zhe <zhe.he@windriver.com> wrote:
>
> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
> introduces a percpu counter that tracks the per-CPU recursion depth and
> warns if it is greater than zero, to avoid potential deadlock and stack
> overflow.
>
> However, sometimes different eventfds may be used in parallel. Specifically,
> when heavy network load goes through kvm and vhost, as in the perf profile
> below, the following call trace is triggered.
>
> -  100.00%
>    - 66.51%
>         ret_from_fork
>         kthread
>       - vhost_worker
>          - 33.47% handle_tx_kick
>               handle_tx
>               handle_tx_copy
>               vhost_tx_batch.isra.0
>               vhost_add_used_and_signal_n
>               eventfd_signal
>          - 33.05% handle_rx_net
>               handle_rx
>               vhost_add_used_and_signal_n
>               eventfd_signal
>    - 33.49%
>         ioctl
>         entry_SYSCALL_64_after_hwframe
>         do_syscall_64
>         __x64_sys_ioctl
>         ksys_ioctl
>         do_vfs_ioctl
>         kvm_vcpu_ioctl
>         kvm_arch_vcpu_ioctl_run
>         vmx_handle_exit
>         handle_ept_misconfig
>         kvm_io_bus_write
>         __kvm_io_bus_write
>         eventfd_signal
>
> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
> ---- snip ----
> 001: Call Trace:
> 001:  vhost_signal+0x15e/0x1b0 [vhost]
> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
> 001:  handle_rx+0xb9/0x900 [vhost_net]
> 001:  handle_rx_net+0x15/0x20 [vhost_net]
> 001:  vhost_worker+0xbe/0x120 [vhost]
> 001:  kthread+0x106/0x140
> 001:  ? log_used.part.0+0x20/0x20 [vhost]
> 001:  ? kthread_park+0x90/0x90
> 001:  ret_from_fork+0x35/0x40
> 001: ---[ end trace 0000000000000003 ]---
>
> This patch enlarges the limit to 1, which is the maximum recursion depth we
> have found so far.
>
> Credit for the eventfd_signal_count modification goes to
> Xie Yongji <xieyongji@bytedance.com>
>
> Signed-off-by: He Zhe <zhe.he@windriver.com>
> ---
>  fs/eventfd.c            | 3 ++-
>  include/linux/eventfd.h | 5 ++++-
>  2 files changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/fs/eventfd.c b/fs/eventfd.c
> index e265b6dd4f34..add6af91cacf 100644
> --- a/fs/eventfd.c
> +++ b/fs/eventfd.c
> @@ -71,7 +71,8 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
>          * it returns true, the eventfd_signal() call should be deferred to a
>          * safe context.
>          */
> -       if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
> +       if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count) >
> +           EFD_WAKE_COUNT_MAX))
>                 return 0;
>
>         spin_lock_irqsave(&ctx->wqh.lock, flags);
> diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
> index fa0a524baed0..74be152ebe87 100644
> --- a/include/linux/eventfd.h
> +++ b/include/linux/eventfd.h
> @@ -29,6 +29,9 @@
>  #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
>  #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
>
> +/* This is the maximum recursion depth we have found so far */
> +#define EFD_WAKE_COUNT_MAX 1
> +
>  struct eventfd_ctx;
>  struct file;
>
> @@ -47,7 +50,7 @@ DECLARE_PER_CPU(int, eventfd_wake_count);
>
>  static inline bool eventfd_signal_count(void)
>  {
> -       return this_cpu_read(eventfd_wake_count);
> +       return this_cpu_read(eventfd_wake_count) > EFD_WAKE_COUNT_MAX;
>  }
>
>  #else /* CONFIG_EVENTFD */
> --
> 2.17.1
>


* Re: [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2021-06-18  8:44 ` [PATCH] eventfd: Enlarge recursion limit to allow vhost to work He Zhe
@ 2021-07-03  8:31   ` Michael S. Tsirkin
  2021-08-25  7:57   ` Yongji Xie
  1 sibling, 0 replies; 13+ messages in thread
From: Michael S. Tsirkin @ 2021-07-03  8:31 UTC
  To: He Zhe
  Cc: xieyongji, jasowang, stefanha, sgarzare, parav, hch,
	christian.brauner, rdunlap, willy, viro, axboe, bcrl, corbet,
	mika.penttila, dan.carpenter, gregkh, songmuchun, virtualization,
	kvm, linux-fsdevel, iommu, linux-kernel, qiang.zhang

On Fri, Jun 18, 2021 at 04:44:12PM +0800, He Zhe wrote:
> commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
> introduces a percpu counter that tracks the per-CPU recursion depth and
> warns if it is greater than zero, to avoid potential deadlock and stack
> overflow.
> 
> However, sometimes different eventfds may be used in parallel. Specifically,
> when heavy network load goes through kvm and vhost, as in the perf profile
> below, the following call trace is triggered.
> 
> -  100.00%
>    - 66.51%
>         ret_from_fork
>         kthread
>       - vhost_worker
>          - 33.47% handle_tx_kick
>               handle_tx
>               handle_tx_copy
>               vhost_tx_batch.isra.0
>               vhost_add_used_and_signal_n
>               eventfd_signal
>          - 33.05% handle_rx_net
>               handle_rx
>               vhost_add_used_and_signal_n
>               eventfd_signal
>    - 33.49%
>         ioctl
>         entry_SYSCALL_64_after_hwframe
>         do_syscall_64
>         __x64_sys_ioctl
>         ksys_ioctl
>         do_vfs_ioctl
>         kvm_vcpu_ioctl
>         kvm_arch_vcpu_ioctl_run
>         vmx_handle_exit
>         handle_ept_misconfig
>         kvm_io_bus_write
>         __kvm_io_bus_write
>         eventfd_signal
> 
> 001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
> ---- snip ----
> 001: Call Trace:
> 001:  vhost_signal+0x15e/0x1b0 [vhost]
> 001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
> 001:  handle_rx+0xb9/0x900 [vhost_net]
> 001:  handle_rx_net+0x15/0x20 [vhost_net]
> 001:  vhost_worker+0xbe/0x120 [vhost]
> 001:  kthread+0x106/0x140
> 001:  ? log_used.part.0+0x20/0x20 [vhost]
> 001:  ? kthread_park+0x90/0x90
> 001:  ret_from_fork+0x35/0x40
> 001: ---[ end trace 0000000000000003 ]---
> 
> This patch enlarges the limit to 1, which is the maximum recursion depth we
> have found so far.
> 
> Credit for the eventfd_signal_count modification goes to
> Xie Yongji <xieyongji@bytedance.com>
> 

And maybe:

Fixes: b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")

who's merging this?

> Signed-off-by: He Zhe <zhe.he@windriver.com>
> ---
>  fs/eventfd.c            | 3 ++-
>  include/linux/eventfd.h | 5 ++++-
>  2 files changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/fs/eventfd.c b/fs/eventfd.c
> index e265b6dd4f34..add6af91cacf 100644
> --- a/fs/eventfd.c
> +++ b/fs/eventfd.c
> @@ -71,7 +71,8 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
>  	 * it returns true, the eventfd_signal() call should be deferred to a
>  	 * safe context.
>  	 */
> -	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
> +	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count) >
> +	    EFD_WAKE_COUNT_MAX))
>  		return 0;
>  
>  	spin_lock_irqsave(&ctx->wqh.lock, flags);
> diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
> index fa0a524baed0..74be152ebe87 100644
> --- a/include/linux/eventfd.h
> +++ b/include/linux/eventfd.h
> @@ -29,6 +29,9 @@
>  #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
>  #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
>  
> +/* This is the maximum recursion depth we have found so far */
> +#define EFD_WAKE_COUNT_MAX 1
> +
>  struct eventfd_ctx;
>  struct file;
>  
> @@ -47,7 +50,7 @@ DECLARE_PER_CPU(int, eventfd_wake_count);
>  
>  static inline bool eventfd_signal_count(void)
>  {
> -	return this_cpu_read(eventfd_wake_count);
> +	return this_cpu_read(eventfd_wake_count) > EFD_WAKE_COUNT_MAX;
>  }
>  
>  #else /* CONFIG_EVENTFD */
> -- 
> 2.17.1



* [PATCH] eventfd: Enlarge recursion limit to allow vhost to work
  2021-06-18  3:29 Re: [PATCH v8 03/10] eventfd: Increase the recursion depth of eventfd_signal() Yongji Xie
@ 2021-06-18  8:44 ` He Zhe
  2021-07-03  8:31   ` Michael S. Tsirkin
  2021-08-25  7:57   ` Yongji Xie
  0 siblings, 2 replies; 13+ messages in thread
From: He Zhe @ 2021-06-18  8:44 UTC
  To: xieyongji, mst, jasowang, stefanha, sgarzare, parav, hch,
	christian.brauner, rdunlap, willy, viro, axboe, bcrl, corbet,
	mika.penttila, dan.carpenter, gregkh, songmuchun, virtualization,
	kvm, linux-fsdevel, iommu, linux-kernel, qiang.zhang, zhe.he

commit b5e683d5cab8 ("eventfd: track eventfd_signal() recursion depth")
introduces a percpu counter that tracks the per-CPU recursion depth and
warns if it is greater than zero, to avoid potential deadlock and stack
overflow.

However, sometimes different eventfds may be used in parallel. Specifically,
when heavy network load goes through kvm and vhost, as in the perf profile
below, the following call trace is triggered.

-  100.00%
   - 66.51%
        ret_from_fork
        kthread
      - vhost_worker
         - 33.47% handle_tx_kick
              handle_tx
              handle_tx_copy
              vhost_tx_batch.isra.0
              vhost_add_used_and_signal_n
              eventfd_signal
         - 33.05% handle_rx_net
              handle_rx
              vhost_add_used_and_signal_n
              eventfd_signal
   - 33.49%
        ioctl
        entry_SYSCALL_64_after_hwframe
        do_syscall_64
        __x64_sys_ioctl
        ksys_ioctl
        do_vfs_ioctl
        kvm_vcpu_ioctl
        kvm_arch_vcpu_ioctl_run
        vmx_handle_exit
        handle_ept_misconfig
        kvm_io_bus_write
        __kvm_io_bus_write
        eventfd_signal

001: WARNING: CPU: 1 PID: 1503 at fs/eventfd.c:73 eventfd_signal+0x85/0xa0
---- snip ----
001: Call Trace:
001:  vhost_signal+0x15e/0x1b0 [vhost]
001:  vhost_add_used_and_signal_n+0x2b/0x40 [vhost]
001:  handle_rx+0xb9/0x900 [vhost_net]
001:  handle_rx_net+0x15/0x20 [vhost_net]
001:  vhost_worker+0xbe/0x120 [vhost]
001:  kthread+0x106/0x140
001:  ? log_used.part.0+0x20/0x20 [vhost]
001:  ? kthread_park+0x90/0x90
001:  ret_from_fork+0x35/0x40
001: ---[ end trace 0000000000000003 ]---

This patch enlarges the limit to 1, which is the maximum recursion depth we
have found so far.

Credit for the eventfd_signal_count modification goes to
Xie Yongji <xieyongji@bytedance.com>

Signed-off-by: He Zhe <zhe.he@windriver.com>
---
 fs/eventfd.c            | 3 ++-
 include/linux/eventfd.h | 5 ++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/fs/eventfd.c b/fs/eventfd.c
index e265b6dd4f34..add6af91cacf 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -71,7 +71,8 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 	 * it returns true, the eventfd_signal() call should be deferred to a
 	 * safe context.
 	 */
-	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count) >
+	    EFD_WAKE_COUNT_MAX))
 		return 0;
 
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index fa0a524baed0..74be152ebe87 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -29,6 +29,9 @@
 #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
 
+/* This is the maximum recursion depth we have found so far */
+#define EFD_WAKE_COUNT_MAX 1
+
 struct eventfd_ctx;
 struct file;
 
@@ -47,7 +50,7 @@ DECLARE_PER_CPU(int, eventfd_wake_count);
 
 static inline bool eventfd_signal_count(void)
 {
-	return this_cpu_read(eventfd_wake_count);
+	return this_cpu_read(eventfd_wake_count) > EFD_WAKE_COUNT_MAX;
 }
 
 #else /* CONFIG_EVENTFD */
-- 
2.17.1



Thread overview: 13+ messages
2020-04-10 11:47 [PATCH] eventfd: Enlarge recursion limit to allow vhost to work zhe.he
2020-05-12  7:00 ` He Zhe
2020-06-22  9:09 ` He Zhe
2020-07-03  8:12 ` Juri Lelli
2020-07-03 11:11   ` He Zhe
2020-07-06  6:45     ` Juri Lelli
2020-07-13 13:22       ` Juri Lelli
2020-07-22  9:01         ` Juri Lelli
2020-08-20 10:41           ` He Zhe
2021-05-27 15:52             ` Nitesh Narayan Lal
2021-06-18  3:29 Re: [PATCH v8 03/10] eventfd: Increase the recursion depth of eventfd_signal() Yongji Xie
2021-06-18  8:44 ` [PATCH] eventfd: Enlarge recursion limit to allow vhost to work He Zhe
2021-07-03  8:31   ` Michael S. Tsirkin
2021-08-25  7:57   ` Yongji Xie
