All of lore.kernel.org
 help / color / mirror / Atom feed
From: Fuad Tabba <tabba@google.com>
To: Kalesh Singh <kaleshsingh@google.com>
Cc: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	madvenka@linux.microsoft.com, will@kernel.org,
	qperret@google.com, james.morse@arm.com,
	alexandru.elisei@arm.com, suzuki.poulose@arm.com,
	catalin.marinas@arm.com, andreyknvl@gmail.com,
	vincenzo.frascino@arm.com, mhiramat@kernel.org, ast@kernel.org,
	wangkefeng.wang@huawei.com, elver@google.com, keirf@google.com,
	yuzenghui@huawei.com, ardb@kernel.org, oupton@google.com,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	kernel-team@android.com
Subject: Re: [PATCH v5 13/17] KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace
Date: Thu, 21 Jul 2022 10:58:32 +0100	[thread overview]
Message-ID: <CA+EHjTxpHxojNdRm21hYgcWqFJCzPx3jch2bdVqZ4+2NQqNvDA@mail.gmail.com> (raw)
In-Reply-To: <20220721055728.718573-14-kaleshsingh@google.com>

Hi Kalesh,


On Thu, Jul 21, 2022 at 6:58 AM Kalesh Singh <kaleshsingh@google.com> wrote:
>
> In non-protected nVHE mode (non-pKVM) the host can directly access
> hypervisor memory; and unwinding of the hypervisor stacktrace is
> done from EL1 to save on memory for shared buffers.
>
> To unwind the hypervisor stack from EL1 the host needs to know the
> starting point for the unwind and information that will allow it to
> translate hypervisor stack addresses to the corresponding kernel
> addresses. This patch sets up this bookkeeping. It is made use of
> later in the series.
>
> Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
> ---

Reviewed-by: Fuad Tabba <tabba@google.com>

Cheers,
/fuad


>
> Changes in v5:
>   - Use regular comments instead of doc comments, per Fuad
>
>  arch/arm64/include/asm/kvm_asm.h         | 16 ++++++++++++++++
>  arch/arm64/include/asm/stacktrace/nvhe.h |  4 ++++
>  arch/arm64/kvm/hyp/nvhe/stacktrace.c     | 24 ++++++++++++++++++++++++
>  3 files changed, 44 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 2e277f2ed671..53035763e48e 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -176,6 +176,22 @@ struct kvm_nvhe_init_params {
>         unsigned long vtcr;
>  };
>
> +/*
> + * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
> + * hyp_panic() in non-protected mode.
> + *
> + * @stack_base:                 hyp VA of the hyp_stack base.
> + * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
> + * @fp:                         hyp FP where the backtrace begins.
> + * @pc:                         hyp PC where the backtrace begins.
> + */
> +struct kvm_nvhe_stacktrace_info {
> +       unsigned long stack_base;
> +       unsigned long overflow_stack_base;
> +       unsigned long fp;
> +       unsigned long pc;
> +};
> +
>  /* Translate a kernel address @ptr into its equivalent linear mapping */
>  #define kvm_ksym_ref(ptr)                                              \
>         ({                                                              \
> diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
> index 05d7e03e0a8c..8f02803a005f 100644
> --- a/arch/arm64/include/asm/stacktrace/nvhe.h
> +++ b/arch/arm64/include/asm/stacktrace/nvhe.h
> @@ -19,6 +19,7 @@
>  #ifndef __ASM_STACKTRACE_NVHE_H
>  #define __ASM_STACKTRACE_NVHE_H
>
> +#include <asm/kvm_asm.h>
>  #include <asm/stacktrace/common.h>
>
>  /*
> @@ -52,6 +53,9 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
>   * In protected mode, the unwinding is done by the hypervisor in EL2.
>   */
>
> +DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
> +DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
> +
>  #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
>  static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
>                                      struct stack_info *info)
> diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> index 60461c033a04..cbd365f4f26a 100644
> --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> @@ -9,6 +9,28 @@
>  DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
>         __aligned(16);
>
> +DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
> +
> +/*
> + * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
> + *
> + * @fp : frame pointer at which to start the unwinding.
> + * @pc : program counter at which to start the unwinding.
> + *
> + * Save the information needed by the host to unwind the non-protected
> + * nVHE hypervisor stack in EL1.
> + */
> +static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
> +{
> +       struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
> +       struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
> +
> +       stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
> +       stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
> +       stacktrace_info->fp = fp;
> +       stacktrace_info->pc = pc;
> +}
> +
>  #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
>  DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
>
> @@ -89,4 +111,6 @@ void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
>  {
>         if (is_protected_kvm_enabled())
>                 pkvm_save_backtrace(fp, pc);
> +       else
> +               hyp_prepare_backtrace(fp, pc);
>  }
> --
> 2.37.0.170.g444d1eabd0-goog
>

WARNING: multiple messages have this Message-ID (diff)
From: Fuad Tabba <tabba@google.com>
To: Kalesh Singh <kaleshsingh@google.com>
Cc: wangkefeng.wang@huawei.com, catalin.marinas@arm.com,
	ast@kernel.org, vincenzo.frascino@arm.com, will@kernel.org,
	kvmarm@lists.cs.columbia.edu, maz@kernel.org,
	madvenka@linux.microsoft.com, kernel-team@android.com,
	elver@google.com, broonie@kernel.org,
	linux-arm-kernel@lists.infradead.org, andreyknvl@gmail.com,
	linux-kernel@vger.kernel.org, mhiramat@kernel.org
Subject: Re: [PATCH v5 13/17] KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace
Date: Thu, 21 Jul 2022 10:58:32 +0100	[thread overview]
Message-ID: <CA+EHjTxpHxojNdRm21hYgcWqFJCzPx3jch2bdVqZ4+2NQqNvDA@mail.gmail.com> (raw)
In-Reply-To: <20220721055728.718573-14-kaleshsingh@google.com>

Hi Kalesh,


On Thu, Jul 21, 2022 at 6:58 AM Kalesh Singh <kaleshsingh@google.com> wrote:
>
> In non-protected nVHE mode (non-pKVM) the host can directly access
> hypervisor memory; and unwinding of the hypervisor stacktrace is
> done from EL1 to save on memory for shared buffers.
>
> To unwind the hypervisor stack from EL1 the host needs to know the
> starting point for the unwind and information that will allow it to
> translate hypervisor stack addresses to the corresponding kernel
> addresses. This patch sets up this bookkeeping. It is made use of
> later in the series.
>
> Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
> ---

Reviewed-by: Fuad Tabba <tabba@google.com>

Cheers,
/fuad


>
> Changes in v5:
>   - Use regular comments instead of doc comments, per Fuad
>
>  arch/arm64/include/asm/kvm_asm.h         | 16 ++++++++++++++++
>  arch/arm64/include/asm/stacktrace/nvhe.h |  4 ++++
>  arch/arm64/kvm/hyp/nvhe/stacktrace.c     | 24 ++++++++++++++++++++++++
>  3 files changed, 44 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 2e277f2ed671..53035763e48e 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -176,6 +176,22 @@ struct kvm_nvhe_init_params {
>         unsigned long vtcr;
>  };
>
> +/*
> + * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
> + * hyp_panic() in non-protected mode.
> + *
> + * @stack_base:                 hyp VA of the hyp_stack base.
> + * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
> + * @fp:                         hyp FP where the backtrace begins.
> + * @pc:                         hyp PC where the backtrace begins.
> + */
> +struct kvm_nvhe_stacktrace_info {
> +       unsigned long stack_base;
> +       unsigned long overflow_stack_base;
> +       unsigned long fp;
> +       unsigned long pc;
> +};
> +
>  /* Translate a kernel address @ptr into its equivalent linear mapping */
>  #define kvm_ksym_ref(ptr)                                              \
>         ({                                                              \
> diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
> index 05d7e03e0a8c..8f02803a005f 100644
> --- a/arch/arm64/include/asm/stacktrace/nvhe.h
> +++ b/arch/arm64/include/asm/stacktrace/nvhe.h
> @@ -19,6 +19,7 @@
>  #ifndef __ASM_STACKTRACE_NVHE_H
>  #define __ASM_STACKTRACE_NVHE_H
>
> +#include <asm/kvm_asm.h>
>  #include <asm/stacktrace/common.h>
>
>  /*
> @@ -52,6 +53,9 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
>   * In protected mode, the unwinding is done by the hypervisor in EL2.
>   */
>
> +DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
> +DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
> +
>  #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
>  static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
>                                      struct stack_info *info)
> diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> index 60461c033a04..cbd365f4f26a 100644
> --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> @@ -9,6 +9,28 @@
>  DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
>         __aligned(16);
>
> +DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
> +
> +/*
> + * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
> + *
> + * @fp : frame pointer at which to start the unwinding.
> + * @pc : program counter at which to start the unwinding.
> + *
> + * Save the information needed by the host to unwind the non-protected
> + * nVHE hypervisor stack in EL1.
> + */
> +static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
> +{
> +       struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
> +       struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
> +
> +       stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
> +       stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
> +       stacktrace_info->fp = fp;
> +       stacktrace_info->pc = pc;
> +}
> +
>  #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
>  DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
>
> @@ -89,4 +111,6 @@ void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
>  {
>         if (is_protected_kvm_enabled())
>                 pkvm_save_backtrace(fp, pc);
> +       else
> +               hyp_prepare_backtrace(fp, pc);
>  }
> --
> 2.37.0.170.g444d1eabd0-goog
>
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

WARNING: multiple messages have this Message-ID (diff)
From: Fuad Tabba <tabba@google.com>
To: Kalesh Singh <kaleshsingh@google.com>
Cc: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	 madvenka@linux.microsoft.com, will@kernel.org,
	qperret@google.com,  james.morse@arm.com,
	alexandru.elisei@arm.com, suzuki.poulose@arm.com,
	 catalin.marinas@arm.com, andreyknvl@gmail.com,
	vincenzo.frascino@arm.com,  mhiramat@kernel.org, ast@kernel.org,
	wangkefeng.wang@huawei.com,  elver@google.com, keirf@google.com,
	yuzenghui@huawei.com, ardb@kernel.org,  oupton@google.com,
	linux-arm-kernel@lists.infradead.org,
	 kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	 kernel-team@android.com
Subject: Re: [PATCH v5 13/17] KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace
Date: Thu, 21 Jul 2022 10:58:32 +0100	[thread overview]
Message-ID: <CA+EHjTxpHxojNdRm21hYgcWqFJCzPx3jch2bdVqZ4+2NQqNvDA@mail.gmail.com> (raw)
In-Reply-To: <20220721055728.718573-14-kaleshsingh@google.com>

Hi Kalesh,


On Thu, Jul 21, 2022 at 6:58 AM Kalesh Singh <kaleshsingh@google.com> wrote:
>
> In non-protected nVHE mode (non-pKVM) the host can directly access
> hypervisor memory; and unwinding of the hypervisor stacktrace is
> done from EL1 to save on memory for shared buffers.
>
> To unwind the hypervisor stack from EL1 the host needs to know the
> starting point for the unwind and information that will allow it to
> translate hypervisor stack addresses to the corresponding kernel
> addresses. This patch sets up this bookkeeping. It is made use of
> later in the series.
>
> Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
> ---

Reviewed-by: Fuad Tabba <tabba@google.com>

Cheers,
/fuad


>
> Changes in v5:
>   - Use regular comments instead of doc comments, per Fuad
>
>  arch/arm64/include/asm/kvm_asm.h         | 16 ++++++++++++++++
>  arch/arm64/include/asm/stacktrace/nvhe.h |  4 ++++
>  arch/arm64/kvm/hyp/nvhe/stacktrace.c     | 24 ++++++++++++++++++++++++
>  3 files changed, 44 insertions(+)
>
> diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
> index 2e277f2ed671..53035763e48e 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -176,6 +176,22 @@ struct kvm_nvhe_init_params {
>         unsigned long vtcr;
>  };
>
> +/*
> + * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
> + * hyp_panic() in non-protected mode.
> + *
> + * @stack_base:                 hyp VA of the hyp_stack base.
> + * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
> + * @fp:                         hyp FP where the backtrace begins.
> + * @pc:                         hyp PC where the backtrace begins.
> + */
> +struct kvm_nvhe_stacktrace_info {
> +       unsigned long stack_base;
> +       unsigned long overflow_stack_base;
> +       unsigned long fp;
> +       unsigned long pc;
> +};
> +
>  /* Translate a kernel address @ptr into its equivalent linear mapping */
>  #define kvm_ksym_ref(ptr)                                              \
>         ({                                                              \
> diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
> index 05d7e03e0a8c..8f02803a005f 100644
> --- a/arch/arm64/include/asm/stacktrace/nvhe.h
> +++ b/arch/arm64/include/asm/stacktrace/nvhe.h
> @@ -19,6 +19,7 @@
>  #ifndef __ASM_STACKTRACE_NVHE_H
>  #define __ASM_STACKTRACE_NVHE_H
>
> +#include <asm/kvm_asm.h>
>  #include <asm/stacktrace/common.h>
>
>  /*
> @@ -52,6 +53,9 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
>   * In protected mode, the unwinding is done by the hypervisor in EL2.
>   */
>
> +DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
> +DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
> +
>  #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
>  static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
>                                      struct stack_info *info)
> diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> index 60461c033a04..cbd365f4f26a 100644
> --- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> +++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
> @@ -9,6 +9,28 @@
>  DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
>         __aligned(16);
>
> +DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
> +
> +/*
> + * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
> + *
> + * @fp : frame pointer at which to start the unwinding.
> + * @pc : program counter at which to start the unwinding.
> + *
> + * Save the information needed by the host to unwind the non-protected
> + * nVHE hypervisor stack in EL1.
> + */
> +static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
> +{
> +       struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
> +       struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
> +
> +       stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
> +       stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
> +       stacktrace_info->fp = fp;
> +       stacktrace_info->pc = pc;
> +}
> +
>  #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
>  DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
>
> @@ -89,4 +111,6 @@ void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
>  {
>         if (is_protected_kvm_enabled())
>                 pkvm_save_backtrace(fp, pc);
> +       else
> +               hyp_prepare_backtrace(fp, pc);
>  }
> --
> 2.37.0.170.g444d1eabd0-goog
>

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  reply	other threads:[~2022-07-21  9:59 UTC|newest]

Thread overview: 120+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-21  5:57 [PATCH v5 00/17] KVM nVHE Hypervisor stack unwinder Kalesh Singh
2022-07-21  5:57 ` Kalesh Singh
2022-07-21  5:57 ` Kalesh Singh
2022-07-21  5:57 ` [PATCH v5 01/17] arm64: stacktrace: Add shared header for common stack unwinding code Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57 ` [PATCH v5 02/17] arm64: stacktrace: Factor out on_accessible_stack_common() Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57 ` [PATCH v5 03/17] arm64: stacktrace: Factor out unwind_next_common() Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57 ` [PATCH v5 04/17] arm64: stacktrace: Handle frame pointer from different address spaces Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:57   ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 05/17] arm64: stacktrace: Factor out common unwind() Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-25 14:05   ` Mark Brown
2022-07-25 14:05     ` Mark Brown
2022-07-25 14:05     ` Mark Brown
2022-07-21  5:57 ` [PATCH v5 06/17] arm64: stacktrace: Add description of stacktrace/common.h Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:57   ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 07/17] KVM: arm64: On stack overflow switch to hyp overflow_stack Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57 ` [PATCH v5 08/17] KVM: arm64: Add PROTECTED_NVHE_STACKTRACE Kconfig Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:57   ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 09/17] KVM: arm64: Allocate shared pKVM hyp stacktrace buffers Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:57   ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  9:57     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 10/17] KVM: arm64: Stub implementation of pKVM HYP stack unwinder Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:58   ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 11/17] KVM: arm64: Stub implementation of non-protected nVHE " Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:58   ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 12/17] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:58   ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-22 15:33   ` Oliver Upton
2022-07-22 15:33     ` Oliver Upton
2022-07-22 15:33     ` Oliver Upton
2022-07-22 17:28     ` Kalesh Singh
2022-07-22 17:28       ` Kalesh Singh
2022-07-22 17:28       ` Kalesh Singh
2022-07-21  5:57 ` [PATCH v5 13/17] KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:58   ` Fuad Tabba [this message]
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 14/17] KVM: arm64: Implement protected nVHE hyp stack unwinder Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:58   ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 15/17] KVM: arm64: Implement non-protected " Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:58   ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  9:58     ` Fuad Tabba
2022-07-21  5:57 ` [PATCH v5 16/17] KVM: arm64: Introduce pkvm_dump_backtrace() Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:59   ` Fuad Tabba
2022-07-21  9:59     ` Fuad Tabba
2022-07-21  9:59     ` Fuad Tabba
2022-07-22 11:16   ` Oliver Upton
2022-07-22 11:16     ` Oliver Upton
2022-07-22 11:16     ` Oliver Upton
2022-07-22 17:25     ` Kalesh Singh
2022-07-22 17:25       ` Kalesh Singh
2022-07-22 17:25       ` Kalesh Singh
2022-07-21  5:57 ` [PATCH v5 17/17] KVM: arm64: Introduce hyp_dump_backtrace() Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  5:57   ` Kalesh Singh
2022-07-21  9:59   ` Fuad Tabba
2022-07-21  9:59     ` Fuad Tabba
2022-07-21  9:59     ` Fuad Tabba
2022-07-21 20:35   ` Oliver Upton
2022-07-21 20:35     ` Oliver Upton
2022-07-21 20:35     ` Oliver Upton
2022-07-22  0:01     ` Kalesh Singh
2022-07-22  0:01       ` Kalesh Singh
2022-07-22  0:01       ` Kalesh Singh
2022-07-21  9:55 ` [PATCH v5 00/17] KVM nVHE Hypervisor stack unwinder Fuad Tabba
2022-07-21  9:55   ` Fuad Tabba
2022-07-21  9:55   ` Fuad Tabba
2022-07-21 16:06   ` Kalesh Singh
2022-07-21 16:06     ` Kalesh Singh
2022-07-21 16:06     ` Kalesh Singh
2022-07-22 10:48 ` Oliver Upton
2022-07-22 10:48   ` Oliver Upton
2022-07-22 10:48   ` Oliver Upton

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CA+EHjTxpHxojNdRm21hYgcWqFJCzPx3jch2bdVqZ4+2NQqNvDA@mail.gmail.com \
    --to=tabba@google.com \
    --cc=alexandru.elisei@arm.com \
    --cc=andreyknvl@gmail.com \
    --cc=ardb@kernel.org \
    --cc=ast@kernel.org \
    --cc=broonie@kernel.org \
    --cc=catalin.marinas@arm.com \
    --cc=elver@google.com \
    --cc=james.morse@arm.com \
    --cc=kaleshsingh@google.com \
    --cc=keirf@google.com \
    --cc=kernel-team@android.com \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=madvenka@linux.microsoft.com \
    --cc=mark.rutland@arm.com \
    --cc=maz@kernel.org \
    --cc=mhiramat@kernel.org \
    --cc=oupton@google.com \
    --cc=qperret@google.com \
    --cc=suzuki.poulose@arm.com \
    --cc=vincenzo.frascino@arm.com \
    --cc=wangkefeng.wang@huawei.com \
    --cc=will@kernel.org \
    --cc=yuzenghui@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.