All of lore.kernel.org
 help / color / mirror / Atom feed
From: Kalesh Singh <kaleshsingh@google.com>
To: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	 madvenka@linux.microsoft.com
Cc: wangkefeng.wang@huawei.com, elver@google.com,
	catalin.marinas@arm.com, ast@kernel.org,
	vincenzo.frascino@arm.com, will@kernel.org,
	kvmarm@lists.cs.columbia.edu, android-mm@google.com,
	andreyknvl@gmail.com, kernel-team@android.com,
	drjones@redhat.com, linux-arm-kernel@lists.infradead.org,
	russell.king@oracle.com, linux-kernel@vger.kernel.org,
	mhiramat@kernel.org
Subject: [PATCH v4 17/18] KVM: arm64: Introduce hyp_dump_backtrace()
Date: Thu, 14 Jul 2022 23:10:26 -0700	[thread overview]
Message-ID: <20220715061027.1612149-18-kaleshsingh@google.com> (raw)
In-Reply-To: <20220715061027.1612149-1-kaleshsingh@google.com>

In non-protected nVHE mode, the host unwinds and dumps the hypervisor
backtrace from EL1. This is possible because the host can directly access
the hypervisor stack pages in non-protected mode.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 64 +++++++++++++++++++++---
 1 file changed, 56 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index ec1a4ee21c21..c322ac95b256 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -190,6 +190,56 @@ static int notrace unwind_next(struct unwind_state *state)
 }
 NOKPROBE_SYMBOL(unwind_next);
 
+/**
+ * kvm_nvhe_print_backtrace_entry - Symbolizes and prints the HYP stack address
+ */
+static inline void kvm_nvhe_print_backtrace_entry(unsigned long addr,
+						  unsigned long hyp_offset)
+{
+	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+
+	/* Mask tags and convert to kern addr */
+	addr = (addr & va_mask) + hyp_offset;
+	kvm_err(" [<%016lx>] %pB\n", addr, (void *)addr);
+}
+
+/**
+ * hyp_dump_backtrace_entry - Dump an entry of the non-protected nVHE HYP stacktrace
+ *
+ * @arg    : the hypervisor offset, used for address translation
+ * @where  : the program counter corresponding to the stack frame
+ */
+static inline bool hyp_dump_backtrace_entry(void *arg, unsigned long where)
+{
+	kvm_nvhe_print_backtrace_entry(where, (unsigned long)arg);
+
+	return true;
+}
+
+/**
+ * hyp_dump_backtrace - Dump the non-protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * The host can directly access HYP stack pages in non-protected
+ * mode, so the unwinding is done directly from EL1. This removes
+ * the need for shared buffers between host and hypervisor for
+ * the stacktrace.
+ */
+static inline void hyp_dump_backtrace(unsigned long hyp_offset)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info;
+	struct unwind_state state;
+
+	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
+
+	kvm_err("Non-protected nVHE HYP call trace:\n");
+	unwind(&state, hyp_dump_backtrace_entry, (void *)hyp_offset);
+	kvm_err("---- End of Non-protected nVHE HYP call trace ----\n");
+}
+
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
 
@@ -206,22 +256,18 @@ DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
 	unsigned long *stacktrace_pos;
-	unsigned long va_mask, pc;
 
 	stacktrace_pos = (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
-	va_mask = GENMASK_ULL(vabits_actual - 1, 0);
 
 	kvm_err("Protected nVHE HYP call trace:\n");
 
-	/* The stack trace is terminated by a null entry */
-	for (; *stacktrace_pos; stacktrace_pos++) {
-		/* Mask tags and convert to kern addr */
-		pc = (*stacktrace_pos & va_mask) + hyp_offset;
-		kvm_err(" [<%016lx>] %pB\n", pc, (void *)pc);
-	}
+	/* The saved stacktrace is terminated by a null entry */
+	for (; *stacktrace_pos; stacktrace_pos++)
+		kvm_nvhe_print_backtrace_entry(*stacktrace_pos, hyp_offset);
 
 	kvm_err("---- End of Protected nVHE HYP call trace ----\n");
 }
+
 #else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
@@ -238,6 +284,8 @@ static inline void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
 {
 	if (is_protected_kvm_enabled())
 		pkvm_dump_backtrace(hyp_offset);
+	else
+		hyp_dump_backtrace(hyp_offset);
 }
 #endif	/* __KVM_NVHE_HYPERVISOR__ */
 #endif	/* __ASM_STACKTRACE_NVHE_H */
-- 
2.37.0.170.g444d1eabd0-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

WARNING: multiple messages have this Message-ID (diff)
From: Kalesh Singh <kaleshsingh@google.com>
To: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	madvenka@linux.microsoft.com
Cc: will@kernel.org, qperret@google.com, tabba@google.com,
	kaleshsingh@google.com, james.morse@arm.com,
	alexandru.elisei@arm.com, suzuki.poulose@arm.com,
	catalin.marinas@arm.com, andreyknvl@gmail.com,
	russell.king@oracle.com, vincenzo.frascino@arm.com,
	mhiramat@kernel.org, ast@kernel.org, drjones@redhat.com,
	wangkefeng.wang@huawei.com, elver@google.com, keirf@google.com,
	yuzenghui@huawei.com, ardb@kernel.org, oupton@google.com,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	android-mm@google.com, kernel-team@android.com
Subject: [PATCH v4 17/18] KVM: arm64: Introduce hyp_dump_backtrace()
Date: Thu, 14 Jul 2022 23:10:26 -0700	[thread overview]
Message-ID: <20220715061027.1612149-18-kaleshsingh@google.com> (raw)
In-Reply-To: <20220715061027.1612149-1-kaleshsingh@google.com>

In non-protected nVHE mode, the host unwinds and dumps the hypervisor
backtrace from EL1. This is possible because the host can directly access
the hypervisor stack pages in non-protected mode.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 64 +++++++++++++++++++++---
 1 file changed, 56 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index ec1a4ee21c21..c322ac95b256 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -190,6 +190,56 @@ static int notrace unwind_next(struct unwind_state *state)
 }
 NOKPROBE_SYMBOL(unwind_next);
 
+/**
+ * kvm_nvhe_print_backtrace_entry - Symbolizes and prints the HYP stack address
+ */
+static inline void kvm_nvhe_print_backtrace_entry(unsigned long addr,
+						  unsigned long hyp_offset)
+{
+	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+
+	/* Mask tags and convert to kern addr */
+	addr = (addr & va_mask) + hyp_offset;
+	kvm_err(" [<%016lx>] %pB\n", addr, (void *)addr);
+}
+
+/**
+ * hyp_dump_backtrace_entry - Dump an entry of the non-protected nVHE HYP stacktrace
+ *
+ * @arg    : the hypervisor offset, used for address translation
+ * @where  : the program counter corresponding to the stack frame
+ */
+static inline bool hyp_dump_backtrace_entry(void *arg, unsigned long where)
+{
+	kvm_nvhe_print_backtrace_entry(where, (unsigned long)arg);
+
+	return true;
+}
+
+/**
+ * hyp_dump_backtrace - Dump the non-protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * The host can directly access HYP stack pages in non-protected
+ * mode, so the unwinding is done directly from EL1. This removes
+ * the need for shared buffers between host and hypervisor for
+ * the stacktrace.
+ */
+static inline void hyp_dump_backtrace(unsigned long hyp_offset)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info;
+	struct unwind_state state;
+
+	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
+
+	kvm_err("Non-protected nVHE HYP call trace:\n");
+	unwind(&state, hyp_dump_backtrace_entry, (void *)hyp_offset);
+	kvm_err("---- End of Non-protected nVHE HYP call trace ----\n");
+}
+
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
 
@@ -206,22 +256,18 @@ DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
 	unsigned long *stacktrace_pos;
-	unsigned long va_mask, pc;
 
 	stacktrace_pos = (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
-	va_mask = GENMASK_ULL(vabits_actual - 1, 0);
 
 	kvm_err("Protected nVHE HYP call trace:\n");
 
-	/* The stack trace is terminated by a null entry */
-	for (; *stacktrace_pos; stacktrace_pos++) {
-		/* Mask tags and convert to kern addr */
-		pc = (*stacktrace_pos & va_mask) + hyp_offset;
-		kvm_err(" [<%016lx>] %pB\n", pc, (void *)pc);
-	}
+	/* The saved stacktrace is terminated by a null entry */
+	for (; *stacktrace_pos; stacktrace_pos++)
+		kvm_nvhe_print_backtrace_entry(*stacktrace_pos, hyp_offset);
 
 	kvm_err("---- End of Protected nVHE HYP call trace ----\n");
 }
+
 #else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
@@ -238,6 +284,8 @@ static inline void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
 {
 	if (is_protected_kvm_enabled())
 		pkvm_dump_backtrace(hyp_offset);
+	else
+		hyp_dump_backtrace(hyp_offset);
 }
 #endif	/* __KVM_NVHE_HYPERVISOR__ */
 #endif	/* __ASM_STACKTRACE_NVHE_H */
-- 
2.37.0.170.g444d1eabd0-goog


WARNING: multiple messages have this Message-ID (diff)
From: Kalesh Singh <kaleshsingh@google.com>
To: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	 madvenka@linux.microsoft.com
Cc: will@kernel.org, qperret@google.com, tabba@google.com,
	 kaleshsingh@google.com, james.morse@arm.com,
	alexandru.elisei@arm.com,  suzuki.poulose@arm.com,
	catalin.marinas@arm.com, andreyknvl@gmail.com,
	 russell.king@oracle.com, vincenzo.frascino@arm.com,
	mhiramat@kernel.org,  ast@kernel.org, drjones@redhat.com,
	wangkefeng.wang@huawei.com,  elver@google.com, keirf@google.com,
	yuzenghui@huawei.com, ardb@kernel.org,  oupton@google.com,
	linux-arm-kernel@lists.infradead.org,
	 kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	 android-mm@google.com, kernel-team@android.com
Subject: [PATCH v4 17/18] KVM: arm64: Introduce hyp_dump_backtrace()
Date: Thu, 14 Jul 2022 23:10:26 -0700	[thread overview]
Message-ID: <20220715061027.1612149-18-kaleshsingh@google.com> (raw)
In-Reply-To: <20220715061027.1612149-1-kaleshsingh@google.com>

In non-protected nVHE mode, the host unwinds and dumps the hypervisor
backtrace from EL1. This is possible because the host can directly access
the hypervisor stack pages in non-protected mode.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 64 +++++++++++++++++++++---
 1 file changed, 56 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index ec1a4ee21c21..c322ac95b256 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -190,6 +190,56 @@ static int notrace unwind_next(struct unwind_state *state)
 }
 NOKPROBE_SYMBOL(unwind_next);
 
+/**
+ * kvm_nvhe_print_backtrace_entry - Symbolizes and prints the HYP stack address
+ */
+static inline void kvm_nvhe_print_backtrace_entry(unsigned long addr,
+						  unsigned long hyp_offset)
+{
+	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+
+	/* Mask tags and convert to kern addr */
+	addr = (addr & va_mask) + hyp_offset;
+	kvm_err(" [<%016lx>] %pB\n", addr, (void *)addr);
+}
+
+/**
+ * hyp_dump_backtrace_entry - Dump an entry of the non-protected nVHE HYP stacktrace
+ *
+ * @arg    : the hypervisor offset, used for address translation
+ * @where  : the program counter corresponding to the stack frame
+ */
+static inline bool hyp_dump_backtrace_entry(void *arg, unsigned long where)
+{
+	kvm_nvhe_print_backtrace_entry(where, (unsigned long)arg);
+
+	return true;
+}
+
+/**
+ * hyp_dump_backtrace - Dump the non-protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * The host can directly access HYP stack pages in non-protected
+ * mode, so the unwinding is done directly from EL1. This removes
+ * the need for shared buffers between host and hypervisor for
+ * the stacktrace.
+ */
+static inline void hyp_dump_backtrace(unsigned long hyp_offset)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info;
+	struct unwind_state state;
+
+	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
+
+	kvm_err("Non-protected nVHE HYP call trace:\n");
+	unwind(&state, hyp_dump_backtrace_entry, (void *)hyp_offset);
+	kvm_err("---- End of Non-protected nVHE HYP call trace ----\n");
+}
+
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
 
@@ -206,22 +256,18 @@ DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
 	unsigned long *stacktrace_pos;
-	unsigned long va_mask, pc;
 
 	stacktrace_pos = (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
-	va_mask = GENMASK_ULL(vabits_actual - 1, 0);
 
 	kvm_err("Protected nVHE HYP call trace:\n");
 
-	/* The stack trace is terminated by a null entry */
-	for (; *stacktrace_pos; stacktrace_pos++) {
-		/* Mask tags and convert to kern addr */
-		pc = (*stacktrace_pos & va_mask) + hyp_offset;
-		kvm_err(" [<%016lx>] %pB\n", pc, (void *)pc);
-	}
+	/* The saved stacktrace is terminated by a null entry */
+	for (; *stacktrace_pos; stacktrace_pos++)
+		kvm_nvhe_print_backtrace_entry(*stacktrace_pos, hyp_offset);
 
 	kvm_err("---- End of Protected nVHE HYP call trace ----\n");
 }
+
 #else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
@@ -238,6 +284,8 @@ static inline void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
 {
 	if (is_protected_kvm_enabled())
 		pkvm_dump_backtrace(hyp_offset);
+	else
+		hyp_dump_backtrace(hyp_offset);
 }
 #endif	/* __KVM_NVHE_HYPERVISOR__ */
 #endif	/* __ASM_STACKTRACE_NVHE_H */
-- 
2.37.0.170.g444d1eabd0-goog


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  parent reply	other threads:[~2022-07-15  6:11 UTC|newest]

Thread overview: 162+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-15  6:10 [PATCH v4 00/18] KVM nVHE Hypervisor stack unwinder Kalesh Singh
2022-07-15  6:10 ` Kalesh Singh
2022-07-15  6:10 ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 01/18] arm64: stacktrace: Add shared header for common stack unwinding code Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 12:37   ` Mark Brown
2022-07-15 12:37     ` Mark Brown
2022-07-15 12:37     ` Mark Brown
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-18 12:52   ` Russell King (Oracle)
2022-07-18 12:52     ` Russell King (Oracle)
2022-07-18 12:52     ` Russell King (Oracle)
2022-07-18 15:26     ` Kalesh Singh
2022-07-18 15:26       ` Kalesh Singh
2022-07-18 15:26       ` Kalesh Singh
2022-07-18 16:00       ` Russell King (Oracle)
2022-07-18 16:00         ` Russell King (Oracle)
2022-07-18 16:00         ` Russell King (Oracle)
2022-07-15  6:10 ` [PATCH v4 02/18] arm64: stacktrace: Factor out on_accessible_stack_common() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 16:28   ` Mark Brown
2022-07-15 16:28     ` Mark Brown
2022-07-15 16:28     ` Mark Brown
2022-07-15  6:10 ` [PATCH v4 03/18] arm64: stacktrace: Factor out unwind_next_common() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 16:29   ` Mark Brown
2022-07-15 16:29     ` Mark Brown
2022-07-15 16:29     ` Mark Brown
2022-07-15  6:10 ` [PATCH v4 04/18] arm64: stacktrace: Handle frame pointer from different address spaces Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:56   ` Fuad Tabba
2022-07-15 13:56     ` Fuad Tabba
2022-07-15 13:56     ` Fuad Tabba
2022-07-18 17:40     ` Kalesh Singh
2022-07-18 17:40       ` Kalesh Singh
2022-07-18 17:40       ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 05/18] arm64: stacktrace: Factor out common unwind() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15  6:10 ` [PATCH v4 06/18] arm64: stacktrace: Add description of stacktrace/common.h Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:59   ` Fuad Tabba
2022-07-15 13:59     ` Fuad Tabba
2022-07-15 13:59     ` Fuad Tabba
2022-07-17  9:57   ` Marc Zyngier
2022-07-17  9:57     ` Marc Zyngier
2022-07-17  9:57     ` Marc Zyngier
2022-07-18 16:53     ` Kalesh Singh
2022-07-18 16:53       ` Kalesh Singh
2022-07-18 16:53       ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 07/18] KVM: arm64: On stack overflow switch to hyp overflow_stack Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  9:46   ` Fuad Tabba
2022-07-18  9:46     ` Fuad Tabba
2022-07-18  9:46     ` Fuad Tabba
2022-07-15  6:10 ` [PATCH v4 08/18] KVM: arm64: Add PROTECTED_NVHE_STACKTRACE Kconfig Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  6:55   ` Marc Zyngier
2022-07-18  6:55     ` Marc Zyngier
2022-07-18  6:55     ` Marc Zyngier
2022-07-18 17:03     ` Kalesh Singh
2022-07-18 17:03       ` Kalesh Singh
2022-07-18 17:03       ` Kalesh Singh
2022-07-19 10:35       ` Marc Zyngier
2022-07-19 10:35         ` Marc Zyngier
2022-07-19 10:35         ` Marc Zyngier
2022-07-19 18:23         ` Kalesh Singh
2022-07-19 18:23           ` Kalesh Singh
2022-07-19 18:23           ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 09/18] KVM: arm64: Allocate shared pKVM hyp stacktrace buffers Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  7:13   ` Marc Zyngier
2022-07-18  7:13     ` Marc Zyngier
2022-07-18  7:13     ` Marc Zyngier
2022-07-18 17:27     ` Kalesh Singh
2022-07-18 17:27       ` Kalesh Singh
2022-07-18 17:27       ` Kalesh Singh
2022-07-18 10:00   ` Fuad Tabba
2022-07-18 10:00     ` Fuad Tabba
2022-07-18 10:00     ` Fuad Tabba
2022-07-15  6:10 ` [PATCH v4 10/18] KVM: arm64: Stub implementation of pKVM HYP stack unwinder Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  7:20   ` Marc Zyngier
2022-07-18  7:20     ` Marc Zyngier
2022-07-18  7:20     ` Marc Zyngier
2022-07-15  6:10 ` [PATCH v4 11/18] KVM: arm64: Stub implementation of non-protected nVHE " Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  7:30   ` Marc Zyngier
2022-07-18  7:30     ` Marc Zyngier
2022-07-18  7:30     ` Marc Zyngier
2022-07-18 16:51     ` Kalesh Singh
2022-07-18 16:51       ` Kalesh Singh
2022-07-18 16:51       ` Kalesh Singh
2022-07-18 16:57       ` Marc Zyngier
2022-07-18 16:57         ` Marc Zyngier
2022-07-18 16:57         ` Marc Zyngier
2022-07-15  6:10 ` [PATCH v4 12/18] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  9:36   ` Marc Zyngier
2022-07-18  9:36     ` Marc Zyngier
2022-07-18  9:36     ` Marc Zyngier
2022-07-18 17:32     ` Kalesh Singh
2022-07-18 17:32       ` Kalesh Singh
2022-07-18 17:32       ` Kalesh Singh
2022-07-18 10:07   ` Fuad Tabba
2022-07-18 10:07     ` Fuad Tabba
2022-07-18 10:07     ` Fuad Tabba
2022-07-18 17:36     ` Kalesh Singh
2022-07-18 17:36       ` Kalesh Singh
2022-07-18 17:36       ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 13/18] KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 14/18] KVM: arm64: Implement protected nVHE hyp stack unwinder Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 15/18] KVM: arm64: Implement non-protected " Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 16/18] KVM: arm64: Introduce pkvm_dump_backtrace() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` Kalesh Singh [this message]
2022-07-15  6:10   ` [PATCH v4 17/18] KVM: arm64: Introduce hyp_dump_backtrace() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 18/18] KVM: arm64: Dump nVHE hypervisor stack on panic Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:55 ` [PATCH v4 00/18] KVM nVHE Hypervisor stack unwinder Fuad Tabba
2022-07-15 13:55   ` Fuad Tabba
2022-07-15 13:55   ` Fuad Tabba
2022-07-15 18:58   ` Kalesh Singh
2022-07-15 18:58     ` Kalesh Singh
2022-07-15 18:58     ` Kalesh Singh
2022-07-16  0:04     ` Kalesh Singh
2022-07-16  0:04       ` Kalesh Singh
2022-07-16  0:04       ` Kalesh Singh
2022-07-19 10:43 ` Marc Zyngier
2022-07-19 10:43   ` Marc Zyngier
2022-07-19 10:43   ` Marc Zyngier

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220715061027.1612149-18-kaleshsingh@google.com \
    --to=kaleshsingh@google.com \
    --cc=andreyknvl@gmail.com \
    --cc=android-mm@google.com \
    --cc=ast@kernel.org \
    --cc=broonie@kernel.org \
    --cc=catalin.marinas@arm.com \
    --cc=drjones@redhat.com \
    --cc=elver@google.com \
    --cc=kernel-team@android.com \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=madvenka@linux.microsoft.com \
    --cc=mark.rutland@arm.com \
    --cc=maz@kernel.org \
    --cc=mhiramat@kernel.org \
    --cc=russell.king@oracle.com \
    --cc=vincenzo.frascino@arm.com \
    --cc=wangkefeng.wang@huawei.com \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.