All of lore.kernel.org
 help / color / mirror / Atom feed
From: Kalesh Singh <kaleshsingh@google.com>
To: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	madvenka@linux.microsoft.com
Cc: will@kernel.org, qperret@google.com, tabba@google.com,
	kaleshsingh@google.com, james.morse@arm.com,
	alexandru.elisei@arm.com, suzuki.poulose@arm.com,
	catalin.marinas@arm.com, andreyknvl@gmail.com,
	russell.king@oracle.com, vincenzo.frascino@arm.com,
	mhiramat@kernel.org, ast@kernel.org, drjones@redhat.com,
	wangkefeng.wang@huawei.com, elver@google.com, keirf@google.com,
	yuzenghui@huawei.com, ardb@kernel.org, oupton@google.com,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	android-mm@google.com, kernel-team@android.com
Subject: [PATCH v4 12/18] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace
Date: Thu, 14 Jul 2022 23:10:21 -0700	[thread overview]
Message-ID: <20220715061027.1612149-13-kaleshsingh@google.com> (raw)
In-Reply-To: <20220715061027.1612149-1-kaleshsingh@google.com>

In protected nVHE mode, the host cannot access privately owned hypervisor
memory. Also, the hypervisor aims to remain simple to reduce the attack
surface and does not provide any printk support.

For the above reasons, the approach taken to provide hypervisor stacktraces
in protected mode is:
   1) Unwind and save the hyp stack addresses in EL2 to a shared buffer
      with the host (done in this patch).
   2) Delegate the dumping and symbolization of the addresses to the
      host in EL1 (later patch in the series).

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 18 ++++++
 arch/arm64/kvm/hyp/nvhe/stacktrace.c     | 70 ++++++++++++++++++++++++
 2 files changed, 88 insertions(+)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 36cf7858ddd8..456a6ae08433 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -21,6 +21,22 @@
 
 #include <asm/stacktrace/common.h>
 
+/**
+ * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ */
+static __always_inline void kvm_nvhe_unwind_init(struct unwind_state *state,
+						 unsigned long fp,
+						 unsigned long pc)
+{
+	unwind_init_common(state, NULL);
+
+	state->fp = fp;
+	state->pc = pc;
+}
+
 static inline bool on_accessible_stack(const struct task_struct *tsk,
 				       unsigned long sp, unsigned long size,
 				       struct stack_info *info)
@@ -33,6 +49,8 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
  */
 #ifdef __KVM_NVHE_HYPERVISOR__
 
+extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
 				     struct stack_info *info)
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
index 96c8b93320eb..832a536e440f 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -11,4 +11,74 @@ DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
 
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
+
+/**
+ * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
+ *
+ * @arg    : the position of the entry in the stacktrace buffer
+ * @where  : the program counter corresponding to the stack frame
+ *
+ * Save the return address of a stack frame to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
+{
+	unsigned long **stacktrace_pos = (unsigned long **)arg;
+	unsigned long stacktrace_start, stacktrace_end;
+
+	stacktrace_start = (unsigned long)this_cpu_ptr(pkvm_stacktrace);
+	stacktrace_end = stacktrace_start + NVHE_STACKTRACE_SIZE - (2 * sizeof(long));
+
+	if ((unsigned long) *stacktrace_pos > stacktrace_end)
+		return false;
+
+	/* Save the entry to the current pos in stacktrace buffer */
+	**stacktrace_pos = where;
+
+	/* A zero entry delimits the end of the stacktrace. */
+	*(*stacktrace_pos + 1) = 0UL;
+
+	/* Increment the current pos */
+	++*stacktrace_pos;
+
+	return true;
+}
+
+/**
+ * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Save the unwinded stack addresses to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+	void *stacktrace_start = (void *)this_cpu_ptr(pkvm_stacktrace);
+	struct unwind_state state;
+
+	kvm_nvhe_unwind_init(&state, fp, pc);
+
+	unwind(&state, pkvm_save_backtrace_entry, &stacktrace_start);
+}
+#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+}
 #endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+/**
+ * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Saves the information needed by the host to dump the nVHE hypervisor
+ * backtrace.
+ */
+void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
+{
+	if (is_protected_kvm_enabled())
+		pkvm_save_backtrace(fp, pc);
+}
-- 
2.37.0.170.g444d1eabd0-goog


WARNING: multiple messages have this Message-ID (diff)
From: Kalesh Singh <kaleshsingh@google.com>
To: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	 madvenka@linux.microsoft.com
Cc: wangkefeng.wang@huawei.com, elver@google.com,
	catalin.marinas@arm.com, ast@kernel.org,
	vincenzo.frascino@arm.com, will@kernel.org,
	kvmarm@lists.cs.columbia.edu, android-mm@google.com,
	andreyknvl@gmail.com, kernel-team@android.com,
	drjones@redhat.com, linux-arm-kernel@lists.infradead.org,
	russell.king@oracle.com, linux-kernel@vger.kernel.org,
	mhiramat@kernel.org
Subject: [PATCH v4 12/18] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace
Date: Thu, 14 Jul 2022 23:10:21 -0700	[thread overview]
Message-ID: <20220715061027.1612149-13-kaleshsingh@google.com> (raw)
In-Reply-To: <20220715061027.1612149-1-kaleshsingh@google.com>

In protected nVHE mode, the host cannot access privately owned hypervisor
memory. Also, the hypervisor aims to remain simple to reduce the attack
surface and does not provide any printk support.

For the above reasons, the approach taken to provide hypervisor stacktraces
in protected mode is:
   1) Unwind and save the hyp stack addresses in EL2 to a shared buffer
      with the host (done in this patch).
   2) Delegate the dumping and symbolization of the addresses to the
      host in EL1 (later patch in the series).

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 18 ++++++
 arch/arm64/kvm/hyp/nvhe/stacktrace.c     | 70 ++++++++++++++++++++++++
 2 files changed, 88 insertions(+)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 36cf7858ddd8..456a6ae08433 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -21,6 +21,22 @@
 
 #include <asm/stacktrace/common.h>
 
+/**
+ * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ */
+static __always_inline void kvm_nvhe_unwind_init(struct unwind_state *state,
+						 unsigned long fp,
+						 unsigned long pc)
+{
+	unwind_init_common(state, NULL);
+
+	state->fp = fp;
+	state->pc = pc;
+}
+
 static inline bool on_accessible_stack(const struct task_struct *tsk,
 				       unsigned long sp, unsigned long size,
 				       struct stack_info *info)
@@ -33,6 +49,8 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
  */
 #ifdef __KVM_NVHE_HYPERVISOR__
 
+extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
 				     struct stack_info *info)
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
index 96c8b93320eb..832a536e440f 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -11,4 +11,74 @@ DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
 
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
+
+/**
+ * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
+ *
+ * @arg    : the position of the entry in the stacktrace buffer
+ * @where  : the program counter corresponding to the stack frame
+ *
+ * Save the return address of a stack frame to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
+{
+	unsigned long **stacktrace_pos = (unsigned long **)arg;
+	unsigned long stacktrace_start, stacktrace_end;
+
+	stacktrace_start = (unsigned long)this_cpu_ptr(pkvm_stacktrace);
+	stacktrace_end = stacktrace_start + NVHE_STACKTRACE_SIZE - (2 * sizeof(long));
+
+	if ((unsigned long) *stacktrace_pos > stacktrace_end)
+		return false;
+
+	/* Save the entry to the current pos in stacktrace buffer */
+	**stacktrace_pos = where;
+
+	/* A zero entry delimits the end of the stacktrace. */
+	*(*stacktrace_pos + 1) = 0UL;
+
+	/* Increment the current pos */
+	++*stacktrace_pos;
+
+	return true;
+}
+
+/**
+ * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Save the unwinded stack addresses to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+	void *stacktrace_start = (void *)this_cpu_ptr(pkvm_stacktrace);
+	struct unwind_state state;
+
+	kvm_nvhe_unwind_init(&state, fp, pc);
+
+	unwind(&state, pkvm_save_backtrace_entry, &stacktrace_start);
+}
+#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+}
 #endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+/**
+ * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Saves the information needed by the host to dump the nVHE hypervisor
+ * backtrace.
+ */
+void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
+{
+	if (is_protected_kvm_enabled())
+		pkvm_save_backtrace(fp, pc);
+}
-- 
2.37.0.170.g444d1eabd0-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

WARNING: multiple messages have this Message-ID (diff)
From: Kalesh Singh <kaleshsingh@google.com>
To: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	 madvenka@linux.microsoft.com
Cc: will@kernel.org, qperret@google.com, tabba@google.com,
	 kaleshsingh@google.com, james.morse@arm.com,
	alexandru.elisei@arm.com,  suzuki.poulose@arm.com,
	catalin.marinas@arm.com, andreyknvl@gmail.com,
	 russell.king@oracle.com, vincenzo.frascino@arm.com,
	mhiramat@kernel.org,  ast@kernel.org, drjones@redhat.com,
	wangkefeng.wang@huawei.com,  elver@google.com, keirf@google.com,
	yuzenghui@huawei.com, ardb@kernel.org,  oupton@google.com,
	linux-arm-kernel@lists.infradead.org,
	 kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	 android-mm@google.com, kernel-team@android.com
Subject: [PATCH v4 12/18] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace
Date: Thu, 14 Jul 2022 23:10:21 -0700	[thread overview]
Message-ID: <20220715061027.1612149-13-kaleshsingh@google.com> (raw)
In-Reply-To: <20220715061027.1612149-1-kaleshsingh@google.com>

In protected nVHE mode, the host cannot access privately owned hypervisor
memory. Also, the hypervisor aims to remain simple to reduce the attack
surface and does not provide any printk support.

For the above reasons, the approach taken to provide hypervisor stacktraces
in protected mode is:
   1) Unwind and save the hyp stack addresses in EL2 to a shared buffer
      with the host (done in this patch).
   2) Delegate the dumping and symbolization of the addresses to the
      host in EL1 (later patch in the series).

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
---
 arch/arm64/include/asm/stacktrace/nvhe.h | 18 ++++++
 arch/arm64/kvm/hyp/nvhe/stacktrace.c     | 70 ++++++++++++++++++++++++
 2 files changed, 88 insertions(+)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 36cf7858ddd8..456a6ae08433 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -21,6 +21,22 @@
 
 #include <asm/stacktrace/common.h>
 
+/**
+ * kvm_nvhe_unwind_init - Start an unwind from the given nVHE HYP fp and pc
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ */
+static __always_inline void kvm_nvhe_unwind_init(struct unwind_state *state,
+						 unsigned long fp,
+						 unsigned long pc)
+{
+	unwind_init_common(state, NULL);
+
+	state->fp = fp;
+	state->pc = pc;
+}
+
 static inline bool on_accessible_stack(const struct task_struct *tsk,
 				       unsigned long sp, unsigned long size,
 				       struct stack_info *info)
@@ -33,6 +49,8 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
  */
 #ifdef __KVM_NVHE_HYPERVISOR__
 
+extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
 				     struct stack_info *info)
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
index 96c8b93320eb..832a536e440f 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -11,4 +11,74 @@ DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
 
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
+
+/**
+ * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
+ *
+ * @arg    : the position of the entry in the stacktrace buffer
+ * @where  : the program counter corresponding to the stack frame
+ *
+ * Save the return address of a stack frame to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
+{
+	unsigned long **stacktrace_pos = (unsigned long **)arg;
+	unsigned long stacktrace_start, stacktrace_end;
+
+	stacktrace_start = (unsigned long)this_cpu_ptr(pkvm_stacktrace);
+	stacktrace_end = stacktrace_start + NVHE_STACKTRACE_SIZE - (2 * sizeof(long));
+
+	if ((unsigned long) *stacktrace_pos > stacktrace_end)
+		return false;
+
+	/* Save the entry to the current pos in stacktrace buffer */
+	**stacktrace_pos = where;
+
+	/* A zero entry delimits the end of the stacktrace. */
+	*(*stacktrace_pos + 1) = 0UL;
+
+	/* Increment the current pos */
+	++*stacktrace_pos;
+
+	return true;
+}
+
+/**
+ * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Save the unwinded stack addresses to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+	void *stacktrace_start = (void *)this_cpu_ptr(pkvm_stacktrace);
+	struct unwind_state state;
+
+	kvm_nvhe_unwind_init(&state, fp, pc);
+
+	unwind(&state, pkvm_save_backtrace_entry, &stacktrace_start);
+}
+#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+}
 #endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
+/**
+ * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Saves the information needed by the host to dump the nVHE hypervisor
+ * backtrace.
+ */
+void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
+{
+	if (is_protected_kvm_enabled())
+		pkvm_save_backtrace(fp, pc);
+}
-- 
2.37.0.170.g444d1eabd0-goog


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  parent reply	other threads:[~2022-07-15  6:12 UTC|newest]

Thread overview: 162+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-07-15  6:10 [PATCH v4 00/18] KVM nVHE Hypervisor stack unwinder Kalesh Singh
2022-07-15  6:10 ` Kalesh Singh
2022-07-15  6:10 ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 01/18] arm64: stacktrace: Add shared header for common stack unwinding code Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 12:37   ` Mark Brown
2022-07-15 12:37     ` Mark Brown
2022-07-15 12:37     ` Mark Brown
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-18 12:52   ` Russell King (Oracle)
2022-07-18 12:52     ` Russell King (Oracle)
2022-07-18 12:52     ` Russell King (Oracle)
2022-07-18 15:26     ` Kalesh Singh
2022-07-18 15:26       ` Kalesh Singh
2022-07-18 15:26       ` Kalesh Singh
2022-07-18 16:00       ` Russell King (Oracle)
2022-07-18 16:00         ` Russell King (Oracle)
2022-07-18 16:00         ` Russell King (Oracle)
2022-07-15  6:10 ` [PATCH v4 02/18] arm64: stacktrace: Factor out on_accessible_stack_common() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 16:28   ` Mark Brown
2022-07-15 16:28     ` Mark Brown
2022-07-15 16:28     ` Mark Brown
2022-07-15  6:10 ` [PATCH v4 03/18] arm64: stacktrace: Factor out unwind_next_common() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 16:29   ` Mark Brown
2022-07-15 16:29     ` Mark Brown
2022-07-15 16:29     ` Mark Brown
2022-07-15  6:10 ` [PATCH v4 04/18] arm64: stacktrace: Handle frame pointer from different address spaces Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:56   ` Fuad Tabba
2022-07-15 13:56     ` Fuad Tabba
2022-07-15 13:56     ` Fuad Tabba
2022-07-18 17:40     ` Kalesh Singh
2022-07-18 17:40       ` Kalesh Singh
2022-07-18 17:40       ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 05/18] arm64: stacktrace: Factor out common unwind() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:58   ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15 13:58     ` Fuad Tabba
2022-07-15  6:10 ` [PATCH v4 06/18] arm64: stacktrace: Add description of stacktrace/common.h Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:59   ` Fuad Tabba
2022-07-15 13:59     ` Fuad Tabba
2022-07-15 13:59     ` Fuad Tabba
2022-07-17  9:57   ` Marc Zyngier
2022-07-17  9:57     ` Marc Zyngier
2022-07-17  9:57     ` Marc Zyngier
2022-07-18 16:53     ` Kalesh Singh
2022-07-18 16:53       ` Kalesh Singh
2022-07-18 16:53       ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 07/18] KVM: arm64: On stack overflow switch to hyp overflow_stack Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  9:46   ` Fuad Tabba
2022-07-18  9:46     ` Fuad Tabba
2022-07-18  9:46     ` Fuad Tabba
2022-07-15  6:10 ` [PATCH v4 08/18] KVM: arm64: Add PROTECTED_NVHE_STACKTRACE Kconfig Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  6:55   ` Marc Zyngier
2022-07-18  6:55     ` Marc Zyngier
2022-07-18  6:55     ` Marc Zyngier
2022-07-18 17:03     ` Kalesh Singh
2022-07-18 17:03       ` Kalesh Singh
2022-07-18 17:03       ` Kalesh Singh
2022-07-19 10:35       ` Marc Zyngier
2022-07-19 10:35         ` Marc Zyngier
2022-07-19 10:35         ` Marc Zyngier
2022-07-19 18:23         ` Kalesh Singh
2022-07-19 18:23           ` Kalesh Singh
2022-07-19 18:23           ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 09/18] KVM: arm64: Allocate shared pKVM hyp stacktrace buffers Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  7:13   ` Marc Zyngier
2022-07-18  7:13     ` Marc Zyngier
2022-07-18  7:13     ` Marc Zyngier
2022-07-18 17:27     ` Kalesh Singh
2022-07-18 17:27       ` Kalesh Singh
2022-07-18 17:27       ` Kalesh Singh
2022-07-18 10:00   ` Fuad Tabba
2022-07-18 10:00     ` Fuad Tabba
2022-07-18 10:00     ` Fuad Tabba
2022-07-15  6:10 ` [PATCH v4 10/18] KVM: arm64: Stub implementation of pKVM HYP stack unwinder Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  7:20   ` Marc Zyngier
2022-07-18  7:20     ` Marc Zyngier
2022-07-18  7:20     ` Marc Zyngier
2022-07-15  6:10 ` [PATCH v4 11/18] KVM: arm64: Stub implementation of non-protected nVHE " Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  7:30   ` Marc Zyngier
2022-07-18  7:30     ` Marc Zyngier
2022-07-18  7:30     ` Marc Zyngier
2022-07-18 16:51     ` Kalesh Singh
2022-07-18 16:51       ` Kalesh Singh
2022-07-18 16:51       ` Kalesh Singh
2022-07-18 16:57       ` Marc Zyngier
2022-07-18 16:57         ` Marc Zyngier
2022-07-18 16:57         ` Marc Zyngier
2022-07-15  6:10 ` Kalesh Singh [this message]
2022-07-15  6:10   ` [PATCH v4 12/18] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-18  9:36   ` Marc Zyngier
2022-07-18  9:36     ` Marc Zyngier
2022-07-18  9:36     ` Marc Zyngier
2022-07-18 17:32     ` Kalesh Singh
2022-07-18 17:32       ` Kalesh Singh
2022-07-18 17:32       ` Kalesh Singh
2022-07-18 10:07   ` Fuad Tabba
2022-07-18 10:07     ` Fuad Tabba
2022-07-18 10:07     ` Fuad Tabba
2022-07-18 17:36     ` Kalesh Singh
2022-07-18 17:36       ` Kalesh Singh
2022-07-18 17:36       ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 13/18] KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 14/18] KVM: arm64: Implement protected nVHE hyp stack unwinder Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 15/18] KVM: arm64: Implement non-protected " Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 16/18] KVM: arm64: Introduce pkvm_dump_backtrace() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 17/18] KVM: arm64: Introduce hyp_dump_backtrace() Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10 ` [PATCH v4 18/18] KVM: arm64: Dump nVHE hypervisor stack on panic Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15  6:10   ` Kalesh Singh
2022-07-15 13:55 ` [PATCH v4 00/18] KVM nVHE Hypervisor stack unwinder Fuad Tabba
2022-07-15 13:55   ` Fuad Tabba
2022-07-15 13:55   ` Fuad Tabba
2022-07-15 18:58   ` Kalesh Singh
2022-07-15 18:58     ` Kalesh Singh
2022-07-15 18:58     ` Kalesh Singh
2022-07-16  0:04     ` Kalesh Singh
2022-07-16  0:04       ` Kalesh Singh
2022-07-16  0:04       ` Kalesh Singh
2022-07-19 10:43 ` Marc Zyngier
2022-07-19 10:43   ` Marc Zyngier
2022-07-19 10:43   ` Marc Zyngier

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220715061027.1612149-13-kaleshsingh@google.com \
    --to=kaleshsingh@google.com \
    --cc=alexandru.elisei@arm.com \
    --cc=andreyknvl@gmail.com \
    --cc=android-mm@google.com \
    --cc=ardb@kernel.org \
    --cc=ast@kernel.org \
    --cc=broonie@kernel.org \
    --cc=catalin.marinas@arm.com \
    --cc=drjones@redhat.com \
    --cc=elver@google.com \
    --cc=james.morse@arm.com \
    --cc=keirf@google.com \
    --cc=kernel-team@android.com \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=madvenka@linux.microsoft.com \
    --cc=mark.rutland@arm.com \
    --cc=maz@kernel.org \
    --cc=mhiramat@kernel.org \
    --cc=oupton@google.com \
    --cc=qperret@google.com \
    --cc=russell.king@oracle.com \
    --cc=suzuki.poulose@arm.com \
    --cc=tabba@google.com \
    --cc=vincenzo.frascino@arm.com \
    --cc=wangkefeng.wang@huawei.com \
    --cc=will@kernel.org \
    --cc=yuzenghui@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.