From: Kalesh Singh <kaleshsingh@google.com>
To: maz@kernel.org, mark.rutland@arm.com, broonie@kernel.org,
	 madvenka@linux.microsoft.com, tabba@google.com,
	oliver.upton@linux.dev
Cc: wangkefeng.wang@huawei.com, catalin.marinas@arm.com,
	ast@kernel.org, vincenzo.frascino@arm.com, will@kernel.org,
	kvmarm@lists.cs.columbia.edu, android-mm@google.com,
	kernel-team@android.com, elver@google.com,
	linux-arm-kernel@lists.infradead.org, andreyknvl@gmail.com,
	linux-kernel@vger.kernel.org, mhiramat@kernel.org
Subject: [PATCH v6 10/17] KVM: arm64: Implement non-protected nVHE hyp stack unwinder
Date: Tue, 26 Jul 2022 00:37:43 -0700	[thread overview]
Message-ID: <20220726073750.3219117-11-kaleshsingh@google.com> (raw)
In-Reply-To: <20220726073750.3219117-1-kaleshsingh@google.com>

Implement the common framework pieces needed for unwind() to work
in non-protected nVHE mode:
    - on_accessible_stack()
    - on_overflow_stack()
    - unwind_next()

The non-protected nVHE unwind() is used by the host in EL1 to unwind
and dump the hypervisor stacktrace.

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
---

Changes in v6:
  - Add Fuad’s Reviewed-by and Tested-by tags

Changes in v5:
  - Use regular comments instead of doc comments, per Fuad
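
For background, a minimal standalone sketch (plain userspace C, not kernel
code) of the frame-pointer walk that the common unwind()/unwind_next()
machinery performs: each AArch64 frame record is a {previous fp, return
address} pair, so the unwinder repeatedly loads the record at fp, reports
the return address, and steps to the previous fp. In the nVHE case the fp
values read from the hypervisor stack are hyp VAs, so the host must first
translate them with kvm_nvhe_stack_kern_va() (see the diff below) before
dereferencing them; the sketch skips that step and all addresses in it are
made up.

#include <stdio.h>

struct frame_record {
	unsigned long fp;	/* link to the caller's frame record */
	unsigned long lr;	/* return address saved in this frame */
};

int main(void)
{
	/* Synthetic three-frame call chain: leaf -> middle -> outermost. */
	struct frame_record outer = { 0, 0x1000 };	/* fp == 0 ends the walk */
	struct frame_record mid = { (unsigned long)&outer, 0x2000 };
	struct frame_record leaf = { (unsigned long)&mid, 0x3000 };
	unsigned long fp = (unsigned long)&leaf;

	while (fp) {
		const struct frame_record *rec = (const struct frame_record *)fp;

		printf(" [<%016lx>]\n", rec->lr);	/* report the return address */
		fp = rec->fp;				/* step to the caller's record */
	}
	return 0;
}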

 arch/arm64/include/asm/stacktrace/common.h |  2 +
 arch/arm64/include/asm/stacktrace/nvhe.h   | 76 +++++++++++++++++++++-
 arch/arm64/kvm/arm.c                       |  2 +-
 3 files changed, 77 insertions(+), 3 deletions(-)
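
A second standalone sketch (again plain userspace C with made-up example
addresses, not kernel code) of the offset-based translation performed by
kvm_nvhe_stack_kern_va() in the hunk below: a hyp stack address is only
meaningful as an offset from the hyp stack base, so the host recovers a
usable kernel VA by applying that offset to the kernel-side base of the
same stack. Range checking is omitted here because in the unwinder proper
the address has already been validated by on_hyp_stack() or
on_overflow_stack() before it is translated and dereferenced.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the translation step: kernel VA = kernel base + hyp offset. */
static bool stack_kern_va(unsigned long *addr,
			  unsigned long hyp_base, unsigned long kern_base)
{
	unsigned long hyp_offset = *addr - hyp_base;

	*addr = kern_base + hyp_offset;
	return true;
}

int main(void)
{
	unsigned long hyp_stack_base = 0xffffffc008010000UL;	/* example hyp VA */
	unsigned long kern_stack_base = 0xffff800010a40000UL;	/* example kernel VA */
	unsigned long fp = 0xffffffc008010f40UL;		/* fp read from the hyp stack */

	if (stack_kern_va(&fp, hyp_stack_base, kern_stack_base))
		printf("fp as a kernel VA: 0x%lx\n", fp);	/* 0xffff800010a40f40 */
	return 0;
}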

diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 45474b383630..3ebb69ea374a 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -34,6 +34,7 @@ enum stack_type {
 	STACK_TYPE_OVERFLOW,
 	STACK_TYPE_SDEI_NORMAL,
 	STACK_TYPE_SDEI_CRITICAL,
+	STACK_TYPE_HYP,
 	__NR_STACK_TYPES
 };
 
@@ -186,6 +187,7 @@ static inline int unwind_next_common(struct unwind_state *state,
 	 *
 	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
 	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+	 * HYP -> OVERFLOW
 	 *
 	 * ... but the nesting itself is strict. Once we transition from one
 	 * stack to another, it's never valid to unwind back to that first
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 1192ae0f80c1..21082fd4a0b7 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -16,10 +16,19 @@
 
 #include <asm/stacktrace/common.h>
 
+static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
+				struct stack_info *info);
+
 static inline bool on_accessible_stack(const struct task_struct *tsk,
 				       unsigned long sp, unsigned long size,
 				       struct stack_info *info)
 {
+	if (on_accessible_stack_common(tsk, sp, size, info))
+		return true;
+
+	if (on_hyp_stack(sp, size, info))
+		return true;
+
 	return false;
 }
 
@@ -31,15 +40,78 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
  * (by the host in EL1).
  */
 
+DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+
+/*
+ * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
+ *
+ * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
+ * allow for guard pages below the stack. Consequently, the fixed offset address
+ * translation macros won't work here.
+ *
+ * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
+ * stack base.
+ *
+ * Returns true on success and updates @addr to its corresponding kernel VA;
+ * otherwise returns false.
+ */
+static inline bool kvm_nvhe_stack_kern_va(unsigned long *addr,
+					  enum stack_type type)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info;
+	unsigned long hyp_base, kern_base, hyp_offset;
+
+	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+	switch (type) {
+	case STACK_TYPE_HYP:
+		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
+		hyp_base = (unsigned long)stacktrace_info->stack_base;
+		break;
+	case STACK_TYPE_OVERFLOW:
+		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
+		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
+		break;
+	default:
+		return false;
+	}
+
+	hyp_offset = *addr - hyp_base;
+
+	*addr = kern_base + hyp_offset;
+
+	return true;
+}
+
 static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
 				     struct stack_info *info)
 {
-	return false;
+	struct kvm_nvhe_stacktrace_info *stacktrace_info
+				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
+	unsigned long high = low + OVERFLOW_STACK_SIZE;
+
+	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+}
+
+static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
+				struct stack_info *info)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info
+				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+	unsigned long low = (unsigned long)stacktrace_info->stack_base;
+	unsigned long high = low + PAGE_SIZE;
+
+	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
 }
 
 static inline int notrace unwind_next(struct unwind_state *state)
 {
-	return 0;
+	struct stack_info info;
+
+	return unwind_next_common(state, &info, kvm_nvhe_stack_kern_va);
 }
 NOKPROBE_SYMBOL(unwind_next);
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a0188144a122..6a64293108c5 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -49,7 +49,7 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
 
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
-static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
-- 
2.37.1.359.gd136c6c3e2-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Thread overview: 112+ messages
2022-07-26  7:37 [PATCH v6 00/17] KVM nVHE Hypervisor stack unwinder Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 01/17] arm64: stacktrace: Add shared header for common stack unwinding code Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 02/17] arm64: stacktrace: Factor out on_accessible_stack_common() Kalesh Singh
2022-07-26 16:01   ` Marc Zyngier
2022-07-26 16:33     ` Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 03/17] arm64: stacktrace: Factor out unwind_next_common() Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 04/17] arm64: stacktrace: Handle frame pointer from different address spaces Kalesh Singh
2022-07-26 14:34   ` Mark Brown
2022-07-26 15:30     ` Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 05/17] arm64: stacktrace: Factor out common unwind() Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 06/17] arm64: stacktrace: Add description of stacktrace/common.h Kalesh Singh
2022-07-26 14:49   ` Mark Brown
2022-07-26  7:37 ` [PATCH v6 07/17] KVM: arm64: On stack overflow switch to hyp overflow_stack Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 08/17] KVM: arm64: Stub implementation of non-protected nVHE HYP stack unwinder Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 09/17] KVM: arm64: Prepare non-protected nVHE hypervisor stacktrace Kalesh Singh
2022-07-26 16:26   ` kernel test robot
2022-07-26  7:37 ` [PATCH v6 10/17] KVM: arm64: Implement non-protected nVHE hyp stack unwinder Kalesh Singh [this message]
2022-07-26  7:37 ` [PATCH v6 11/17] KVM: arm64: Introduce hyp_dump_backtrace() Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 12/17] KVM: arm64: Add PROTECTED_NVHE_STACKTRACE Kconfig Kalesh Singh
2022-07-26 10:00   ` Marc Zyngier
2022-07-26 15:33     ` Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 13/17] KVM: arm64: Allocate shared pKVM hyp stacktrace buffers Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 14/17] KVM: arm64: Stub implementation of pKVM HYP stack unwinder Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 15/17] KVM: arm64: Save protected-nVHE (pKVM) hyp stacktrace Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 16/17] KVM: arm64: Implement protected nVHE hyp stack unwinder Kalesh Singh
2022-07-26  7:37 ` [PATCH v6 17/17] KVM: arm64: Introduce pkvm_dump_backtrace() Kalesh Singh
2022-07-27 14:29   ` [PATCH 0/6] KVM: arm64: nVHE stack unwinder rework Marc Zyngier
2022-07-27 14:29     ` [PATCH 1/6] KVM: arm64: Move PROTECTED_NVHE_STACKTRACE around Marc Zyngier
2022-07-27 14:29     ` [PATCH 2/6] KVM: arm64: Move nVHE stacktrace unwinding into its own compilation unit Marc Zyngier
2022-07-27 14:29     ` [PATCH 3/6] KVM: arm64: Make unwind()/on_accessible_stack() per-unwinder functions Marc Zyngier
2022-07-27 17:32       ` Mark Brown
2022-07-27 14:29     ` [PATCH 4/6] KVM: arm64: Move nVHE-only helpers into kvm/stacktrace.c Marc Zyngier
2022-07-27 14:29     ` [PATCH 5/6] KVM: arm64: Don't open code ARRAY_SIZE() Marc Zyngier
2022-07-27 14:29     ` [PATCH 6/6] arm64: Update 'unwinder howto' Marc Zyngier
2022-07-27 15:56     ` [PATCH 0/6] KVM: arm64: nVHE stack unwinder rework Kalesh Singh
2022-07-27 16:01     ` Oliver Upton
2022-07-27 17:45     ` Marc Zyngier
2022-07-27 17:44 ` [PATCH v6 00/17] KVM nVHE Hypervisor stack unwinder Marc Zyngier
