From: Fuad Tabba <tabba@google.com>
To: kvmarm@lists.cs.columbia.edu
Cc: kernel-team@android.com, kvm@vger.kernel.org, maz@kernel.org,
	will@kernel.org, linux-arm-kernel@lists.infradead.org
Subject: [RFC PATCH v1 05/30] KVM: arm64: add accessors for kvm_cpu_context
Date: Fri, 24 Sep 2021 13:53:34 +0100	[thread overview]
Message-ID: <20210924125359.2587041-6-tabba@google.com> (raw)
In-Reply-To: <20210924125359.2587041-1-tabba@google.com>

Add accessors to get/set elements of struct kvm_cpu_context.

This simplifies future refactoring and makes the code more consistent.
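
For example, a helper that only needs the guest's program counter can be
written against struct kvm_cpu_context rather than struct kvm_vcpu, so
that it can later be shared with code that has no vcpu at hand. A
minimal sketch (vcpu_pc_is_aligned/ctxt_pc_is_aligned are hypothetical
helpers used for illustration, not part of this series):

	/* Before: needs the whole vcpu just to reach the PC. */
	static bool vcpu_pc_is_aligned(const struct kvm_vcpu *vcpu)
	{
		return IS_ALIGNED(*vcpu_pc(vcpu), 4);
	}

	/* After: depends only on the state it actually touches. */
	static bool ctxt_pc_is_aligned(const struct kvm_cpu_context *ctxt)
	{
		return IS_ALIGNED(*ctxt_pc(ctxt), 4);
	}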

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_emulate.h | 52 ++++++++++++++++++++++------
 arch/arm64/include/asm/kvm_host.h    | 18 +++++++++-
 arch/arm64/kvm/hyp/exception.c       | 43 +++++++++++++++++-----
 3 files changed, 93 insertions(+), 20 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 01b9857757f2..ad6e53cef1a4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -127,19 +127,34 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 	vcpu->arch.vsesr_el2 = vsesr;
 }
 
+static __always_inline unsigned long *ctxt_pc(const struct kvm_cpu_context *ctxt)
+{
+	return (unsigned long *)&ctxt_gp_regs(ctxt)->pc;
+}
+
 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
+	return ctxt_pc(&vcpu_ctxt(vcpu));
+}
+
+static __always_inline unsigned long *ctxt_cpsr(const struct kvm_cpu_context *ctxt)
+{
+	return (unsigned long *)&ctxt_gp_regs(ctxt)->pstate;
 }
 
 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
+	return ctxt_cpsr(&vcpu_ctxt(vcpu));
+}
+
+static __always_inline bool ctxt_mode_is_32bit(const struct kvm_cpu_context *ctxt)
+{
+	return !!(*ctxt_cpsr(ctxt) & PSR_MODE32_BIT);
 }
 
 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
+	return ctxt_mode_is_32bit(&vcpu_ctxt(vcpu));
 }
 
 static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
@@ -150,27 +165,44 @@ static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static inline void ctxt_set_thumb(struct kvm_cpu_context *ctxt)
+{
+	*ctxt_cpsr(ctxt) |= PSR_AA32_T_BIT;
+}
+
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 {
-	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
+	ctxt_set_thumb(&vcpu_ctxt(vcpu));
 }
 
 /*
- * vcpu_get_reg and vcpu_set_reg should always be passed a register number
- * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
- * AArch32 with banked registers.
+ * vcpu/ctxt_get_reg and vcpu/ctxt_set_reg should always be passed a register
+ * number coming from a read of ESR_EL2. Otherwise, they may give the wrong
+ * result on AArch32 with banked registers.
  */
+static __always_inline unsigned long
+ctxt_get_reg(const struct kvm_cpu_context *ctxt, u8 reg_num)
+{
+	return (reg_num == 31) ? 0 : ctxt_gp_regs(ctxt)->regs[reg_num];
+}
+
+static __always_inline void
+ctxt_set_reg(struct kvm_cpu_context *ctxt, u8 reg_num, unsigned long val)
+{
+	if (reg_num != 31)
+		ctxt_gp_regs(ctxt)->regs[reg_num] = val;
+}
+
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 					 u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
+	return ctxt_get_reg(&vcpu_ctxt(vcpu), reg_num);
 }
 
 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 				unsigned long val)
 {
-	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
+	ctxt_set_reg(&vcpu_ctxt(vcpu), reg_num, val);
 }
 
 /*
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index adb21a7f0891..097e5f533af9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -446,7 +446,23 @@ struct kvm_vcpu_arch {
 #define vcpu_has_ptrauth(vcpu)		false
 #endif
 
-#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)
+#define vcpu_ctxt(vcpu) ((vcpu)->arch.ctxt)
+
+/* VCPU Context accessors (direct) */
+#define ctxt_gp_regs(c)		(&(c)->regs)
+#define ctxt_spsr_abt(c)	(&(c)->spsr_abt)
+#define ctxt_spsr_und(c)	(&(c)->spsr_und)
+#define ctxt_spsr_irq(c)	(&(c)->spsr_irq)
+#define ctxt_spsr_fiq(c)	(&(c)->spsr_fiq)
+#define ctxt_fp_regs(c)		(&(c)->fp_regs)
+
+/* VCPU Context accessors */
+#define vcpu_gp_regs(v)		ctxt_gp_regs(&vcpu_ctxt(v))
+#define vcpu_spsr_abt(v)	ctxt_spsr_abt(&vcpu_ctxt(v))
+#define vcpu_spsr_und(v)	ctxt_spsr_und(&vcpu_ctxt(v))
+#define vcpu_spsr_irq(v)	ctxt_spsr_irq(&vcpu_ctxt(v))
+#define vcpu_spsr_fiq(v)	ctxt_spsr_fiq(&vcpu_ctxt(v))
+#define vcpu_fp_regs(v)		ctxt_fp_regs(&vcpu_ctxt(v))
 
 /*
  * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 11541b94b328..643c5844f684 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -18,43 +18,68 @@
 #error Hypervisor code only!
 #endif
 
-static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
+static inline u64 __ctxt_read_sys_reg(const struct kvm_cpu_context *ctxt, int reg)
 {
 	u64 val;
 
 	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
 		return val;
 
-	return __vcpu_sys_reg(vcpu, reg);
+	return ctxt_sys_reg(ctxt, reg);
 }
 
-static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
+static inline void __ctxt_write_sys_reg(struct kvm_cpu_context *ctxt, u64 val, int reg)
 {
 	if (__vcpu_write_sys_reg_to_cpu(val, reg))
 		return;
 
-	 __vcpu_sys_reg(vcpu, reg) = val;
+	ctxt_sys_reg(ctxt, reg) = val;
 }
 
-static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
+static void __ctxt_write_spsr(struct kvm_cpu_context *ctxt, u64 val)
 {
 	write_sysreg_el1(val, SYS_SPSR);
 }
 
-static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
+static void __ctxt_write_spsr_abt(struct kvm_cpu_context *ctxt, u64 val)
 {
 	if (has_vhe())
 		write_sysreg(val, spsr_abt);
 	else
-		vcpu->arch.ctxt.spsr_abt = val;
+		*ctxt_spsr_abt(ctxt) = val;
 }
 
-static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
+static void __ctxt_write_spsr_und(struct kvm_cpu_context *ctxt, u64 val)
 {
 	if (has_vhe())
 		write_sysreg(val, spsr_und);
 	else
-		vcpu->arch.ctxt.spsr_und = val;
+		*ctxt_spsr_und(ctxt) = val;
+}
+
+static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
+{
+	return __ctxt_read_sys_reg(&vcpu_ctxt(vcpu), reg);
+}
+
+static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
+{
+	__ctxt_write_sys_reg(&vcpu_ctxt(vcpu), val, reg);
+}
+
+static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
+{
+	__ctxt_write_spsr(&vcpu_ctxt(vcpu), val);
+}
+
+static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
+{
+	__ctxt_write_spsr_abt(&vcpu_ctxt(vcpu), val);
+}
+
+static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
+{
+	__ctxt_write_spsr_und(&vcpu_ctxt(vcpu), val);
 }
 
 /*
-- 
2.33.0.685.g46640cef36-goog

Thread overview: 108+ messages

2021-09-24 12:53 [RFC PATCH v1 00/30] Reduce scope of vcpu state at hyp by refactoring out state hyp needs Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 01/30] KVM: arm64: placeholder to check if VM is protected Fuad Tabba
2021-09-27 15:50   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 02/30] [DONOTMERGE] Temporarily disable unused variable warning Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 03/30] [DONOTMERGE] Coccinelle scripts for refactoring Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 04/30] KVM: arm64: remove unused parameters and asm offsets Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 05/30] KVM: arm64: add accessors for kvm_cpu_context Fuad Tabba [this message]
2021-09-27 15:57   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 06/30] KVM: arm64: COCCI: use_ctxt_access.cocci: use kvm_cpu_context accessors Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 07/30] KVM: arm64: COCCI: add_ctxt.cocci use_ctxt.cocci: reduce scope of functions to kvm_cpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 08/30] KVM: arm64: add hypervisor state accessors Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 09/30] KVM: arm64: COCCI: vcpu_hyp_accessors.cocci: use accessors for hypervisor state vcpu variables Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 10/30] KVM: arm64: Add accessors for hypervisor state in kvm_vcpu_arch Fuad Tabba
2021-09-27 16:10   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 11/30] KVM: arm64: create and use a new vcpu_hyp_state struct Fuad Tabba
2021-09-27 16:32   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 12/30] KVM: arm64: COCCI: add_hypstate.cocci use_hypstate.cocci: Reduce scope of functions to hyp_state Fuad Tabba
2021-09-27 16:40   ` Quentin Perret
2021-09-24 12:53 ` [RFC PATCH v1 13/30] KVM: arm64: change function parameters to use kvm_cpu_ctxt and hyp_state Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 14/30] KVM: arm64: reduce scope of vgic v2 Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 15/30] KVM: arm64: COCCI: vgic3_cpu.cocci: reduce scope of vgic v3 Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 16/30] KVM: arm64: reduce scope of vgic_v3 access parameters Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 17/30] KVM: arm64: access __hyp_running_vcpu via accessors only Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 18/30] KVM: arm64: reduce scope of __guest_exit to only depend on kvm_cpu_context Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 19/30] KVM: arm64: change calls of get_loaded_vcpu to get_loaded_vcpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 20/30] KVM: arm64: add __hyp_running_ctxt and __hyp_running_hyps Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 21/30] KVM: arm64: transition code to __hyp_running_ctxt and __hyp_running_hyps Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 22/30] KVM: arm64: reduce scope of __guest_enter to depend only on kvm_cpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 23/30] KVM: arm64: COCCI: remove_unused.cocci: remove unused ctxt and hypstate variables Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 24/30] KVM: arm64: remove unused functions Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 25/30] KVM: arm64: separate kvm_run() for protected VMs Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 26/30] KVM: arm64: pVM activate_traps to use vcpu_ctxt and vcpu_hyp_state Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 27/30] KVM: arm64: remove unsupported pVM features Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 28/30] KVM: arm64: reduce scope of pVM fixup_guest_exit to hyp_state and kvm_cpu_ctxt Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 29/30] [DONOTMERGE] Remove Coccinelle scripts added for refactoring Fuad Tabba
2021-09-24 12:53 ` [RFC PATCH v1 30/30] [DONOTMERGE] Re-enable warnings Fuad Tabba
