From: Suraj Jitindar Singh <surajjs@amazon.com>
To: <jingzhangos@google.com>
Cc: <alexandru.elisei@arm.com>, <james.morse@arm.com>,
	<kvm@vger.kernel.org>, <kvmarm@lists.linux.dev>,
	<linux-arm-kernel@lists.infradead.org>, <maz@kernel.org>,
	<oupton@google.com>, <pbonzini@redhat.com>, <rananta@google.com>,
	<reijiw@google.com>, <suzuki.poulose@arm.com>, <tabba@google.com>,
	<will@kernel.org>, <sjitindarsingh@gmail.com>,
	"Suraj Jitindar Singh" <surajjs@amazon.com>
Subject: [PATCH 2/3] KVM: arm64: Move non per vcpu flag checks out of kvm_arm_update_id_reg()
Date: Fri, 2 Jun 2023 15:14:46 -0700	[thread overview]
Message-ID: <20230602221447.1809849-3-surajjs@amazon.com> (raw)
In-Reply-To: <20230602221447.1809849-1-surajjs@amazon.com>

There are features masked in kvm_arm_update_id_reg() which cannot change
throughout the lifetime of a VM. Rather than masking them on every read of
the register, mask them once at idreg init time so that the value stored in
the kvm id_reg correctly reflects the state of support for those features.

Move masking of AA64PFR0_EL1.GIC and AA64PFR0_EL1.AMU into
read_sanitised_id_aa64pfr0_el1().
Create read_sanitised_id_aa64pfr1_el1() and mask AA64PFR1_EL1.SME.
Create read_sanitised_id_aa64isar2_el1() and mask AA64ISAR2_EL1.WFxT when
the ARM64_HAS_WFXT capability is absent.
Create read_sanitised_id_[mmfr4|aa64mmfr2]_el1() and mask CCIDX.

Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
---
 arch/arm64/kvm/sys_regs.c | 104 +++++++++++++++++++++++++++++++-------
 1 file changed, 86 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a4e662bd218b..59f8adda47fa 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1355,16 +1355,10 @@ static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 encoding, u64
 	case SYS_ID_AA64PFR0_EL1:
 		if (!vcpu_has_sve(vcpu))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
-		if (kvm_vgic_global_state.type == VGIC_V3) {
-			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
-			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
-		}
 		break;
 	case SYS_ID_AA64PFR1_EL1:
 		if (!kvm_has_mte(vcpu->kvm))
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-
-		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
@@ -1377,8 +1371,6 @@ static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 encoding, u64
 		if (!vcpu_has_ptrauth(vcpu))
 			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
 				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
-		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
-			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
 		break;
 	case SYS_ID_AA64DFR0_EL1:
 		/* Set PMUver to the required version */
@@ -1391,12 +1383,6 @@ static u64 kvm_arm_update_id_reg(const struct kvm_vcpu *vcpu, u32 encoding, u64
 		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
 				  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
 		break;
-	case SYS_ID_AA64MMFR2_EL1:
-		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
-		break;
-	case SYS_ID_MMFR4_EL1:
-		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
-		break;
 	}
 
 	return val;
@@ -1490,6 +1476,20 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
 	return REG_HIDDEN;
 }
 
+static u64 read_sanitised_id_mmfr4_el1(struct kvm_vcpu *vcpu,
+				       const struct sys_reg_desc *rd)
+{
+	u64 val;
+	u32 id = reg_to_encoding(rd);
+
+	val = read_sanitised_ftr_reg(id);
+
+	/* CCIDX is not supported */
+	val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+
+	return val;
+}
+
 static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 					  const struct sys_reg_desc *rd)
 {
@@ -1516,6 +1516,25 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 
 	val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
 
+	if (kvm_vgic_global_state.type == VGIC_V3) {
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
+		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
+	}
+
+	return val;
+}
+
+static u64 read_sanitised_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+					  const struct sys_reg_desc *rd)
+{
+	u64 val;
+	u32 id = reg_to_encoding(rd);
+
+	val = read_sanitised_ftr_reg(id);
+
+	/* SME is not supported */
+	val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
+
 	return val;
 }
 
@@ -1638,6 +1657,34 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	return pmuver_update(vcpu, rd, val, perfmon_to_pmuver(perfmon), valid_pmu);
 }
 
+static u64 read_sanitised_id_aa64isar2_el1(struct kvm_vcpu *vcpu,
+					   const struct sys_reg_desc *rd)
+{
+	u64 val;
+	u32 id = reg_to_encoding(rd);
+
+	val = read_sanitised_ftr_reg(id);
+
+	if (!cpus_have_final_cap(ARM64_HAS_WFXT))
+		val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
+
+	return val;
+}
+
+static u64 read_sanitised_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
+					   const struct sys_reg_desc *rd)
+{
+	u64 val;
+	u32 id = reg_to_encoding(rd);
+
+	val = read_sanitised_ftr_reg(id);
+
+	/* CCIDX is not supported */
+	val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
+
+	return val;
+}
+
 /*
  * cpufeature ID register user accessors
  *
@@ -2033,7 +2080,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	AA32_ID_SANITISED(ID_ISAR3_EL1),
 	AA32_ID_SANITISED(ID_ISAR4_EL1),
 	AA32_ID_SANITISED(ID_ISAR5_EL1),
-	AA32_ID_SANITISED(ID_MMFR4_EL1),
+	{ SYS_DESC(SYS_ID_MMFR4_EL1),
+	  .access     = access_id_reg,
+	  .get_user   = get_id_reg,
+	  .set_user   = set_id_reg,
+	  .visibility = aa32_id_visibility,
+	  .reset      = read_sanitised_id_mmfr4_el1,
+	  .val        = 0, },
 	AA32_ID_SANITISED(ID_ISAR6_EL1),
 
 	/* CRm=3 */
@@ -2054,7 +2107,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  .set_user = set_id_reg,
 	  .reset = read_sanitised_id_aa64pfr0_el1,
 	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
-	ID_SANITISED(ID_AA64PFR1_EL1),
+	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
+	  .access   = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_reg,
+	  .reset    = read_sanitised_id_aa64pfr1_el1,
+	  .val      = 0, },
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
 	ID_SANITISED(ID_AA64ZFR0_EL1),
@@ -2080,7 +2138,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* CRm=6 */
 	ID_SANITISED(ID_AA64ISAR0_EL1),
 	ID_SANITISED(ID_AA64ISAR1_EL1),
-	ID_SANITISED(ID_AA64ISAR2_EL1),
+	{ SYS_DESC(SYS_ID_AA64ISAR2_EL1),
+	  .access   = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_reg,
+	  .reset    = read_sanitised_id_aa64isar2_el1,
+	  .val      = 0, },
 	ID_UNALLOCATED(6,3),
 	ID_UNALLOCATED(6,4),
 	ID_UNALLOCATED(6,5),
@@ -2090,7 +2153,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* CRm=7 */
 	ID_SANITISED(ID_AA64MMFR0_EL1),
 	ID_SANITISED(ID_AA64MMFR1_EL1),
-	ID_SANITISED(ID_AA64MMFR2_EL1),
+	{ SYS_DESC(SYS_ID_AA64MMFR2_EL1),
+	  .access   = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_reg,
+	  .reset    = read_sanitised_id_aa64mmfr2_el1,
+	  .val      = 0, },
 	ID_UNALLOCATED(7,3),
 	ID_UNALLOCATED(7,4),
 	ID_UNALLOCATED(7,5),
-- 
2.34.1
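
For readers unfamiliar with the ARM64_FEATURE_MASK()/FIELD_PREP() idiom the
patch uses for ID_AA64PFR0_EL1.GIC, the following standalone userspace
sketch shows the same clear-then-set operation. It is an illustration only,
not kernel code; the field offset is taken from the Arm ARM:

#include <stdint.h>
#include <stdio.h>

/* ID_AA64PFR0_EL1.GIC occupies bits [27:24]; the value 1 advertises a
 * system-register (GICv3) CPU interface to the guest. */
#define GIC_SHIFT	24
#define GIC_MASK	(UINT64_C(0xf) << GIC_SHIFT)

static uint64_t set_gic_field(uint64_t val, uint64_t gic)
{
	val &= ~GIC_MASK;			/* clear the existing field */
	val |= (gic << GIC_SHIFT) & GIC_MASK;	/* insert the new value */
	return val;
}

int main(void)
{
	uint64_t pfr0 = UINT64_C(0x3) << GIC_SHIFT;	/* arbitrary start */

	pfr0 = set_gic_field(pfr0, 1);
	printf("ID_AA64PFR0_EL1 = %#llx\n", (unsigned long long)pfr0);
	return 0;
}

In the kernel the mask and shift come from ARM64_FEATURE_MASK() and
FIELD_PREP() rather than hand-rolled constants, but the bit manipulation
is the same.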


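The division of labour the commit message describes - invariant masking at
init time, per-vCPU masking at read time - can be modelled in isolation. A
toy userspace sketch, with hypothetical function names and field offsets
per the Arm ARM (this is not the KVM implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ID_AA64PFR0_EL1 fields: SVE is bits [35:32], AMU is bits [47:44]. */
#define SVE_MASK	(UINT64_C(0xf) << 32)	/* depends on the vCPU  */
#define AMU_MASK	(UINT64_C(0xf) << 44)	/* never exposed by KVM */

/* Runs once, when the VM's copy of the register is initialised: strip
 * features whose availability cannot change for the VM's lifetime. */
static uint64_t reset_id_aa64pfr0(uint64_t hw_val)
{
	return hw_val & ~AMU_MASK;
}

/* Runs on every guest read: apply only the masking that depends on the
 * configuration of the vCPU performing the read. */
static uint64_t read_id_aa64pfr0(uint64_t stored, bool vcpu_has_sve)
{
	if (!vcpu_has_sve)
		stored &= ~SVE_MASK;
	return stored;
}

int main(void)
{
	uint64_t hw = SVE_MASK | AMU_MASK;	/* pretend hardware has both */
	uint64_t vm_val = reset_id_aa64pfr0(hw);

	printf("vCPU with SVE:    %#llx\n",
	       (unsigned long long)read_id_aa64pfr0(vm_val, true));
	printf("vCPU without SVE: %#llx\n",
	       (unsigned long long)read_id_aa64pfr0(vm_val, false));
	return 0;
}

The patch applies exactly this split: the read_sanitised_*() helpers become
.reset callbacks that compute the stored value once, while
kvm_arm_update_id_reg() keeps only the cases that vary per vCPU.
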
Thread overview: 40+ messages
2023-06-02  0:51 [PATCH v11 0/5] Support writable CPU ID registers from userspace Jing Zhang
2023-06-02  0:51 ` [PATCH v11 1/5] KVM: arm64: Save ID registers' sanitized value per guest Jing Zhang
2023-06-02  0:51 ` [PATCH v11 2/5] KVM: arm64: Use per guest ID register for ID_AA64PFR0_EL1.[CSV2|CSV3] Jing Zhang
2023-06-02  0:51 ` [PATCH v11 3/5] KVM: arm64: Use per guest ID register for ID_AA64DFR0_EL1.PMUVer Jing Zhang
2023-06-02  0:51 ` [PATCH v11 4/5] KVM: arm64: Reuse fields of sys_reg_desc for idreg Jing Zhang
2023-06-02  0:51 ` [PATCH v11 5/5] KVM: arm64: Refactor writings for PMUVer/CSV2/CSV3 Jing Zhang
2023-06-02 17:15   ` Jing Zhang
2023-06-02 22:27     ` Jitindar Singh, Suraj
2023-06-03  0:08       ` Jing Zhang
2023-06-02 19:21   ` Jitindar Singh, Suraj
2023-06-03  0:03     ` Jing Zhang
2023-06-02 22:14 ` [PATCH 0/3] RE: Support writable CPU ID registers from userspace [v11] Suraj Jitindar Singh
2023-06-02 22:14   ` [PATCH 1/3] KVM: arm64: Update id_reg limit value based on per vcpu flags Suraj Jitindar Singh
2023-06-02 22:14   ` [PATCH 2/3] KVM: arm64: Move non per vcpu flag checks out of kvm_arm_update_id_reg() Suraj Jitindar Singh [this message]
2023-06-02 22:14   ` [PATCH 3/3] KVM: arm64: Use per guest ID register for ID_AA64PFR1_EL1.MTE Suraj Jitindar Singh
2023-06-03  8:28     ` Marc Zyngier
2023-06-05 16:39       ` Cornelia Huck
2023-06-06 16:42         ` Marc Zyngier
2023-06-07 10:09           ` Cornelia Huck
2023-06-08 17:57           ` Catalin Marinas
