From: Reiji Watanabe <reijiw@google.com>
To: Marc Zyngier <maz@kernel.org>, kvmarm@lists.cs.columbia.edu
Cc: kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	James Morse <james.morse@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Will Deacon <will@kernel.org>, Andrew Jones <drjones@redhat.com>,
	Peng Liang <liangpeng10@huawei.com>,
	Peter Shier <pshier@google.com>,
	Ricardo Koller <ricarkol@google.com>,
	Oliver Upton <oupton@google.com>,
	Jing Zhang <jingzhangos@google.com>,
	Raghavendra Rao Anata <rananta@google.com>,
	Reiji Watanabe <reijiw@google.com>
Subject: [RFC PATCH v2 09/28] KVM: arm64: Make ID_AA64MMFR0_EL1 writable
Date: Tue,  2 Nov 2021 23:25:01 -0700
Message-ID: <20211103062520.1445832-10-reijiw@google.com>
In-Reply-To: <20211103062520.1445832-1-reijiw@google.com>

Add id_reg_info for ID_AA64MMFR0_EL1 to make the register writable
by userspace.

Since the ID_AA64MMFR0_EL1 stage 2 granule size fields don't follow the
standard ID scheme, special handling is needed to validate those fields.
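
For illustration, the encoding mismatch can be summarised as follows: the
stage 2 fields (TGRAN{4,64,16}_2) use 0 for "use the corresponding stage 1
field", 1 for "not supported" and 2 for "supported", while the stage 1
TGRAN4/TGRAN64 fields are signed (-1: not supported, 0: supported) and
TGRAN16 is unsigned (0: not supported, 1: supported).  So, for instance, a
requested TGRAN4_2 of 0 has to be checked against the stage 1 TGRAN4 field
instead.  The helper below is only a sketch of that mapping, based on the
comments in aa64mmfr0_tgran2_check(); tgran1_to_tgran2_enc() is
hypothetical and not part of this patch:

	/*
	 * Convert a stage 1 TGRAN* field value to the encoding used by the
	 * stage 2 TGRAN*_2 fields; the two schemes differ by a fixed offset.
	 */
	static inline s64 tgran1_to_tgran2_enc(s64 tgran1, bool is_signed)
	{
		/* signed stage 1: -1/0/...; unsigned stage 1: 0/1/... */
		return tgran1 + (is_signed ? 2 : 1);
	}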

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/kvm/sys_regs.c | 118 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 118 insertions(+)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 83b05d37afbd..7c1ac456dc94 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -508,6 +508,113 @@ static int validate_id_aa64isar1_el1(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+/*
+ * Check if the requested stage2 translation granule size indicated in
+ * @mmfr0 is also indicated in @mmfr0_lim.  This function assumes that
+ * the stage1 granule size indicated in @mmfr0 has been validated already.
+ */
+static int aa64mmfr0_tgran2_check(int field, u64 mmfr0, u64 mmfr0_lim)
+{
+	s64 tgran2, lim_tgran2, rtgran1;
+	int f1;
+	bool is_signed = true;
+
+	tgran2 = cpuid_feature_extract_unsigned_field(mmfr0, field);
+	lim_tgran2 = cpuid_feature_extract_unsigned_field(mmfr0_lim, field);
+	if (tgran2 == lim_tgran2)
+		return 0;
+
+	if (tgran2 && lim_tgran2)
+		return (tgran2 > lim_tgran2) ? -E2BIG : 0;
+
+	/*
+	 * Either tgran2 or lim_tgran2 is zero.
+	 * Need stage1 granule size to validate tgran2.
+	 */
+	switch (field) {
+	case ID_AA64MMFR0_TGRAN4_2_SHIFT:
+		f1 = ID_AA64MMFR0_TGRAN4_SHIFT;
+		break;
+	case ID_AA64MMFR0_TGRAN64_2_SHIFT:
+		f1 = ID_AA64MMFR0_TGRAN64_SHIFT;
+		break;
+	case ID_AA64MMFR0_TGRAN16_2_SHIFT:
+		f1 = ID_AA64MMFR0_TGRAN16_SHIFT;
+		is_signed = false;
+		break;
+	default:
+		/* Should never happen */
+		WARN_ONCE(1, "Unexpected stage2 granule field (%d)\n", field);
+		return 0;
+	}
+
+	/*
+	 * If tgran2 == 0 (&& lim_tgran2 != 0), the requested stage2 granule
+	 * size is indicated in the stage1 granule size field of @mmfr0.
+	 * So, validate the stage1 granule size against the stage2 limit
+	 * granule size.
+	 * If lim_tgran2 == 0 (&& tgran2 != 0), the stage2 limit granule size
+	 * is indicated in the stage1 granule size field of @mmfr0_lim.
+	 * So, validate the requested stage2 granule size against the stage1
+	 * limit granule size.
+	 */
+
+	 /* Get the relevant stage1 granule size to validate tgran2 */
+	if (tgran2 == 0)
+		/* The requested stage1 granule size */
+		rtgran1 = cpuid_feature_extract_field(mmfr0, f1, is_signed);
+	else /* lim_tgran2 == 0 */
+		/* The stage1 limit granule size */
+		rtgran1 = cpuid_feature_extract_field(mmfr0_lim, f1, is_signed);
+
+	/*
+	 * Adjust the value of rtgran1 to compare with stage2 granule size,
+	 * which indicates: 1: Not supported, 2: Supported, etc.
+	 */
+	if (is_signed)
+		/* For signed, -1: Not supported, 0: Supported, etc. */
+		rtgran1 += 0x2;
+	else
+		/* For unsigned, 0: Not supported, 1: Supported, etc. */
+		rtgran1 += 0x1;
+
+	if ((tgran2 == 0) && (rtgran1 > lim_tgran2))
+		/*
+		 * The requested stage1 granule size (== the requested stage2
+		 * granule size) is larger than the stage2 limit granule size.
+		 */
+		return -E2BIG;
+	else if ((lim_tgran2 == 0) && (tgran2 > rtgran1))
+		/*
+		 * The requested stage2 granule size is larger than the stage1
+		 * limit granule size (== the stage2 limit granule size).
+		 */
+		return -E2BIG;
+
+	return 0;
+}
+
+static int validate_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
+				     const struct id_reg_info *id_reg, u64 val)
+{
+	u64 limit = id_reg->vcpu_limit_val;
+	int ret;
+
+	ret = aa64mmfr0_tgran2_check(ID_AA64MMFR0_TGRAN4_2_SHIFT, val, limit);
+	if (ret)
+		return ret;
+
+	ret = aa64mmfr0_tgran2_check(ID_AA64MMFR0_TGRAN64_2_SHIFT, val, limit);
+	if (ret)
+		return ret;
+
+	ret = aa64mmfr0_tgran2_check(ID_AA64MMFR0_TGRAN16_2_SHIFT, val, limit);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static void init_id_aa64pfr0_el1_info(struct id_reg_info *id_reg)
 {
 	u64 limit = id_reg->vcpu_limit_val;
@@ -601,6 +708,16 @@ static struct id_reg_info id_aa64isar1_el1_info = {
 	.get_reset_val = get_reset_id_aa64isar1_el1,
 };
 
+static struct id_reg_info id_aa64mmfr0_el1_info = {
+	.sys_reg = SYS_ID_AA64MMFR0_EL1,
+	.ftr_check_types = S_FCT(ID_AA64MMFR0_TGRAN4_SHIFT, FCT_LOWER_SAFE) |
+			   S_FCT(ID_AA64MMFR0_TGRAN64_SHIFT, FCT_LOWER_SAFE) |
+			   U_FCT(ID_AA64MMFR0_TGRAN4_2_SHIFT, FCT_IGNORE) |
+			   U_FCT(ID_AA64MMFR0_TGRAN64_2_SHIFT, FCT_IGNORE) |
+			   U_FCT(ID_AA64MMFR0_TGRAN16_2_SHIFT, FCT_IGNORE),
+	.validate = validate_id_aa64mmfr0_el1,
+};
+
 /*
  * An ID register that needs special handling to control the value for the
  * guest must have its own id_reg_info in id_reg_info_table.
@@ -614,6 +731,7 @@ static struct id_reg_info *id_reg_info_table[KVM_ARM_ID_REG_MAX_NUM] = {
 	[IDREG_IDX(SYS_ID_AA64PFR1_EL1)] = &id_aa64pfr1_el1_info,
 	[IDREG_IDX(SYS_ID_AA64ISAR0_EL1)] = &id_aa64isar0_el1_info,
 	[IDREG_IDX(SYS_ID_AA64ISAR1_EL1)] = &id_aa64isar1_el1_info,
+	[IDREG_IDX(SYS_ID_AA64MMFR0_EL1)] = &id_aa64mmfr0_el1_info,
 };
 
 static int validate_id_reg(struct kvm_vcpu *vcpu,
-- 
2.33.1.1089.g2158813163f-goog
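
For context, userspace would exercise this validation path through
KVM_SET_ONE_REG.  The snippet below is only an illustrative sketch, not
part of this series; it assumes the KVM_CAP_ARM_ID_REG_WRITABLE capability
introduced later in the series has already been enabled on the VM, and
uses the standard ARM64_SYS_REG() encoding of ID_AA64MMFR0_EL1
(op0=3, op1=0, CRn=0, CRm=7, op2=0):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* ID_AA64MMFR0_EL1: op0=3, op1=0, CRn=0, CRm=7, op2=0 */
	#define REG_ID_AA64MMFR0_EL1	ARM64_SYS_REG(3, 0, 0, 7, 0)

	/*
	 * Sketch: mark the 16KB granule as not supported at stage 2
	 * (TGRAN16_2, bits [35:32], set to 1) and write the register back.
	 * KVM would reject the write with -E2BIG if the requested value
	 * exceeded the host's limit.
	 */
	static int hide_16k_stage2_granule(int vcpu_fd)
	{
		uint64_t val;
		struct kvm_one_reg reg = {
			.id = REG_ID_AA64MMFR0_EL1,
			.addr = (uint64_t)&val,
		};

		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
			return -1;

		val &= ~(0xfULL << 32);		/* clear TGRAN16_2 */
		val |= 1ULL << 32;		/* 1: not supported at stage 2 */

		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}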

