From: Marc Zyngier <marc.zyngier@arm.com>
To: linux-arm-kernel@lists.infradead.org, kvm@vger.kernel.org,
	kvmarm@lists.cs.columbia.edu
Cc: Mark Rutland <mark.rutland@arm.com>,
	Peter Maydell <peter.maydell@linaro.org>,
	Andrew Jones <drjones@redhat.com>,
	Christoffer Dall <cdall@kernel.org>,
	Steve Capper <steve.capper@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Kristina Martsenko <kristina.martsenko@arm.com>,
	James Morse <james.morse@arm.com>
Subject: [PATCH v6 03/26] arm64: insn: Add encoder for bitwise operations using literals
Date: Wed, 14 Mar 2018 16:50:26 +0000	[thread overview]
Message-ID: <20180314165049.30105-4-marc.zyngier@arm.com> (raw)
In-Reply-To: <20180314165049.30105-1-marc.zyngier@arm.com>

We lack a way to encode operations such as AND, ORR and EOR that take
an immediate value. Doing so is quite involved, as it amounts to
reverse-engineering the decoding algorithm described by the
pseudocode function DecodeBitMasks().

The encoder has been tested by feeding it every possible literal value
and comparing its output with that of GAS.

Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/insn.h |   9 +++
 arch/arm64/kernel/insn.c      | 136 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 145 insertions(+)
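
For illustration only (not part of the patch): a minimal user-space
sketch of the two building blocks the encoder below relies on, namely
the inverse-Replicate() element-size search and the range_of_ones()
check. It substitutes compiler builtins for the kernel's __ffs64()
and BIT() helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * A range of ones looks like 0..01..10..0: once the trailing zeroes
 * are shifted out, the value must be one less than a power of 2.
 * Like the kernel version, this is undefined for 0 and ~0.
 */
static bool range_of_ones(uint64_t val)
{
	uint64_t sval = val >> __builtin_ctzll(val);

	return ((sval + 1) & sval) == 0;
}

/* Smallest power-of-2 element size that replicates to the full value */
static unsigned int element_size(uint64_t imm)
{
	unsigned int esz = 64, tmp;

	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		uint64_t emask = ((uint64_t)1 << tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;
		esz = tmp;
	}
	return esz;
}

int main(void)
{
	printf("esz = %u\n", element_size(0x00ff00ff00ff00ffULL));	/* 16 */
	printf("range = %d\n", range_of_ones(0xff00));			/* 1 */
	return 0;
}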

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 21fffdd290a3..815b35bc53ed 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -315,6 +315,10 @@ __AARCH64_INSN_FUNCS(eor,	0x7F200000, 0x4A000000)
 __AARCH64_INSN_FUNCS(eon,	0x7F200000, 0x4A200000)
 __AARCH64_INSN_FUNCS(ands,	0x7F200000, 0x6A000000)
 __AARCH64_INSN_FUNCS(bics,	0x7F200000, 0x6A200000)
+__AARCH64_INSN_FUNCS(and_imm,	0x7F800000, 0x12000000)
+__AARCH64_INSN_FUNCS(orr_imm,	0x7F800000, 0x32000000)
+__AARCH64_INSN_FUNCS(eor_imm,	0x7F800000, 0x52000000)
+__AARCH64_INSN_FUNCS(ands_imm,	0x7F800000, 0x72000000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0x7F000000, 0x34000000)
@@ -424,6 +428,11 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
 					 int shift,
 					 enum aarch64_insn_variant variant,
 					 enum aarch64_insn_logic_type type);
+u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
+				       enum aarch64_insn_variant variant,
+				       enum aarch64_insn_register Rn,
+				       enum aarch64_insn_register Rd,
+				       u64 imm);
 u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 			      enum aarch64_insn_prfm_type type,
 			      enum aarch64_insn_prfm_target target,
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7e432662d454..e87d6dcd7c82 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -1485,3 +1485,139 @@ pstate_check_t * const aarch32_opcode_cond_checks[16] = {
 	__check_hi, __check_ls, __check_ge, __check_lt,
 	__check_gt, __check_le, __check_al, __check_al
 };
+
+static bool range_of_ones(u64 val)
+{
+	/* Doesn't handle full ones or full zeroes */
+	u64 sval = val >> __ffs64(val);
+
+	/* One of Sean Eron Anderson's bithack tricks */
+	return ((sval + 1) & (sval)) == 0;
+}
+
+static u32 aarch64_encode_immediate(u64 imm,
+				    enum aarch64_insn_variant variant,
+				    u32 insn)
+{
+	unsigned int immr, imms, n, ones, ror, esz, tmp;
+	u64 mask = ~0UL;
+
+	/* Can't encode full zeroes or full ones */
+	if (!imm || !~imm)
+		return AARCH64_BREAK_FAULT;
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		if (upper_32_bits(imm))
+			return AARCH64_BREAK_FAULT;
+		esz = 32;
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= AARCH64_INSN_SF_BIT;
+		esz = 64;
+		break;
+	default:
+		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	/*
+	 * Inverse of Replicate(). Try to spot a repeating pattern
+	 * with a pow2 stride.
+	 */
+	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
+		u64 emask = BIT(tmp) - 1;
+
+		if ((imm & emask) != ((imm >> tmp) & emask))
+			break;
+
+		esz = tmp;
+		mask = emask;
+	}
+
+	/* N is only set if we're encoding a 64bit value */
+	n = esz == 64;
+
+	/* Trim imm to the element size */
+	imm &= mask;
+
+	/* That's how many ones we need to encode */
+	ones = hweight64(imm);
+
+	/*
+	 * imms is set to (ones - 1), prefixed with a string of ones
+	 * and a zero (the element size marker) if they fit. Cap it to 6 bits.
+	 */
+	imms  = ones - 1;
+	imms |= 0xf << ffs(esz);
+	imms &= BIT(6) - 1;
+
+	/* Compute the rotation */
+	if (range_of_ones(imm)) {
+		/*
+		 * Pattern: 0..01..10..0
+		 *
+		 * Compute how much rotation we need to right-align it
+		 */
+		ror = __ffs64(imm);
+	} else {
+		/*
+		 * Pattern: 0..01..10..01..1
+		 *
+		 * Fill the unused top bits with ones, and check if
+		 * the result is a valid immediate (all ones with a
+		 * contiguous range of zeroes).
+		 */
+		imm |= ~mask;
+		if (!range_of_ones(~imm))
+			return AARCH64_BREAK_FAULT;
+
+		/*
+		 * Compute the rotation to get a contiguous set of
+		 * ones, with the first bit set at position 0
+		 */
+		ror = fls(~imm);
+	}
+
+	/*
+	 * immr is the number of bits we need to rotate back to the
+	 * original set of ones. Note that this is relative to the
+	 * element size...
+	 */
+	immr = (esz - ror) % esz;
+
+	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
+	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
+}
+
+u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
+				       enum aarch64_insn_variant variant,
+				       enum aarch64_insn_register Rn,
+				       enum aarch64_insn_register Rd,
+				       u64 imm)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_LOGIC_AND:
+		insn = aarch64_insn_get_and_imm_value();
+		break;
+	case AARCH64_INSN_LOGIC_ORR:
+		insn = aarch64_insn_get_orr_imm_value();
+		break;
+	case AARCH64_INSN_LOGIC_EOR:
+		insn = aarch64_insn_get_eor_imm_value();
+		break;
+	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
+		insn = aarch64_insn_get_ands_imm_value();
+		break;
+	default:
+		pr_err("%s: unknown logical encoding %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
+	return aarch64_encode_immediate(imm, variant, insn);
+}
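
By way of example (the function and enum names are the ones added
above; the call site itself is hypothetical), generating
"orr x0, x1, #0xff00ff00ff00ff00" looks like this:

	u32 insn;

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_64BIT,
						  AARCH64_INSN_REG_1, /* Rn = x1 */
						  AARCH64_INSN_REG_0, /* Rd = x0 */
						  0xff00ff00ff00ff00);
	if (insn == AARCH64_BREAK_FAULT)
		return;	/* not encodable as a bitmask immediate */

For this immediate the pattern replicates with a 16-bit stride, so the
encoder picks esz = 16 and n = 0; the 0xff00 element is a range of ones
with ror = 8, giving immr = (16 - 8) % 16 = 8 and imms = 0x27 (the 0b10
element-size marker followed by ones - 1 = 7).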
-- 
2.14.2
