From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	stable@vger.kernel.org, Catalin Marinas <catalin.marinas@arm.com>,
	James Morse <james.morse@arm.com>
Subject: [PATCH 5.10 39/58] arm64: Mitigate spectre style branch history side channels
Date: Thu, 10 Mar 2022 15:18:59 +0100	[thread overview]
Message-ID: <20220310140813.984380513@linuxfoundation.org> (raw)
In-Reply-To: <20220310140812.869208747@linuxfoundation.org>

From: James Morse <james.morse@arm.com>

commit 558c303c9734af5a813739cd284879227f7297d2 upstream.

Speculation attacks against some high-performance processors can
make use of branch history to influence future speculation. To
mitigate this, when taking an exception from user-space, a sequence
of branches or a firmware call overwrites or invalidates the branch
history.

The sequence of branches is added to the vectors, and should appear
before the first indirect branch. For systems using KPTI the sequence
is added to the kpti trampoline where it has a free register as the exit
from the trampoline is via a 'ret'. For systems not using KPTI, the same
register tricks are used to free up a register in the vectors.

For the firmware call, ARCH_WORKAROUND_3 clobbers 4 registers, so
there is no choice but to save them to the EL1 stack. This only happens
for entry from EL0, so if we take an exception due to the stack access,
it will not become re-entrant.

For KVM, the existing branch-predictor-hardening vectors are used.
When a Spectre-v2 version of these vectors is in use, the firmware call
is sufficient to also mitigate against Spectre-BHB. For the non-Spectre
versions, the sequence of branches is added to the indirect vector.
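
As a rough sketch of the decision ladder above (illustrative names
only, not the kernel's; the real logic is spectre_bhb_enable_mitigation()
in the diff below):

	/* Illustrative sketch, not the kernel's exact code or names. */
	enum bhb_mitigation { BHB_NOT_NEEDED, BHB_LOOP, BHB_FW, BHB_VULNERABLE };

	static enum bhb_mitigation pick_bhb_mitigation(int has_csv2p3, int has_ecbhb,
						       int loop_count, int fw_wa3)
	{
		if (has_csv2p3)		/* CSV2.3: CPU is unaffected */
			return BHB_NOT_NEEDED;
		if (has_ecbhb)		/* exception entry clears the BHB */
			return BHB_NOT_NEEDED;
		if (loop_count)		/* 8, 24 or 32 taken branches */
			return BHB_LOOP;
		if (fw_wa3)		/* firmware ARCH_WORKAROUND_3 call */
			return BHB_FW;
		return BHB_VULNERABLE;
	}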

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
[ modified for stable: removed the bitmap of mitigations, used the kvm
  template infrastructure ]
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm64/Kconfig                  |    9 +
 arch/arm64/include/asm/assembler.h  |    4 
 arch/arm64/include/asm/cpufeature.h |   15 ++
 arch/arm64/include/asm/cputype.h    |    8 +
 arch/arm64/include/asm/spectre.h    |    4 
 arch/arm64/include/asm/sysreg.h     |    1 
 arch/arm64/include/asm/vectors.h    |    5 
 arch/arm64/kernel/cpu_errata.c      |    7 +
 arch/arm64/kernel/proton-pack.c     |  234 +++++++++++++++++++++++++++++++++++-
 arch/arm64/kvm/hyp/hyp-entry.S      |    4 
 10 files changed, 288 insertions(+), 3 deletions(-)
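
For orientation before the diff: the "branchy loop" added below is
nothing more than a CPU-specific number of taken branches. A hedged C
sketch of the idea (the kernel's real sequence is the
__mitigate_spectre_bhb_loop macro in assembler.h, whose loop count is
patched at boot):

	/* Sketch only, assuming an AArch64 compiler; each taken branch
	 * inserts an entry into the branch history, displacing whatever
	 * user-space trained into it.
	 */
	static inline void clobber_branch_history(int k)
	{
		while (k--)
			asm volatile("b 1f\n1:" ::: "memory");
	}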

--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1184,6 +1184,15 @@ config UNMAP_KERNEL_AT_EL0
 
 	  If unsure, say Y.
 
+config MITIGATE_SPECTRE_BRANCH_HISTORY
+	bool "Mitigate Spectre style attacks against branch history" if EXPERT
+	default y
+	help
+	  Speculation attacks against some high-performance processors can
+	  make use of branch history to influence future speculation.
+	  When taking an exception from user-space, a sequence of branches
+	  or a firmware call overwrites the branch history.
+
 config RODATA_FULL_DEFAULT_ENABLED
 	bool "Apply r/o permissions of VM areas also to their linear aliases"
 	default y
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -797,7 +797,9 @@ USER(\label, ic	ivau, \tmp2)			// invali
 
 	.macro __mitigate_spectre_bhb_loop      tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-	mov	\tmp, #32
+alternative_cb  spectre_bhb_patch_loop_iter
+	mov	\tmp, #32		// Patched to correct the immediate
+alternative_cb_end
 .Lspectre_bhb_loop\@:
 	b	. + 4
 	subs	\tmp, \tmp, #1
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -606,6 +606,21 @@ static inline bool cpu_supports_mixed_en
 	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
 }
 
+static inline bool supports_csv2p3(int scope)
+{
+	u64 pfr0;
+	u8 csv2_val;
+
+	if (scope == SCOPE_LOCAL_CPU)
+		pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
+	else
+		pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
+							ID_AA64PFR0_CSV2_SHIFT);
+	return csv2_val == 3;
+}
+
 static inline bool system_supports_32bit_el0(void)
 {
 	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -73,10 +73,14 @@
 #define ARM_CPU_PART_CORTEX_A76		0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1	0xD0C
 #define ARM_CPU_PART_CORTEX_A77		0xD0D
+#define ARM_CPU_PART_NEOVERSE_V1	0xD40
+#define ARM_CPU_PART_CORTEX_A78		0xD41
+#define ARM_CPU_PART_CORTEX_X1		0xD44
 #define ARM_CPU_PART_CORTEX_A510	0xD46
 #define ARM_CPU_PART_CORTEX_A710	0xD47
 #define ARM_CPU_PART_CORTEX_X2		0xD48
 #define ARM_CPU_PART_NEOVERSE_N2	0xD49
+#define ARM_CPU_PART_CORTEX_A78C	0xD4B
 
 #define APM_CPU_PART_POTENZA		0x000
 
@@ -117,10 +121,14 @@
 #define MIDR_CORTEX_A76	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_NEOVERSE_V1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+#define MIDR_CORTEX_A78	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+#define MIDR_CORTEX_A78C	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
--- a/arch/arm64/include/asm/spectre.h
+++ b/arch/arm64/include/asm/spectre.h
@@ -30,5 +30,7 @@ void spectre_v4_enable_mitigation(const
 void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
 
 enum mitigation_state arm64_get_spectre_bhb_state(void);
-
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+u8 spectre_bhb_loop_affected(int scope);
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 #endif	/* __ASM_SPECTRE_H */
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -801,6 +801,7 @@
 #endif
 
 /* id_aa64mmfr1 */
+#define ID_AA64MMFR1_ECBHB_SHIFT	60
 #define ID_AA64MMFR1_AFP_SHIFT		44
 #define ID_AA64MMFR1_ETS_SHIFT		36
 #define ID_AA64MMFR1_TWED_SHIFT		32
--- a/arch/arm64/include/asm/vectors.h
+++ b/arch/arm64/include/asm/vectors.h
@@ -40,6 +40,11 @@ enum arm64_bp_harden_el1_vectors {
 	EL1_VECTOR_KPTI,
 };
 
+#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+#define EL1_VECTOR_BHB_LOOP		-1
+#define EL1_VECTOR_BHB_FW		-1
+#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
 /* The vectors to use on return from EL0. e.g. to remap the kernel */
 DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector);
 
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -473,6 +473,13 @@ const struct arm64_cpu_capabilities arm6
 		.matches = has_spectre_v4,
 		.cpu_enable = spectre_v4_enable_mitigation,
 	},
+	{
+		.desc = "Spectre-BHB",
+		.capability = ARM64_SPECTRE_BHB,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = is_spectre_bhb_affected,
+		.cpu_enable = spectre_bhb_enable_mitigation,
+	},
 #ifdef CONFIG_ARM64_ERRATUM_1418040
 	{
 		.desc = "ARM erratum 1418040",
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -24,8 +24,11 @@
 #include <linux/prctl.h>
 #include <linux/sched/task_stack.h>
 
+#include <asm/insn.h>
 #include <asm/spectre.h>
 #include <asm/traps.h>
+#include <asm/vectors.h>
+#include <asm/virt.h>
 
 /*
  * We try to ensure that the mitigation state can never change as the result of
@@ -814,6 +817,17 @@ int arch_prctl_spec_ctrl_get(struct task
 	}
 }
 
+/*
+ * Spectre BHB.
+ *
+ * A CPU is either:
+ * - Mitigated by a branchy loop a CPU specific number of times, and listed
+ *   in our "loop mitigated list".
+ * - Mitigated in software by the firmware Spectre v2 call.
+ * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
+ *   software mitigation in the vectors is needed.
+ * - Has CSV2.3, so is unaffected.
+ */
 static enum mitigation_state spectre_bhb_state;
 
 enum mitigation_state arm64_get_spectre_bhb_state(void)
@@ -821,6 +835,150 @@ enum mitigation_state arm64_get_spectre_
 	return spectre_bhb_state;
 }
 
+/*
+ * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
+ * SCOPE_SYSTEM call will give the right answer.
+ */
+u8 spectre_bhb_loop_affected(int scope)
+{
+	u8 k = 0;
+	static u8 max_bhb_k;
+
+	if (scope == SCOPE_LOCAL_CPU) {
+		static const struct midr_range spectre_bhb_k32_list[] = {
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+			{},
+		};
+		static const struct midr_range spectre_bhb_k24_list[] = {
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+			{},
+		};
+		static const struct midr_range spectre_bhb_k8_list[] = {
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+			{},
+		};
+
+		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
+			k = 32;
+		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
+			k = 24;
+		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
+			k =  8;
+
+		max_bhb_k = max(max_bhb_k, k);
+	} else {
+		k = max_bhb_k;
+	}
+
+	return k;
+}
+
+static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
+{
+	int ret;
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);
+
+	ret = res.a0;
+	switch (ret) {
+	case SMCCC_RET_SUCCESS:
+		return SPECTRE_MITIGATED;
+	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
+		return SPECTRE_UNAFFECTED;
+	default:
+		fallthrough;
+	case SMCCC_RET_NOT_SUPPORTED:
+		return SPECTRE_VULNERABLE;
+	}
+}
+
+static bool is_spectre_bhb_fw_affected(int scope)
+{
+	static bool system_affected;
+	enum mitigation_state fw_state;
+	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
+	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		{},
+	};
+	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
+					 spectre_bhb_firmware_mitigated_list);
+
+	if (scope != SCOPE_LOCAL_CPU)
+		return system_affected;
+
+	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
+		system_affected = true;
+		return true;
+	}
+
+	return false;
+}
+
+static bool supports_ecbhb(int scope)
+{
+	u64 mmfr1;
+
+	if (scope == SCOPE_LOCAL_CPU)
+		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
+	else
+		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+
+	return cpuid_feature_extract_unsigned_field(mmfr1,
+						    ID_AA64MMFR1_ECBHB_SHIFT);
+}
+
+bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
+			     int scope)
+{
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+	if (supports_csv2p3(scope))
+		return false;
+
+	if (spectre_bhb_loop_affected(scope))
+		return true;
+
+	if (is_spectre_bhb_fw_affected(scope))
+		return true;
+
+	return false;
+}
+
+static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
+{
+	const char *v = arm64_get_bp_hardening_vector(slot);
+
+	if (slot < 0)
+		return;
+
+	__this_cpu_write(this_cpu_vector, v);
+
+	/*
+	 * When KPTI is in use, the vectors are switched when exiting to
+	 * user-space.
+	 */
+	if (arm64_kernel_unmapped_at_el0())
+		return;
+
+	write_sysreg(v, vbar_el1);
+	isb();
+}
+
+#ifdef CONFIG_KVM
 static int kvm_bhb_get_vecs_size(const char *start)
 {
 	if (start == __smccc_workaround_3_smc)
@@ -833,7 +991,7 @@ static int kvm_bhb_get_vecs_size(const c
 	return 0;
 }
 
-void kvm_setup_bhb_slot(const char *hyp_vecs_start)
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
 {
 	int cpu, slot = -1, size;
 	const char *hyp_vecs_end;
@@ -864,3 +1022,77 @@ void kvm_setup_bhb_slot(const char *hyp_
 	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
 	raw_spin_unlock(&bp_lock);
 }
+#else
+#define __smccc_workaround_3_smc NULL
+#define __spectre_bhb_loop_k8 NULL
+#define __spectre_bhb_loop_k24 NULL
+#define __spectre_bhb_loop_k32 NULL
+
+static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
+#endif /* CONFIG_KVM */
+
+void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
+{
+	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+
+	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
+		return;
+
+	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
+		/* No point mitigating Spectre-BHB alone. */
+	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
+	} else if (cpu_mitigations_off()) {
+		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
+	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
+		state = SPECTRE_MITIGATED;
+	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
+		case 8:
+			kvm_setup_bhb_slot(__spectre_bhb_loop_k8);
+			break;
+		case 24:
+			kvm_setup_bhb_slot(__spectre_bhb_loop_k24);
+			break;
+		case 32:
+			kvm_setup_bhb_slot(__spectre_bhb_loop_k32);
+			break;
+		default:
+			WARN_ON_ONCE(1);
+		}
+		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
+
+		state = SPECTRE_MITIGATED;
+	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
+		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
+		if (fw_state == SPECTRE_MITIGATED) {
+			kvm_setup_bhb_slot(__smccc_workaround_3_smc);
+			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+			state = SPECTRE_MITIGATED;
+		}
+	}
+
+	update_mitigation_state(&spectre_bhb_state, state);
+}
+
+/* Patched to correct the immediate */
+void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+				   __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u8 rd;
+	u32 insn;
+	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
+
+	BUG_ON(nr_inst != 1); /* MOV -> MOV */
+
+	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
+		return;
+
+	insn = le32_to_cpu(*origptr);
+	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
+	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_ZERO);
+	*updptr++ = cpu_to_le32(insn);
+}
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -61,6 +61,10 @@ el1_sync:				// Guest trapped into EL2
 	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
 	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
 			  ARM_SMCCC_ARCH_WORKAROUND_2)
+	cbz	w1, wa_epilogue
+
+	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
+			  ARM_SMCCC_ARCH_WORKAROUND_3)
 	cbnz	w1, el1_trap
 
 wa_epilogue:
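
The resulting state is reported to user-space via sysfs (patch 37 of
this series wires Spectre-BHB into the Spectre-v2 reporting). A minimal
check, assuming the standard vulnerabilities directory:

	/* Print the combined Spectre-v2/BHB state reported by the kernel. */
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

		if (!f) {
			perror("spectre_v2");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%s", buf);
		fclose(f);
		return 0;
	}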


