From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
To: stable@vger.kernel.org
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>,
	Will Deacon <will@kernel.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <maz@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Jeremy Linton <jeremy.linton@arm.com>,
	Andre Przywara <andre.przywara@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Dave Martin <dave.martin@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	James Morse <james.morse@arm.com>,
	Robin Murphy <robin.murphy@arm.com>,
	Julien Thierry <julien.thierry@arm.com>
Subject: [PATCH for-stable-4.14 10/48] arm64: capabilities: Update prototype for enable call back
Date: Thu, 24 Oct 2019 14:47:55 +0200	[thread overview]
Message-ID: <20191024124833.4158-11-ard.biesheuvel@linaro.org> (raw)
In-Reply-To: <20191024124833.4158-1-ard.biesheuvel@linaro.org>

From: Dave Martin <dave.martin@arm.com>

[ Upstream commit c0cda3b8ee6b4b6851b2fd8b6db91fd7b0e2524a ]

We issue the enable() callback for all CPU hwcaps capabilities
available on the system, on all CPUs. So far we have ignored the
argument passed to the callback, whose prototype accepts a "void *"
for use with on_each_cpu() and later with stop_machine(). However,
since commit 0a0d111d40fd1 ("arm64: cpufeature: Pass capability
structure to ->enable callback"), some users of the argument want the
matching capability struct pointer when there are multiple matching
criteria for a single capability. Clean up the declaration of the
callback to make this clear:

 1) Rename it to cpu_enable(), to imply that it takes the necessary
    actions on the calling CPU for the entry.
 2) Pass a const pointer to the capability, to allow the callback to
    inspect the entry (e.g. to check whether any action is needed on
    the CPU).
 3) We don't care about the result of the callback, so turn the
    return type into void.

Cc: Will Deacon <will.deacon@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Andre Przywara <andre.przywara@arm.com>
Cc: James Morse <james.morse@arm.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Dave Martin <dave.martin@arm.com>
[suzuki: convert more users, rename callback and drop results]
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/include/asm/cpufeature.h |  7 ++-
 arch/arm64/include/asm/processor.h  |  5 +-
 arch/arm64/kernel/cpu_errata.c      | 55 +++++++++-----------
 arch/arm64/kernel/cpufeature.c      | 34 +++++++-----
 arch/arm64/kernel/fpsimd.c          |  1 +
 arch/arm64/kernel/traps.c           |  4 +-
 arch/arm64/mm/fault.c               |  3 +-
 7 files changed, 60 insertions(+), 49 deletions(-)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 5048c7a55eef..bff4d95db039 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -96,7 +96,12 @@ struct arm64_cpu_capabilities {
 	u16 capability;
 	int def_scope;			/* default scope */
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-	int (*enable)(void *);		/* Called on all active CPUs */
+	/*
+	 * Take the appropriate actions to enable this capability for this CPU.
+	 * For each successfully booted CPU, this method is called for each
+	 * globally detected capability.
+	 */
+	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 91bb97d8bdbf..9b6ac522a71a 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -37,6 +37,7 @@
 #include <linux/string.h>
 
 #include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/lse.h>
@@ -222,8 +223,8 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #endif
 
-int cpu_enable_pan(void *__unused);
-int cpu_enable_cache_maint_trap(void *__unused);
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
+void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 3d6d7fae45de..3c2a68d766a2 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -61,11 +61,11 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
 	       (arm64_ftr_reg_ctrel0.sys_val & mask);
 }
 
-static int cpu_enable_trap_ctr_access(void *__unused)
+static void
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 {
 	/* Clear SCTLR_EL1.UCT */
 	config_sctlr_el1(SCTLR_EL1_UCT, 0);
-	return 0;
 }
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@@ -169,25 +169,25 @@ static void call_hvc_arch_workaround_1(void)
 	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
-static int enable_smccc_arch_workaround_1(void *data)
+static void
+enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 {
-	const struct arm64_cpu_capabilities *entry = data;
 	bp_hardening_cb_t cb;
 	void *smccc_start, *smccc_end;
 	struct arm_smccc_res res;
 
 	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
-		return 0;
+		return;
 
 	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
-		return 0;
+		return;
 
 	switch (psci_ops.conduit) {
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 		if ((int)res.a0 < 0)
-			return 0;
+			return;
 		cb = call_hvc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_hvc_start;
 		smccc_end = __smccc_workaround_1_hvc_end;
@@ -197,19 +197,19 @@ static int enable_smccc_arch_workaround_1(void *data)
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 		if ((int)res.a0 < 0)
-			return 0;
+			return;
 		cb = call_smc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_smc_start;
 		smccc_end = __smccc_workaround_1_smc_end;
 		break;
 
 	default:
-		return 0;
+		return;
 	}
 
 	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
 
-	return 0;
+	return;
 }
 
 static void qcom_link_stack_sanitization(void)
@@ -224,15 +224,12 @@ static void qcom_link_stack_sanitization(void)
 		     : "=&r" (tmp));
 }
 
-static int qcom_enable_link_stack_sanitization(void *data)
+static void
+qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
 {
-	const struct arm64_cpu_capabilities *entry = data;
-
 	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
 				__qcom_hyp_sanitize_link_stack_start,
 				__qcom_hyp_sanitize_link_stack_end);
-
-	return 0;
 }
 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
@@ -431,7 +428,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM errata 826319, 827319, 824069",
 		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
-		.enable = cpu_enable_cache_maint_trap,
+		.cpu_enable = cpu_enable_cache_maint_trap,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_819472
@@ -440,7 +437,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM errata 819472",
 		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
-		.enable = cpu_enable_cache_maint_trap,
+		.cpu_enable = cpu_enable_cache_maint_trap,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_832075
@@ -521,14 +518,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
 		.matches = has_mismatched_cache_type,
 		.def_scope = SCOPE_LOCAL_CPU,
-		.enable = cpu_enable_trap_ctr_access,
+		.cpu_enable = cpu_enable_trap_ctr_access,
 	},
 	{
 		.desc = "Mismatched cache type",
 		.capability = ARM64_MISMATCHED_CACHE_TYPE,
 		.matches = has_mismatched_cache_type,
 		.def_scope = SCOPE_LOCAL_CPU,
-		.enable = cpu_enable_trap_ctr_access,
+		.cpu_enable = cpu_enable_trap_ctr_access,
 	},
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
 	{
@@ -567,27 +564,27 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-		.enable = enable_smccc_arch_workaround_1,
+		.cpu_enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
-		.enable = enable_smccc_arch_workaround_1,
+		.cpu_enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-		.enable = enable_smccc_arch_workaround_1,
+		.cpu_enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-		.enable = enable_smccc_arch_workaround_1,
+		.cpu_enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
-		.enable = qcom_enable_link_stack_sanitization,
+		.cpu_enable = qcom_enable_link_stack_sanitization,
 	},
 	{
 		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
@@ -596,7 +593,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
-		.enable = qcom_enable_link_stack_sanitization,
+		.cpu_enable = qcom_enable_link_stack_sanitization,
 	},
 	{
 		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
@@ -605,12 +602,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
-		.enable = enable_smccc_arch_workaround_1,
+		.cpu_enable = enable_smccc_arch_workaround_1,
 	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-		.enable = enable_smccc_arch_workaround_1,
+		.cpu_enable = enable_smccc_arch_workaround_1,
 	},
 #endif
 #ifdef CONFIG_ARM64_SSBD
@@ -636,8 +633,8 @@ void verify_local_cpu_errata_workarounds(void)
 
 	for (; caps->matches; caps++) {
 		if (cpus_have_cap(caps->capability)) {
-			if (caps->enable)
-				caps->enable((void *)caps);
+			if (caps->cpu_enable)
+				caps->cpu_enable(caps);
 		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
 			pr_crit("CPU%d: Requires work around for %s, not detected"
 					" at boot time\n",
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d3a93e23a87c..17aa34d70771 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -859,7 +859,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 						     ID_AA64PFR0_CSV3_SHIFT);
 }
 
-static int kpti_install_ng_mappings(void *__unused)
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
 	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -869,7 +870,7 @@ static int kpti_install_ng_mappings(void *__unused)
 	int cpu = smp_processor_id();
 
 	if (kpti_applied)
-		return 0;
+		return;
 
 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
 
@@ -880,7 +881,7 @@ static int kpti_install_ng_mappings(void *__unused)
 	if (!cpu)
 		kpti_applied = true;
 
-	return 0;
+	return;
 }
 
 static int __init parse_kpti(char *str)
@@ -897,7 +898,7 @@ static int __init parse_kpti(char *str)
 early_param("kpti", parse_kpti);
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
-static int cpu_copy_el2regs(void *__unused)
+static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
 {
 	/*
 	 * Copy register values that aren't redirected by hardware.
@@ -909,8 +910,6 @@ static int cpu_copy_el2regs(void *__unused)
 	 */
 	if (!alternatives_applied)
 		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
-
-	return 0;
 }
 
 static const struct arm64_cpu_capabilities arm64_features[] = {
@@ -934,7 +933,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
-		.enable = cpu_enable_pan,
+		.cpu_enable = cpu_enable_pan,
 	},
 #endif /* CONFIG_ARM64_PAN */
 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
@@ -982,7 +981,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
 		.def_scope = SCOPE_SYSTEM,
 		.matches = runs_at_el2,
-		.enable = cpu_copy_el2regs,
+		.cpu_enable = cpu_copy_el2regs,
 	},
 	{
 		.desc = "32-bit EL0 Support",
@@ -1006,7 +1005,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
 		.def_scope = SCOPE_SYSTEM,
 		.matches = unmap_kernel_at_el0,
-		.enable = kpti_install_ng_mappings,
+		.cpu_enable = kpti_install_ng_mappings,
 	},
 #endif
 	{
@@ -1169,6 +1168,14 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 	}
 }
 
+static int __enable_cpu_capability(void *arg)
+{
+	const struct arm64_cpu_capabilities *cap = arg;
+
+	cap->cpu_enable(cap);
+	return 0;
+}
+
 /*
  * Run through the enabled capabilities and enable() it on all active
  * CPUs
@@ -1184,14 +1191,15 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 		/* Ensure cpus_have_const_cap(num) works */
 		static_branch_enable(&cpu_hwcap_keys[num]);
 
-		if (caps->enable) {
+		if (caps->cpu_enable) {
 			/*
 			 * Use stop_machine() as it schedules the work allowing
 			 * us to modify PSTATE, instead of on_each_cpu() which
 			 * uses an IPI, giving us a PSTATE that disappears when
 			 * we return.
 			 */
-			stop_machine(caps->enable, (void *)caps, cpu_online_mask);
+			stop_machine(__enable_cpu_capability, (void *)caps,
+				     cpu_online_mask);
 		}
 	}
 }
@@ -1249,8 +1257,8 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
 					smp_processor_id(), caps->desc);
 			cpu_die_early();
 		}
-		if (caps->enable)
-			caps->enable((void *)caps);
+		if (caps->cpu_enable)
+			caps->cpu_enable(caps);
 	}
 }
 
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5d547deb6996..f4fdf6420ac5 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -28,6 +28,7 @@
 #include <linux/signal.h>
 
 #include <asm/fpsimd.h>
+#include <asm/cpufeature.h>
 #include <asm/cputype.h>
 #include <asm/simd.h>
 
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 74259ae9c7f2..a4e49e947684 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
 
 #include <asm/atomic.h>
 #include <asm/bug.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/insn.h>
@@ -436,10 +437,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
-int cpu_enable_cache_maint_trap(void *__unused)
+void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
 {
 	config_sctlr_el1(SCTLR_EL1_UCI, 0);
-	return 0;
 }
 
 #define __user_cache_maint(insn, address, res)			\
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 465b90d7abf2..bf7c285d0c82 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -875,7 +875,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
 NOKPROBE_SYMBOL(do_debug_exception);
 
 #ifdef CONFIG_ARM64_PAN
-int cpu_enable_pan(void *__unused)
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
 	/*
 	 * We modify PSTATE. This won't work from irq context as the PSTATE
@@ -885,6 +885,5 @@ int cpu_enable_pan(void *__unused)
 
 	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
 	asm(SET_PSTATE_PAN(1));
-	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
-- 
2.20.1

