linux-next.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Stephen Rothwell <sfr@canb.auug.org.au>
To: Christoffer Dall <cdall@cs.columbia.edu>,
	Marc Zyngier <marc.zyngier@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will.deacon@arm.com>
Cc: Linux-Next Mailing List <linux-next@vger.kernel.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	Shanker Donthineni <shankerd@codeaurora.org>,
	Dave Martin <dave.martin@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>
Subject: linux-next: manual merge of the kvm-arm tree with the arm64 tree
Date: Wed, 28 Mar 2018 16:00:34 +1100	[thread overview]
Message-ID: <20180328160034.61e0e588@canb.auug.org.au> (raw)

[-- Attachment #1: Type: text/plain, Size: 9316 bytes --]

Hi all,

Today's linux-next merge of the kvm-arm tree got a conflict in:

  arch/arm64/kernel/cpu_errata.c

between commit:

  c0cda3b8ee6b ("arm64: capabilities: Update prototype for enable call back")
  followed by a series of patches cleaning up capabilities

from the arm64 tree and commits:

  4b472ffd1513 ("arm64: Enable ARM64_HARDEN_EL2_VECTORS on Cortex-A57 and A72")
  f9f5dc19509b ("arm64: KVM: Use SMCCC_ARCH_WORKAROUND_1 for Falkor BP hardening")

from the kvm-arm tree.

I fixed it up (maybe, please check the result and see below) and can
carry the fix as necessary. This is now fixed as far as linux-next is
concerned, but any non-trivial conflicts should be mentioned to your
upstream maintainer when your tree is submitted for merging.  You may
also want to consider cooperating with the maintainer of the conflicting
tree to minimise any particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc arch/arm64/kernel/cpu_errata.c
index 2df792771053,caa73af7d26e..000000000000
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@@ -76,8 -57,11 +76,10 @@@ cpu_enable_trap_ctr_access(const struc
  {
  	/* Clear SCTLR_EL1.UCT */
  	config_sctlr_el1(SCTLR_EL1_UCT, 0);
 -	return 0;
  }
  
+ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
+ 
  #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
  #include <asm/mmu_context.h>
  #include <asm/cacheflush.h>
@@@ -179,18 -156,31 +174,31 @@@ static void call_hvc_arch_workaround_1(
  	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
  }
  
+ static void qcom_link_stack_sanitization(void)
+ {
+ 	u64 tmp;
+ 
+ 	asm volatile("mov	%0, x30		\n"
+ 		     ".rept	16		\n"
+ 		     "bl	. + 4		\n"
+ 		     ".endr			\n"
+ 		     "mov	x30, %0		\n"
+ 		     : "=&r" (tmp));
+ }
+ 
 -static int enable_smccc_arch_workaround_1(void *data)
 +static void
 +enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
  {
 -	const struct arm64_cpu_capabilities *entry = data;
  	bp_hardening_cb_t cb;
  	void *smccc_start, *smccc_end;
  	struct arm_smccc_res res;
+ 	u32 midr = read_cpuid_id();
  
  	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
 -		return 0;
 +		return;
  
  	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
 -		return 0;
 +		return;
  
  	switch (psci_ops.conduit) {
  	case PSCI_CONDUIT_HVC:
@@@ -214,139 -204,33 +222,124 @@@
  		break;
  
  	default:
 -		return 0;
 +		return;
  	}
  
+ 	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+ 	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+ 		cb = qcom_link_stack_sanitization;
+ 
  	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
  
 -	return 0;
 +	return;
  }
  
- static void qcom_link_stack_sanitization(void)
- {
- 	u64 tmp;
- 
- 	asm volatile("mov	%0, x30		\n"
- 		     ".rept	16		\n"
- 		     "bl	. + 4		\n"
- 		     ".endr			\n"
- 		     "mov	x30, %0		\n"
- 		     : "=&r" (tmp));
- }
- 
- static void
- qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
- {
- 	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
- 				__qcom_hyp_sanitize_link_stack_start,
- 				__qcom_hyp_sanitize_link_stack_end);
- }
  #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
  
 -#define MIDR_RANGE(model, min, max) \
 -	.def_scope = SCOPE_LOCAL_CPU, \
 -	.matches = is_affected_midr_range, \
 -	.midr_model = model, \
 -	.midr_range_min = min, \
 -	.midr_range_max = max
 +#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
 +	.matches = is_affected_midr_range,			\
 +	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 +
 +#define CAP_MIDR_ALL_VERSIONS(model)					\
 +	.matches = is_affected_midr_range,				\
 +	.midr_range = MIDR_ALL_VERSIONS(model)
 +
 +#define MIDR_FIXED(rev, revidr_mask) \
 +	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
 +
 +#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
 +	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
 +	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
 +
 +#define CAP_MIDR_RANGE_LIST(list)				\
 +	.matches = is_affected_midr_range_list,			\
 +	.midr_range_list = list
 +
 +/* Errata affecting a range of revisions of  given model variant */
 +#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
 +	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
 +
 +/* Errata affecting a single variant/revision of a model */
 +#define ERRATA_MIDR_REV(model, var, rev)	\
 +	ERRATA_MIDR_RANGE(model, var, rev, var, rev)
 +
 +/* Errata affecting all variants/revisions of a given a model */
 +#define ERRATA_MIDR_ALL_VERSIONS(model)				\
 +	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 +	CAP_MIDR_ALL_VERSIONS(model)
 +
 +/* Errata affecting a list of midr ranges, with same work around */
 +#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
 +	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
 +	CAP_MIDR_RANGE_LIST(midr_list)
 +
 +/*
 + * Generic helper for handling capabilties with multiple (match,enable) pairs
 + * of call backs, sharing the same capability bit.
 + * Iterate over each entry to see if at least one matches.
 + */
 +static bool __maybe_unused
 +multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
 +{
 +	const struct arm64_cpu_capabilities *caps;
 +
 +	for (caps = entry->match_list; caps->matches; caps++)
 +		if (caps->matches(caps, scope))
 +			return true;
 +
 +	return false;
 +}
 +
 +/*
 + * Take appropriate action for all matching entries in the shared capability
 + * entry.
 + */
 +static void __maybe_unused
 +multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
 +{
 +	const struct arm64_cpu_capabilities *caps;
 +
 +	for (caps = entry->match_list; caps->matches; caps++)
 +		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
 +		    caps->cpu_enable)
 +			caps->cpu_enable(caps);
 +}
 +
 +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 +
 +/*
 + * List of CPUs where we need to issue a psci call to
 + * harden the branch predictor.
 + */
 +static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 +	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
 +	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 +	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 +	{},
 +};
 +
 +static const struct midr_range qcom_bp_harden_cpus[] = {
 +	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 +	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 +	{},
 +};
 +
 +static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
 +	{
 +		CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
 +		.cpu_enable = enable_smccc_arch_workaround_1,
 +	},
 +	{
 +		CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
 +		.cpu_enable = qcom_enable_link_stack_sanitization,
 +	},
 +	{},
 +};
  
 -#define MIDR_ALL_VERSIONS(model) \
 -	.def_scope = SCOPE_LOCAL_CPU, \
 -	.matches = is_affected_midr_range, \
 -	.midr_model = model, \
 -	.midr_range_min = 0, \
 -	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
 +#endif
  
  const struct arm64_cpu_capabilities arm64_errata[] = {
  #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
@@@ -491,15 -369,56 +484,27 @@@
  #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
  	{
  		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 -		.enable = enable_smccc_arch_workaround_1,
 +		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 +		.matches = multi_entry_cap_matches,
 +		.cpu_enable = multi_entry_cap_cpu_enable,
 +		.match_list = arm64_bp_harden_list,
  	},
  	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 -		.enable = enable_smccc_arch_workaround_1,
 -	},
 -	{
 -		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 -		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 -		.enable = enable_smccc_arch_workaround_1,
 +		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
 +		ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
  	},
+ #endif
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+ 	{
+ 		.desc = "Cortex-A57 EL2 vector hardening",
+ 		.capability = ARM64_HARDEN_EL2_VECTORS,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
++		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ 	},
+ 	{
+ 		.desc = "Cortex-A72 EL2 vector hardening",
+ 		.capability = ARM64_HARDEN_EL2_VECTORS,
 -		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
++		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ 	},
  #endif
  	{
  	}

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

             reply	other threads:[~2018-03-28  5:00 UTC|newest]

Thread overview: 83+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-03-28  5:00 Stephen Rothwell [this message]
2018-03-28 11:53 ` linux-next: manual merge of the kvm-arm tree with the arm64 tree Will Deacon
  -- strict thread matches above, loose matches on Subject: below --
2024-03-08  1:54 Stephen Rothwell
2024-03-08  6:25 ` Oliver Upton
2024-03-08 12:30   ` Catalin Marinas
2024-03-08  1:47 Stephen Rothwell
2024-02-20  2:21 Stephen Rothwell
2024-02-19  2:58 Stephen Rothwell
2024-02-19 12:14 ` Catalin Marinas
2024-02-19 15:22   ` Marc Zyngier
2024-02-19 15:35     ` Mark Rutland
2024-02-19 15:43     ` Ard Biesheuvel
2024-02-19 16:49     ` Catalin Marinas
2023-10-24  2:28 Stephen Rothwell
2023-10-24  6:49 ` Oliver Upton
2023-10-17  1:30 Stephen Rothwell
2023-10-17 11:13 ` Catalin Marinas
2023-10-18 23:02   ` Oliver Upton
2023-11-01  2:36 ` Stephen Rothwell
2023-06-15  2:45 Stephen Rothwell
2023-06-15  7:37 ` Oliver Upton
2023-06-15  8:32   ` Catalin Marinas
2023-06-15  2:22 Stephen Rothwell
2023-06-15  7:14 ` Catalin Marinas
2023-07-03  0:50 ` Stephen Rothwell
2023-06-06  1:49 Stephen Rothwell
2023-06-06  9:20 ` Catalin Marinas
2023-06-07  1:05 ` Stephen Rothwell
2023-06-07  5:33   ` Oliver Upton
2023-06-07  8:45     ` Catalin Marinas
2023-02-06  1:44 Stephen Rothwell
2023-02-06  4:21 ` Stephen Rothwell
2023-02-06  8:37 ` Marc Zyngier
2023-02-06  8:43   ` Marc Zyngier
2022-09-19  4:05 Stephen Rothwell
2022-09-19  9:04 ` Marc Zyngier
2022-09-23 10:26   ` Catalin Marinas
2022-05-04  4:35 Stephen Rothwell
2022-05-04  7:06 ` Marc Zyngier
2022-05-04  8:08   ` Catalin Marinas
2022-05-23  6:36 ` Stephen Rothwell
2021-08-19  4:05 Stephen Rothwell
2021-08-20  9:27 ` Catalin Marinas
2021-08-12  3:33 Stephen Rothwell
2021-06-23  5:58 Stephen Rothwell
2021-04-13  5:43 Stephen Rothwell
2021-04-13 11:21 ` Ard Biesheuvel
2021-01-27  5:24 Stephen Rothwell
2020-12-04  5:44 Stephen Rothwell
2020-12-04 11:12 ` Marc Zyngier
2020-12-04  5:17 Stephen Rothwell
2020-09-30  6:26 Stephen Rothwell
2020-05-29  7:00 Stephen Rothwell
2019-07-08  7:24 Stephen Rothwell
2018-10-04  4:22 Stephen Rothwell
2018-10-04  4:07 Stephen Rothwell
2018-07-23  4:46 Stephen Rothwell
2018-07-23 10:45 ` Marc Zyngier
2018-08-16  0:15 ` Stephen Rothwell
2018-08-17  8:32   ` Paolo Bonzini
2018-08-17  9:33     ` Marc Zyngier
2018-06-01  6:23 Stephen Rothwell
2018-06-01  8:23 ` Marc Zyngier
2018-06-01  6:13 Stephen Rothwell
2018-03-28  5:05 Stephen Rothwell
2018-03-29  5:16 ` Stephen Rothwell
2017-08-25  4:57 Stephen Rothwell
2017-08-25  8:11 ` Marc Zyngier
2017-08-25  8:44   ` Christoffer Dall
2016-02-29  5:18 Stephen Rothwell
2016-02-24  2:38 Stephen Rothwell
2016-02-22  2:33 Stephen Rothwell
2016-02-22  9:26 ` Catalin Marinas
2016-02-22  2:28 Stephen Rothwell
2016-02-22  9:24 ` Catalin Marinas
2016-02-12  2:26 Stephen Rothwell
2015-01-22  5:07 Stephen Rothwell
2015-01-22  8:51 ` Marc Zyngier
2015-01-22 10:29   ` Mark Rutland
2015-01-22 23:05     ` Stephen Rothwell
2015-01-23  1:36   ` Wei Huang
2015-01-23 11:53   ` Christoffer Dall
2015-01-22  5:06 Stephen Rothwell

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180328160034.61e0e588@canb.auug.org.au \
    --to=sfr@canb.auug.org.au \
    --cc=catalin.marinas@arm.com \
    --cc=cdall@cs.columbia.edu \
    --cc=dave.martin@arm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-next@vger.kernel.org \
    --cc=marc.zyngier@arm.com \
    --cc=shankerd@codeaurora.org \
    --cc=suzuki.poulose@arm.com \
    --cc=will.deacon@arm.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).