From: Viresh Kumar <viresh.kumar@linaro.org>
To: stable@vger.kernel.org, Julien Thierry <Julien.Thierry@arm.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>,
	linux-arm-kernel@lists.infradead.org,
	Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	Mark Rutland <mark.rutland@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Russell King <rmk+kernel@arm.linux.org.uk>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	mark.brown@arm.com
Subject: [PATCH v4.4 V2 40/43] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive
Date: Fri, 12 Jul 2019 10:58:28 +0530
Message-ID: <ed53e9e0d2f7eb7fb59f7fd78dc455e7eb4bb106.1562908075.git.viresh.kumar@linaro.org>
In-Reply-To: <cover.1562908074.git.viresh.kumar@linaro.org>

From: Marc Zyngier <marc.zyngier@arm.com>

commit f2d3b2e8759a5833df6f022e42df2d581e6d843c upstream.

One of the major improvements of SMCCC v1.1 is that it only clobbers
the first 4 registers, on both 32-bit and 64-bit. This makes it easy
to provide an inline version of the SMC call primitive and to avoid
the function call that would otherwise be needed to stash the
registers clobbered by SMCCC v1.0.
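
To illustrate, a typical caller (a sketch only, assuming the
ARM_SMCCC_ARCH_* constants introduced elsewhere in this series) ends up
with something like the snippet below, which compiles down to a few
register moves around a single SMC instruction instead of a call into an
out-of-line helper:

	struct arm_smccc_res res;

	/* Ask the firmware whether SMCCC_ARCH_WORKAROUND_1 is implemented. */
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	if ((int)res.a0 >= 0) {
		/* Workaround available; res.a0 holds the feature value. */
	}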

Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 include/linux/arm-smccc.h | 141 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 141 insertions(+)

diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c45fd75db5d..60c2ad6316d8 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -122,5 +122,146 @@ asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
 			unsigned long a5, unsigned long a6, unsigned long a7,
 			struct arm_smccc_res *res);
 
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST	"smc	#0"
+#define SMCCC_HVC_INST	"hvc	#0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST	__SMC(0)
+#define SMCCC_HVC_INST	__HVC(0)
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...)						\
+	___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __constraint_write_0						\
+	"+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_1						\
+	"+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_2						\
+	"+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
+#define __constraint_write_3						\
+	"+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
+#define __constraint_write_4	__constraint_write_3
+#define __constraint_write_5	__constraint_write_4
+#define __constraint_write_6	__constraint_write_5
+#define __constraint_write_7	__constraint_write_6
+
+#define __constraint_read_0
+#define __constraint_read_1
+#define __constraint_read_2
+#define __constraint_read_3
+#define __constraint_read_4	"r" (r4)
+#define __constraint_read_5	__constraint_read_4, "r" (r5)
+#define __constraint_read_6	__constraint_read_5, "r" (r6)
+#define __constraint_read_7	__constraint_read_6, "r" (r7)
+
+#define __declare_arg_0(a0, res)					\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register unsigned long r1 asm("r1");				\
+	register unsigned long r2 asm("r2");				\
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_1(a0, a1, res)					\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register typeof(a1)    r1 asm("r1") = a1;			\
+	register unsigned long r2 asm("r2");				\
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_2(a0, a1, a2, res)				\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register typeof(a1)    r1 asm("r1") = a1;			\
+	register typeof(a2)    r2 asm("r2") = a2;			\
+	register unsigned long r3 asm("r3")
+
+#define __declare_arg_3(a0, a1, a2, a3, res)				\
+	struct arm_smccc_res   *___res = res;				\
+	register u32           r0 asm("r0") = a0;			\
+	register typeof(a1)    r1 asm("r1") = a1;			\
+	register typeof(a2)    r2 asm("r2") = a2;			\
+	register typeof(a3)    r3 asm("r3") = a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res)			\
+	__declare_arg_3(a0, a1, a2, a3, res);				\
+	register typeof(a4) r4 asm("r4") = a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)			\
+	__declare_arg_4(a0, a1, a2, a3, a4, res);			\
+	register typeof(a5) r5 asm("r5") = a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)		\
+	__declare_arg_5(a0, a1, a2, a3, a4, a5, res);			\
+	register typeof(a6) r6 asm("r6") = a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)		\
+	__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);		\
+	register typeof(a7) r7 asm("r7") = a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count)						\
+	: __constraint_write_ ## count					\
+	: __constraint_read_ ## count					\
+	: "memory"
+#define __constraints(count)	___constraints(count)
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...)					\
+	do {								\
+		__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__);	\
+		asm volatile(inst "\n"					\
+			     __constraints(__count_args(__VA_ARGS__)));	\
+		if (___res)						\
+			*___res = (typeof(*___res)){r0, r1, r2, r3};	\
+	} while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7
+ * prior to the SMC instruction. If @res is not NULL, it is updated with
+ * the contents of registers 0 to 3 on return from the SMC instruction.
+ */
+#define arm_smccc_1_1_smc(...)	__arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7
+ * prior to the HVC instruction. If @res is not NULL, it is updated with
+ * the contents of registers 0 to 3 on return from the HVC instruction.
+ */
+#define arm_smccc_1_1_hvc(...)	__arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
 #endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/
-- 
2.21.0.rc0.269.g1a574e7a288b

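For readers puzzled by the __count_args() trick in the patch above, the
following stand-alone sketch (hypothetical user-space code, relying on the
GNU C extension that tolerates an empty trailing __VA_ARGS__, just as the
kernel does) shows how appending a descending number list turns the number
of macro arguments into a literal token:

	#include <stdio.h>

	/* Select the 10th argument, discarding everything after it. */
	#define PICK_10TH(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x

	/*
	 * Appending 7..0 shifts one of those numbers into the 10th slot.
	 * A call carrying the function ID, N further arguments and the
	 * result pointer (N + 2 arguments in total) expands to the literal
	 * token N, which the SMCCC macros then paste onto the
	 * __declare_arg_ and __constraint_*_ name prefixes.
	 */
	#define COUNT_ARGS(...) PICK_10TH(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)

	int main(void)
	{
		int res;

		printf("%d\n", COUNT_ARGS(0x80000000, &res));       /* prints 0 */
		printf("%d\n", COUNT_ARGS(0x80000000, 1, 2, &res)); /* prints 2 */
		return 0;
	}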

Thread overview: 136+ messages

2019-07-12  5:27 [PATCH v4.4 V2 00/43] V4.4 backport of arm64 Spectre patches Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 01/43] arm64: barrier: Add CSDB macros to control data-value prediction Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 02/43] arm64: Implement array_index_mask_nospec() Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 03/43] arm64: move TASK_* definitions to <asm/processor.h> Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 04/43] arm64: Make USER_DS an inclusive limit Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 05/43] arm64: Use pointer masking to limit uaccess speculation Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 06/43] arm64: entry: Ensure branch through syscall table is bounded under speculation Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 07/43] arm64: uaccess: Prevent speculative use of the current addr_limit Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 08/43] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 09/43] mm/kasan: add API to check memory regions Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 10/43] arm64: kasan: instrument user memory access API Viresh Kumar
2019-07-12  5:27 ` [PATCH v4.4 V2 11/43] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user Viresh Kumar
2019-07-31 12:37   ` Mark Rutland
2019-08-01  3:38     ` Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 12/43] arm64: cpufeature: Test 'matches' pointer to find the end of the list Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 13/43] arm64: cpufeature: Add scope for capability check Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 14/43] arm64: Introduce cpu_die_early Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 15/43] arm64: Move cpu_die_early to smp.c Viresh Kumar
2019-07-31 12:35   ` Mark Rutland
2019-08-01  3:35     ` Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 16/43] arm64: Verify CPU errata work arounds on hotplugged CPU Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 17/43] arm64: errata: Calling enable functions for CPU errata too Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 18/43] arm64: Rearrange CPU errata workaround checks Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 19/43] arm64: Run enable method for errata work arounds on late CPUs Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 20/43] arm64: cpufeature: Pass capability structure to ->enable callback Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 21/43] drivers/firmware: Expose psci_get_version through psci_ops structure Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 22/43] arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 23/43] arm64: Move post_ttbr_update_workaround to C code Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 24/43] arm64: Add skeleton to harden the branch predictor against aliasing attacks Viresh Kumar
2019-07-31 16:45   ` Mark Rutland
2019-08-01  5:20     ` Viresh Kumar
2019-08-06 12:18       ` Mark Rutland
2019-08-08 12:06         ` Viresh Kumar
2019-08-28 10:23           ` Viresh Kumar
2019-08-28 16:08           ` Mark Rutland
2019-07-12  5:28 ` [PATCH v4.4 V2 25/43] arm64: Move BP hardening to check_and_switch_context Viresh Kumar
2019-07-31 13:09   ` Julien Thierry
2019-08-01  5:09     ` Viresh Kumar
2019-08-01  6:30       ` Julien Thierry
2019-08-01  6:35         ` Viresh Kumar
2019-08-01  6:57           ` Greg KH
2019-08-01  7:05             ` Viresh Kumar
2019-08-01  7:34               ` Will Deacon
2019-08-01  7:41                 ` Viresh Kumar
2019-08-01  8:43                 ` Greg KH
2019-08-01  8:49                   ` Julien Thierry
2019-07-12  5:28 ` [PATCH v4.4 V2 26/43] arm64: entry: Apply BP hardening for high-priority synchronous exceptions Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 27/43] arm64: entry: Apply BP hardening for suspicious interrupts from EL0 Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 28/43] arm64: cputype: Add missing MIDR values for Cortex-A72 and Cortex-A75 Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 29/43] arm64: cpu_errata: Allow an erratum to be match for all revisions of a core Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 30/43] arm64: Implement branch predictor hardening for affected Cortex-A CPUs Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 31/43] arm64: cputype info for Broadcom Vulcan Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 32/43] arm64: cputype: Add MIDR values for Cavium ThunderX2 CPUs Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 33/43] arm64: Branch predictor hardening for Cavium ThunderX2 Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 34/43] ARM: 8478/2: arm/arm64: add arm-smccc Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 35/43] arm/arm64: KVM: Advertise SMCCC v1.1 Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 36/43] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 37/43] firmware/psci: Expose PSCI conduit Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 38/43] firmware/psci: Expose SMCCC version through psci_ops Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 39/43] arm/arm64: smccc: Make function identifiers an unsigned quantity Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 40/43] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive Viresh Kumar [this message]
2019-07-12  5:28 ` [PATCH v4.4 V2 41/43] arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 42/43] arm64: Kill PSCI_GET_VERSION as a variant-2 workaround Viresh Kumar
2019-07-12  5:28 ` [PATCH v4.4 V2 43/43] arm64: futex: Mask __user pointers prior to dereference Viresh Kumar
2019-07-15 13:09 ` [PATCH v4.4 V2 00/43] V4.4 backport of arm64 Spectre patches Mark Rutland
2019-07-16  3:44   ` Viresh Kumar
2019-07-31  2:52 ` Viresh Kumar
2019-07-31 17:02   ` Mark Rutland
