From: Viresh Kumar <viresh.kumar@linaro.org>
To: stable@vger.kernel.org, Julien Thierry <Julien.Thierry@arm.com>,
	Mark Rutland <mark.rutland@arm.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>,
	linux-arm-kernel@lists.infradead.org,
	Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Russell King <rmk+kernel@arm.linux.org.uk>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	mark.brown@arm.com
Subject: [PATCH ARM64 v4.4 V3 11/44] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user
Date: Thu, 29 Aug 2019 17:03:56 +0530
Message-ID: <821430ff13f625eca9e0a9700ddc161cbc7965ff.1567077734.git.viresh.kumar@linaro.org>
In-Reply-To: <cover.1567077734.git.viresh.kumar@linaro.org>

From: Will Deacon <will.deacon@arm.com>

commit f71c2ffcb20dd8626880747557014bb9a61eb90e upstream.

Like we've done for get_user and put_user, ensure that user pointers
are masked before invoking the underlying __arch_{clear,copy_*}_user
operations.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[ v4.4: fixup for v4.4 style uaccess primitives ]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 arch/arm64/include/asm/uaccess.h | 18 ++++++++++--------
 arch/arm64/kernel/arm64ksyms.c   |  4 ++--
 arch/arm64/lib/clear_user.S      |  6 +++---
 arch/arm64/lib/copy_in_user.S    |  4 ++--
 4 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 693a0d784534..f2f5a152f372 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -303,19 +303,18 @@ do {									\
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
-	return  __arch_copy_from_user(to, from, n);
+	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
-	return  __arch_copy_to_user(to, from, n);
+	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -338,22 +337,25 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 	return n;
 }
 
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
+		n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
 	return n;
 }
+#define copy_in_user __copy_in_user
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user(__uaccess_mask_ptr(to), n);
+		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
+#define clear_user	__clear_user
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index c654df05b7d7..abe4e0984dbb 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -35,8 +35,8 @@ EXPORT_SYMBOL(clear_page);
 	/* user mem (segment) */
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
 
 	/* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index a9723c71c52b..fc6bb0f83511 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -24,7 +24,7 @@
 
 	.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
@@ -32,7 +32,7 @@
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
@@ -57,7 +57,7 @@ USER(9f, strb	wzr, [x0]	)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 81c8fc93c100..0219aa85b3cc 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -67,7 +67,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_in_user)
+ENTRY(__arch_copy_in_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -76,7 +76,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
-ENDPROC(__copy_in_user)
+ENDPROC(__arch_copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
-- 
2.21.0.rc0.269.g1a574e7a288b
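
The __uaccess_mask_ptr() primitive that these hunks apply is introduced
earlier in the series by patch 05/44 ("arm64: Use pointer masking to
limit uaccess speculation") and is not shown in this diff. As a rough,
stand-alone C sketch of the sanitisation it performs -- hypothetical
helper name, not the kernel's actual inline-assembly definition; arm64's
64-bit pointers assumed; and an addr_limit of the inclusive 2^n - 1 form
arranged by patch 04/44 ("arm64: Make USER_DS an inclusive limit") --
the idea is a branchless "pointer or NULL" select:

#include <stdint.h>

static inline void *mask_user_ptr(void *ptr, uintptr_t limit)
{
	uintptr_t addr = (uintptr_t)ptr;
	uintptr_t high = addr & ~limit;	/* non-zero iff ptr exceeds limit */

	/*
	 * Branchless select: (high | -high) has its top bit set exactly
	 * when high != 0; an arithmetic right shift smears that bit
	 * across the word, and inverting yields all-ones for in-range
	 * pointers and zero for out-of-range ones. The shift count of
	 * 63 assumes 64-bit pointers.
	 */
	uintptr_t keep = ~(uintptr_t)((intptr_t)(high | -high) >> 63);

	return (void *)(addr & keep);	/* original pointer, or NULL */
}

Because there is no conditional branch in the select, a CPU that has
mispredicted its way past the access_ok() check still operates on the
sanitised value: an out-of-range pointer has already collapsed to NULL
before __arch_copy_*_user or __arch_clear_user ever sees it.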


Thread overview: 128+ messages
2019-08-29 11:33 [PATCH ARM64 v4.4 V3 00/44] V4.4 backport of arm64 Spectre patches Viresh Kumar
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 01/44] arm64: barrier: Add CSDB macros to control data-value prediction Viresh Kumar
2019-08-30  9:39   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 02/44] arm64: Implement array_index_mask_nospec() Viresh Kumar
2019-08-30  9:40   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 03/44] arm64: move TASK_* definitions to <asm/processor.h> Viresh Kumar
2019-08-30  9:40   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 04/44] arm64: Make USER_DS an inclusive limit Viresh Kumar
2019-08-30  9:40   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 05/44] arm64: Use pointer masking to limit uaccess speculation Viresh Kumar
2019-08-30  9:40   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 06/44] arm64: entry: Ensure branch through syscall table is bounded under speculation Viresh Kumar
2019-08-30  9:40   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 07/44] arm64: uaccess: Prevent speculative use of the current addr_limit Viresh Kumar
2019-08-30  9:40   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 08/44] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user Viresh Kumar
2019-08-30  9:41   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 09/44] mm/kasan: add API to check memory regions Viresh Kumar
2019-08-30  9:41   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 10/44] arm64: kasan: instrument user memory access API Viresh Kumar
2019-08-30  9:41   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 11/44] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user Viresh Kumar [this message]
2019-08-30  9:41   ` Mark Rutland
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 12/44] arm64: cpufeature: Test 'matches' pointer to find the end of the list Viresh Kumar
2019-09-02 14:27   ` Mark Rutland
2019-09-05  7:45     ` Viresh Kumar
2019-09-06 13:49       ` Mark Rutland
2019-09-10  9:35         ` Viresh Kumar
2019-10-11  6:36         ` Viresh Kumar
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 13/44] arm64: cpufeature: Add scope for capability check Viresh Kumar
2019-08-29 11:33 ` [PATCH ARM64 v4.4 V3 14/44] arm64: Introduce cpu_die_early Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 15/44] arm64: Add a helper for parking CPUs in a loop Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 16/44] arm64: Move cpu_die_early to smp.c Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 17/44] arm64: Verify CPU errata work arounds on hotplugged CPU Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 18/44] arm64: errata: Calling enable functions for CPU errata too Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 19/44] arm64: Rearrange CPU errata workaround checks Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 20/44] arm64: Run enable method for errata work arounds on late CPUs Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 21/44] arm64: cpufeature: Pass capability structure to ->enable callback Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 22/44] drivers/firmware: Expose psci_get_version through psci_ops structure Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 23/44] arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 24/44] arm64: Move post_ttbr_update_workaround to C code Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 25/44] arm64: Add skeleton to harden the branch predictor against aliasing attacks Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 26/44] arm64: Move BP hardening to check_and_switch_context Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 27/44] arm64: entry: Apply BP hardening for high-priority synchronous exceptions Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 28/44] arm64: entry: Apply BP hardening for suspicious interrupts from EL0 Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 29/44] arm64: cputype: Add missing MIDR values for Cortex-A72 and Cortex-A75 Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 30/44] arm64: cpu_errata: Allow an erratum to be match for all revisions of a core Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 31/44] arm64: Implement branch predictor hardening for affected Cortex-A CPUs Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 32/44] arm64: cputype info for Broadcom Vulcan Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 33/44] arm64: cputype: Add MIDR values for Cavium ThunderX2 CPUs Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 34/44] arm64: Branch predictor hardening for Cavium ThunderX2 Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 35/44] ARM: 8478/2: arm/arm64: add arm-smccc Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 36/44] arm/arm64: KVM: Advertise SMCCC v1.1 Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 37/44] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 38/44] firmware/psci: Expose PSCI conduit Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 39/44] firmware/psci: Expose SMCCC version through psci_ops Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 40/44] arm/arm64: smccc: Make function identifiers an unsigned quantity Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 41/44] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 42/44] arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 43/44] arm64: Kill PSCI_GET_VERSION as a variant-2 workaround Viresh Kumar
2019-08-29 11:34 ` [PATCH ARM64 v4.4 V3 44/44] arm64: futex: Mask __user pointers prior to dereference Viresh Kumar
2019-08-30  9:42   ` Mark Rutland
2019-09-03  5:15     ` Viresh Kumar
2019-08-29 16:18 ` [PATCH ARM64 v4.4 V3 00/44] V4.4 backport of arm64 Spectre patches Mark Rutland
