From: Viresh Kumar <viresh.kumar@linaro.org>
To: linux-arm-kernel@lists.infradead.org,
	Julien Thierry <Julien.Thierry@arm.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>,
	stable@vger.kernel.org, Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	Mark Rutland <mark.rutland@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Russell King <rmk+kernel@arm.linux.org.uk>,
	Vincent Guittot <vincent.guittot@linaro.org>,
	mark.brown@arm.com
Subject: [PATCH v4.4 12/45] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user
Date: Fri, 14 Jun 2019 08:37:55 +0530
Message-ID: <9f68161e012c5942720575377cffd4e445446acf.1560480942.git.viresh.kumar@linaro.org>
In-Reply-To: <cover.1560480942.git.viresh.kumar@linaro.org>

From: Will Deacon <will.deacon@arm.com>

commit f71c2ffcb20dd8626880747557014bb9a61eb90e upstream.

Like we've done for get_user and put_user, ensure that user pointers
are masked before invoking the underlying __arch_{clear,copy_*}_user
operations.
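
For reference, __uaccess_mask_ptr() is the primitive introduced earlier in
this series by patch 06/45 ("arm64: Use pointer masking to limit uaccess
speculation"). It sanitises a user pointer so that it becomes NULL if it lies
above the current addr_limit, even on a speculative path. The upstream helper
looks roughly like this (a sketch for orientation only, not part of this
patch; the v4.4 backport is assumed to keep the same shape):

static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	/* ptr & ~addr_limit: zero iff ptr is within the inclusive limit */
	"	bics	xzr, %1, %2\n"
	/* keep ptr when in range, substitute NULL otherwise */
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();		/* no data-value prediction past this barrier */
	return safe_ptr;
}

The bics/csel sequence relies on USER_DS being an inclusive
power-of-two-minus-one limit (patch 05/45), so the range check reduces to a
single AND with the complement of the limit, and the CSDB barrier
(patch 01/45) stops the CPU from consuming a mispredicted pointer value.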

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[ v4.4: fixup for v4.4 style uaccess primitives ]
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 arch/arm64/include/asm/uaccess.h | 20 ++++++++++++--------
 arch/arm64/kernel/arm64ksyms.c   |  4 ++--
 arch/arm64/lib/clear_user.S      |  6 +++---
 arch/arm64/lib/copy_in_user.S    |  4 ++--
 4 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 693a0d784534..a25b8726ffa9 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -303,19 +303,20 @@ do {									\
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
-	return  __arch_copy_from_user(to, from, n);
+	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
+
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
-	return  __arch_copy_to_user(to, from, n);
+	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
+
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
@@ -338,22 +339,25 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 	return n;
 }
 
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
-		n = __copy_in_user(to, from, n);
+		n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
 	return n;
 }
+#define copy_in_user __copy_in_user
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user(__uaccess_mask_ptr(to), n);
+		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
+#define clear_user	__clear_user
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index c654df05b7d7..abe4e0984dbb 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -35,8 +35,8 @@ EXPORT_SYMBOL(clear_page);
 	/* user mem (segment) */
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
 
 	/* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index a9723c71c52b..fc6bb0f83511 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -24,7 +24,7 @@
 
 	.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
@@ -32,7 +32,7 @@
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
@@ -57,7 +57,7 @@ USER(9f, strb	wzr, [x0]	)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
 
 	.section .fixup,"ax"
 	.align	2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 81c8fc93c100..0219aa85b3cc 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -67,7 +67,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_in_user)
+ENTRY(__arch_copy_in_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -76,7 +76,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
-ENDPROC(__copy_in_user)
+ENDPROC(__arch_copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
-- 
2.21.0.rc0.269.g1a574e7a288b


Thread overview: 114+ messages

2019-06-14  3:07 [PATCH v4.4 00/45] V4.4 backport of arm64 Spectre patches Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 01/45] arm64: barrier: Add CSDB macros to control data-value prediction Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 02/45] arm64: Implement array_index_mask_nospec() Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 03/45] arm64: remove duplicate macro __KERNEL__ check Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 04/45] arm64: move TASK_* definitions to <asm/processor.h> Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 05/45] arm64: Make USER_DS an inclusive limit Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 06/45] arm64: Use pointer masking to limit uaccess speculation Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 07/45] arm64: entry: Ensure branch through syscall table is bounded under speculation Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 08/45] arm64: uaccess: Prevent speculative use of the current addr_limit Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 09/45] arm64: uaccess: Don't bother eliding access_ok checks in __{get, put}_user Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 10/45] mm/kasan: add API to check memory regions Viresh Kumar
2019-07-04 14:15   ` Julien Thierry
2019-07-05  3:13     ` Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 11/45] arm64: kasan: instrument user memory access API Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 12/45] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user Viresh Kumar [this message]
2019-06-14  3:07 ` [PATCH v4.4 13/45] arm64: cpufeature: Pass capability structure to ->enable callback Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 14/45] drivers/firmware: Expose psci_get_version through psci_ops structure Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 15/45] arm64: Factor out TTBR0_EL1 post-update workaround into a specific asm macro Viresh Kumar
2019-06-14  3:07 ` [PATCH v4.4 16/45] arm64: Move post_ttbr_update_workaround to C code Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 17/45] arm64: cpufeature: Add scope for capability check Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 18/45] arm64: Add skeleton to harden the branch predictor against aliasing attacks Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 19/45] arm64: Move BP hardening to check_and_switch_context Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 20/45] mm: Introduce lm_alias Viresh Kumar
2019-06-17 12:33   ` Julien Thierry
2019-06-18  5:00     ` Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 21/45] arm64: entry: Apply BP hardening for high-priority synchronous exceptions Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 22/45] arm64: entry: Apply BP hardening for suspicious interrupts from EL0 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 23/45] arm64: cputype: Add missing MIDR values for Cortex-A72 and Cortex-A75 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 24/45] arm64: cpu_errata: Allow an erratum to be match for all revisions of a core Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 25/45] arm64: Implement branch predictor hardening for affected Cortex-A CPUs Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 26/45] arm64: cputype info for Broadcom Vulcan Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 27/45] arm64: cputype: Add MIDR values for Cavium ThunderX2 CPUs Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 28/45] arm64: Branch predictor hardening for Cavium ThunderX2 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 29/45] arm64: KVM: Increment PC after handling an SMC trap Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 30/45] arm/arm64: KVM: Consolidate the PSCI include files Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 31/45] arm/arm64: KVM: Add PSCI_VERSION helper Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 32/45] arm/arm64: KVM: Add smccc accessors to PSCI code Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 33/45] ARM: 8478/2: arm/arm64: add arm-smccc Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 34/45] arm/arm64: KVM: Implement PSCI 1.0 support Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 35/45] arm/arm64: KVM: Advertise SMCCC v1.1 Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 36/45] arm/arm64: KVM: Turn kvm_psci_version into a static inline Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 37/45] arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 38/45] arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 39/45] firmware/psci: Expose PSCI conduit Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 40/45] firmware/psci: Expose SMCCC version through psci_ops Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 41/45] arm/arm64: smccc: Make function identifiers an unsigned quantity Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 42/45] arm/arm64: smccc: Implement SMCCC v1.1 inline primitive Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 43/45] arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 44/45] arm64: Kill PSCI_GET_VERSION as a variant-2 workaround Viresh Kumar
2019-06-14  3:08 ` [PATCH v4.4 45/45] arm64: futex: Mask __user pointers prior to dereference Viresh Kumar
2019-06-17 12:10 ` [PATCH v4.4 00/45] V4.4 backport of arm64 Spectre patches Greg KH
2019-06-17 16:03 ` Julien Thierry
2019-06-18 10:21   ` Viresh Kumar
2019-06-19 11:03     ` Julien Thierry
2019-06-19 11:20       ` Viresh Kumar
2019-06-17 16:30 ` Julien Thierry
2019-07-11 13:57 ` Julien Thierry
