linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2 0/3] Use C inlines for uaccess
@ 2019-11-22  2:24 Pavel Tatashin
  2019-11-22  2:24 ` [PATCH v2 1/3] arm/arm64/xen: use C inlines for privcmd_call Pavel Tatashin
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-22  2:24 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, linux-kernel, catalin.marinas,
	will, steve.capper, linux-arm-kernel, marc.zyngier, james.morse,
	vladimir.murzin, mark.rutland, tglx, gregkh, allison, info,
	alexios.zavras, sstabellini, boris.ostrovsky, jgross, stefan,
	yamada.masahiro, xen-devel, linux

Changelog
v2:
	- Addressed Russell King's concern by not adding
	  uaccess_* to ARM.
	- Removed the accidental change to xtensa

Convert the remaining uaccess_* calls from ASM macros to C inlines.

These patches apply against linux-next. I boot tested ARM64, and
compile tested ARM changes.

Pavel Tatashin (3):
  arm/arm64/xen: use C inlines for privcmd_call
  arm64: remove uaccess_ttbr0 asm macros from cache functions
  arm64: remove the rest of asm-uaccess.h

 arch/arm/include/asm/assembler.h       |  2 +-
 arch/arm/include/asm/xen/hypercall.h   | 10 +++++
 arch/arm/xen/enlighten.c               |  2 +-
 arch/arm/xen/hypercall.S               |  4 +-
 arch/arm64/include/asm/asm-uaccess.h   | 60 --------------------------
 arch/arm64/include/asm/cacheflush.h    | 38 ++++++++++++++--
 arch/arm64/include/asm/xen/hypercall.h | 28 ++++++++++++
 arch/arm64/kernel/entry.S              |  6 +--
 arch/arm64/lib/clear_user.S            |  2 +-
 arch/arm64/lib/copy_from_user.S        |  2 +-
 arch/arm64/lib/copy_in_user.S          |  2 +-
 arch/arm64/lib/copy_to_user.S          |  2 +-
 arch/arm64/mm/cache.S                  | 31 +++++--------
 arch/arm64/mm/context.c                | 12 ++++++
 arch/arm64/mm/flush.c                  |  2 +-
 arch/arm64/xen/hypercall.S             | 19 +-------
 include/xen/arm/hypercall.h            | 12 +++---
 17 files changed, 115 insertions(+), 119 deletions(-)
 delete mode 100644 arch/arm64/include/asm/asm-uaccess.h

-- 
2.24.0


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2 1/3] arm/arm64/xen: use C inlines for privcmd_call
  2019-11-22  2:24 [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin
@ 2019-11-22  2:24 ` Pavel Tatashin
  2019-11-22 19:59   ` Stefano Stabellini
  2019-11-22  2:24 ` [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions Pavel Tatashin
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-22  2:24 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, linux-kernel, catalin.marinas,
	will, steve.capper, linux-arm-kernel, marc.zyngier, james.morse,
	vladimir.murzin, mark.rutland, tglx, gregkh, allison, info,
	alexios.zavras, sstabellini, boris.ostrovsky, jgross, stefan,
	yamada.masahiro, xen-devel, linux

privcmd_call requires enabling access to userspace for the
duration of the hypercall.

Currently, this is done via assembly macros. Change it to C
inlines instead.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm/include/asm/assembler.h       |  2 +-
 arch/arm/include/asm/xen/hypercall.h   | 10 +++++++++
 arch/arm/xen/enlighten.c               |  2 +-
 arch/arm/xen/hypercall.S               |  4 ++--
 arch/arm64/include/asm/xen/hypercall.h | 28 ++++++++++++++++++++++++++
 arch/arm64/xen/hypercall.S             | 19 ++---------------
 include/xen/arm/hypercall.h            | 12 +++++------
 7 files changed, 50 insertions(+), 27 deletions(-)

diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 99929122dad7..8e9262a0f016 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -480,7 +480,7 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	.macro	uaccess_disable, tmp, isb=1
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*
-	 * Whenever we re-enter userspace, the domains should always be
+	 * Whenever we re-enter kernel, the domains should always be
 	 * set appropriately.
 	 */
 	mov	\tmp, #DACR_UACCESS_DISABLE
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 3522cbaed316..cac5bd9ef519 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1 +1,11 @@
+#ifndef _ASM_ARM_XEN_HYPERCALL_H
+#define _ASM_ARM_XEN_HYPERCALL_H
 #include <xen/arm/hypercall.h>
+
+static inline long privcmd_call(unsigned int call, unsigned long a1,
+				unsigned long a2, unsigned long a3,
+				unsigned long a4, unsigned long a5)
+{
+	return arch_privcmd_call(call, a1, a2, a3, a4, a5);
+}
+#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index dd6804a64f1a..e87280c6d25d 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -440,4 +440,4 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
 EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
 EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
-EXPORT_SYMBOL_GPL(privcmd_call);
+EXPORT_SYMBOL_GPL(arch_privcmd_call);
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index b11bba542fac..277078c7da49 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -94,7 +94,7 @@ HYPERCALL2(multicall);
 HYPERCALL2(vm_assist);
 HYPERCALL3(dm_op);
 
-ENTRY(privcmd_call)
+ENTRY(arch_privcmd_call)
 	stmdb sp!, {r4}
 	mov r12, r0
 	mov r0, r1
@@ -119,4 +119,4 @@ ENTRY(privcmd_call)
 
 	ldm sp!, {r4}
 	ret lr
-ENDPROC(privcmd_call);
+ENDPROC(arch_privcmd_call);
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
index 3522cbaed316..1a74fb28607f 100644
--- a/arch/arm64/include/asm/xen/hypercall.h
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -1 +1,29 @@
+#ifndef _ASM_ARM64_XEN_HYPERCALL_H
+#define _ASM_ARM64_XEN_HYPERCALL_H
 #include <xen/arm/hypercall.h>
+#include <linux/uaccess.h>
+
+static inline long privcmd_call(unsigned int call, unsigned long a1,
+				unsigned long a2, unsigned long a3,
+				unsigned long a4, unsigned long a5)
+{
+	long rv;
+
+	/*
+	 * Privcmd calls are issued by the userspace. The kernel needs to
+	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+	 * translations to user memory via AT instructions. Since AT
+	 * instructions are not affected by the PAN bit (ARMv8.1), we only
+	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+	 * is enabled (it implies that hardware UAO and PAN disabled).
+	 */
+	uaccess_ttbr0_enable();
+	rv = arch_privcmd_call(call, a1, a2, a3, a4, a5);
+	/*
+	 * Disable userspace access from kernel once the hyp call completed.
+	 */
+	uaccess_ttbr0_disable();
+
+	return rv;
+}
+#endif /* _ASM_ARM64_XEN_HYPERCALL_H */
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index c5f05c4a4d00..921611778d2a 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,7 +49,6 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#include <asm/asm-uaccess.h>
 #include <xen/interface/xen.h>
 
 
@@ -86,27 +85,13 @@ HYPERCALL2(multicall);
 HYPERCALL2(vm_assist);
 HYPERCALL3(dm_op);
 
-ENTRY(privcmd_call)
+ENTRY(arch_privcmd_call)
 	mov x16, x0
 	mov x0, x1
 	mov x1, x2
 	mov x2, x3
 	mov x3, x4
 	mov x4, x5
-	/*
-	 * Privcmd calls are issued by the userspace. The kernel needs to
-	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
-	 * translations to user memory via AT instructions. Since AT
-	 * instructions are not affected by the PAN bit (ARMv8.1), we only
-	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
-	 * is enabled (it implies that hardware UAO and PAN disabled).
-	 */
-	uaccess_ttbr0_enable x6, x7, x8
 	hvc XEN_IMM
-
-	/*
-	 * Disable userspace access from kernel once the hyp call completed.
-	 */
-	uaccess_ttbr0_disable x6, x7
 	ret
-ENDPROC(privcmd_call);
+ENDPROC(arch_privcmd_call);
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
index b40485e54d80..624c8ad7e42a 100644
--- a/include/xen/arm/hypercall.h
+++ b/include/xen/arm/hypercall.h
@@ -30,8 +30,8 @@
  * IN THE SOFTWARE.
  */
 
-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
+#ifndef _ARM_XEN_HYPERCALL_H
+#define _ARM_XEN_HYPERCALL_H
 
 #include <linux/bug.h>
 
@@ -41,9 +41,9 @@
 
 struct xen_dm_op_buf;
 
-long privcmd_call(unsigned call, unsigned long a1,
-		unsigned long a2, unsigned long a3,
-		unsigned long a4, unsigned long a5);
+long arch_privcmd_call(unsigned int call, unsigned long a1,
+		       unsigned long a2, unsigned long a3,
+		       unsigned long a4, unsigned long a5);
 int HYPERVISOR_xen_version(int cmd, void *arg);
 int HYPERVISOR_console_io(int cmd, int count, char *str);
 int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
@@ -88,4 +88,4 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
 	BUG();
 }
 
-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#endif /* _ARM_XEN_HYPERCALL_H */
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions
  2019-11-22  2:24 [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin
  2019-11-22  2:24 ` [PATCH v2 1/3] arm/arm64/xen: use C inlines for privcmd_call Pavel Tatashin
@ 2019-11-22  2:24 ` Pavel Tatashin
  2019-11-27 15:01   ` Mark Rutland
  2019-11-22  2:24 ` [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h Pavel Tatashin
  2019-11-26 13:50 ` [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin
  3 siblings, 1 reply; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-22  2:24 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, linux-kernel, catalin.marinas,
	will, steve.capper, linux-arm-kernel, marc.zyngier, james.morse,
	vladimir.murzin, mark.rutland, tglx, gregkh, allison, info,
	alexios.zavras, sstabellini, boris.ostrovsky, jgross, stefan,
	yamada.masahiro, xen-devel, linux

Replace the uaccess_ttbr0_disable/uaccess_ttbr0_enable asm macros
with inline variants, and remove the asm macros.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/asm-uaccess.h | 22 ----------------
 arch/arm64/include/asm/cacheflush.h  | 38 +++++++++++++++++++++++++---
 arch/arm64/mm/cache.S                | 30 ++++++++--------------
 arch/arm64/mm/flush.c                |  2 +-
 4 files changed, 46 insertions(+), 46 deletions(-)

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 35e6145e1402..8f763e5b41b1 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -34,27 +34,5 @@
 	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
 	isb
 	.endm
-
-	.macro	uaccess_ttbr0_disable, tmp1, tmp2
-alternative_if_not ARM64_HAS_PAN
-	save_and_disable_irq \tmp2		// avoid preemption
-	__uaccess_ttbr0_disable \tmp1
-	restore_irq \tmp2
-alternative_else_nop_endif
-	.endm
-
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
-alternative_if_not ARM64_HAS_PAN
-	save_and_disable_irq \tmp3		// avoid preemption
-	__uaccess_ttbr0_enable \tmp1, \tmp2
-	restore_irq \tmp3
-alternative_else_nop_endif
-	.endm
-#else
-	.macro	uaccess_ttbr0_disable, tmp1, tmp2
-	.endm
-
-	.macro	uaccess_ttbr0_enable, tmp1, tmp2, tmp3
-	.endm
 #endif
 #endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 665c78e0665a..cdd4a8eb8708 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -61,16 +61,48 @@
  *		- kaddr  - page address
  *		- size   - region size
  */
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int  invalidate_icache_range(unsigned long start, unsigned long end);
+extern void __arch_flush_icache_range(unsigned long start, unsigned long end);
+extern long __arch_flush_cache_user_range(unsigned long start,
+					  unsigned long end);
+extern int  arch_invalidate_icache_range(unsigned long start,
+					 unsigned long end);
+
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pop(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
 
+static inline void __flush_icache_range(unsigned long start, unsigned long end)
+{
+	uaccess_ttbr0_enable();
+	__arch_flush_icache_range(start, end);
+	uaccess_ttbr0_disable();
+}
+
+static inline void __flush_cache_user_range(unsigned long start,
+					    unsigned long end)
+{
+	uaccess_ttbr0_enable();
+	__arch_flush_cache_user_range(start, end);
+	uaccess_ttbr0_disable();
+}
+
+static inline int invalidate_icache_range(unsigned long start,
+					  unsigned long end)
+{
+	int rv;
+#if ARM64_HAS_CACHE_DIC
+	rv = arch_invalidate_icache_range(start, end);
+#else
+	uaccess_ttbr0_enable();
+	rv = arch_invalidate_icache_range(start, end);
+	uaccess_ttbr0_disable();
+#endif
+	return rv;
+}
+
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 	__flush_icache_range(start, end);
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601..408d317a47d2 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
 #include <asm/asm-uaccess.h>
 
 /*
- *	flush_icache_range(start,end)
+ *	__arch_flush_icache_range(start,end)
  *
  *	Ensure that the I and D caches are coherent within specified region.
  *	This is typically used when code has been written to a memory region,
@@ -24,11 +24,11 @@
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(__flush_icache_range)
+ENTRY(__arch_flush_icache_range)
 	/* FALLTHROUGH */
 
 /*
- *	__flush_cache_user_range(start,end)
+ *	__arch_flush_cache_user_range(start,end)
  *
  *	Ensure that the I and D caches are coherent within specified region.
  *	This is typically used when code has been written to a memory region,
@@ -37,8 +37,7 @@ ENTRY(__flush_icache_range)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
  */
-ENTRY(__flush_cache_user_range)
-	uaccess_ttbr0_enable x2, x3, x4
+ENTRY(__arch_flush_cache_user_range)
 alternative_if ARM64_HAS_CACHE_IDC
 	dsb	ishst
 	b	7f
@@ -60,14 +59,11 @@ alternative_if ARM64_HAS_CACHE_DIC
 alternative_else_nop_endif
 	invalidate_icache_by_line x0, x1, x2, x3, 9f
 8:	mov	x0, #0
-1:
-	uaccess_ttbr0_disable x1, x2
-	ret
-9:
-	mov	x0, #-EFAULT
+1:	ret
+9:	mov	x0, #-EFAULT
 	b	1b
-ENDPROC(__flush_icache_range)
-ENDPROC(__flush_cache_user_range)
+ENDPROC(__arch_flush_icache_range)
+ENDPROC(__arch_flush_cache_user_range)
 
 /*
  *	invalidate_icache_range(start,end)
@@ -83,16 +79,10 @@ alternative_if ARM64_HAS_CACHE_DIC
 	isb
 	ret
 alternative_else_nop_endif
-
-	uaccess_ttbr0_enable x2, x3, x4
-
 	invalidate_icache_by_line x0, x1, x2, x3, 2f
 	mov	x0, xzr
-1:
-	uaccess_ttbr0_disable x1, x2
-	ret
-2:
-	mov	x0, #-EFAULT
+1:	ret
+2:	mov	x0, #-EFAULT
 	b	1b
 ENDPROC(invalidate_icache_range)
 
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..66249fca2092 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(__arch_flush_icache_range);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size)
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h
  2019-11-22  2:24 [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin
  2019-11-22  2:24 ` [PATCH v2 1/3] arm/arm64/xen: use C inlines for privcmd_call Pavel Tatashin
  2019-11-22  2:24 ` [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions Pavel Tatashin
@ 2019-11-22  2:24 ` Pavel Tatashin
  2019-11-27 15:11   ` Mark Rutland
  2019-11-26 13:50 ` [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin
  3 siblings, 1 reply; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-22  2:24 UTC (permalink / raw)
  To: pasha.tatashin, jmorris, sashal, linux-kernel, catalin.marinas,
	will, steve.capper, linux-arm-kernel, marc.zyngier, james.morse,
	vladimir.murzin, mark.rutland, tglx, gregkh, allison, info,
	alexios.zavras, sstabellini, boris.ostrovsky, jgross, stefan,
	yamada.masahiro, xen-devel, linux

The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable
macros are the last two defined in asm-uaccess.h.

Replace them with C wrappers and call C functions from
kernel_entry and kernel_exit.

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
---
 arch/arm64/include/asm/asm-uaccess.h | 38 ----------------------------
 arch/arm64/kernel/entry.S            |  6 ++---
 arch/arm64/lib/clear_user.S          |  2 +-
 arch/arm64/lib/copy_from_user.S      |  2 +-
 arch/arm64/lib/copy_in_user.S        |  2 +-
 arch/arm64/lib/copy_to_user.S        |  2 +-
 arch/arm64/mm/cache.S                |  1 -
 arch/arm64/mm/context.c              | 12 +++++++++
 8 files changed, 19 insertions(+), 46 deletions(-)
 delete mode 100644 arch/arm64/include/asm/asm-uaccess.h

diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
deleted file mode 100644
index 8f763e5b41b1..000000000000
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ASM_UACCESS_H
-#define __ASM_ASM_UACCESS_H
-
-#include <asm/alternative.h>
-#include <asm/kernel-pgtable.h>
-#include <asm/mmu.h>
-#include <asm/sysreg.h>
-#include <asm/assembler.h>
-
-/*
- * User access enabling/disabling macros.
- */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	.macro	__uaccess_ttbr0_disable, tmp1
-	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
-	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
-	sub	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE	// reserved_ttbr0 just before swapper_pg_dir
-	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
-	isb
-	add	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE
-	msr	ttbr1_el1, \tmp1		// set reserved ASID
-	isb
-	.endm
-
-	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
-	get_current_task \tmp1
-	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
-	mrs	\tmp2, ttbr1_el1
-	extr    \tmp2, \tmp2, \tmp1, #48
-	ror     \tmp2, \tmp2, #16
-	msr	ttbr1_el1, \tmp2		// set the active ASID
-	isb
-	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
-	isb
-	.endm
-#endif
-#endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 583f71abbe98..c7b571e6d0f2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -22,8 +22,8 @@
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
-#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -219,7 +219,7 @@ alternative_else_nop_endif
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
 	.endif
 
-	__uaccess_ttbr0_disable x21
+	bl __uaccess_ttbr0_disable_c
 1:
 #endif
 
@@ -293,7 +293,7 @@ alternative_else_nop_endif
 	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 	.endif
 
-	__uaccess_ttbr0_enable x0, x1
+	bl	__uaccess_ttbr0_enable_c
 
 	.if	\el == 0
 	/*
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index aeafc03e961a..b0b4a86a09e2 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -6,7 +6,7 @@
  */
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 
 	.text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index ebb3c06cbb5d..142bc7505518 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -5,7 +5,7 @@
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 3d8153a1ebce..04dc48ca26f7 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -7,7 +7,7 @@
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 357eae2c18eb..8f3218ae88ab 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -5,7 +5,7 @@
 
 #include <linux/linkage.h>
 
-#include <asm/asm-uaccess.h>
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 408d317a47d2..7940d6ef5da5 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -12,7 +12,6 @@
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
-#include <asm/asm-uaccess.h>
 
 /*
  *	__arch_flush_icache_range(start,end)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329fde2dd..4fc32c504dea 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -237,6 +237,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 		cpu_switch_mm(mm->pgd, mm);
 }
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+asmlinkage void __uaccess_ttbr0_enable_c(void)
+{
+	__uaccess_ttbr0_enable();
+}
+
+asmlinkage void __uaccess_ttbr0_disable_c(void)
+{
+	__uaccess_ttbr0_disable();
+}
+#endif
+
 /* Errata workaround post TTBRx_EL1 update. */
 asmlinkage void post_ttbr_update_workaround(void)
 {
-- 
2.24.0


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 1/3] arm/arm64/xen: use C inlines for privcmd_call
  2019-11-22  2:24 ` [PATCH v2 1/3] arm/arm64/xen: use C inlines for privcmd_call Pavel Tatashin
@ 2019-11-22 19:59   ` Stefano Stabellini
  0 siblings, 0 replies; 15+ messages in thread
From: Stefano Stabellini @ 2019-11-22 19:59 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: jmorris, sashal, linux-kernel, catalin.marinas, will,
	steve.capper, linux-arm-kernel, marc.zyngier, james.morse,
	vladimir.murzin, mark.rutland, tglx, gregkh, allison, info,
	alexios.zavras, sstabellini, boris.ostrovsky, jgross, stefan,
	yamada.masahiro, xen-devel, linux

On Thu, 21 Nov 2019, Pavel Tatashin wrote:
> privcmd_call requires to enable access to userspace for the
> duration of the hypercall.
> 
> Currently, this is done via assembly macros. Change it to C
> inlines instead.
> 
> Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>

I am OK with this.

Acked-by: Stefano Stabellini <sstabellini@kernel.org>


> ---
>  arch/arm/include/asm/assembler.h       |  2 +-
>  arch/arm/include/asm/xen/hypercall.h   | 10 +++++++++
>  arch/arm/xen/enlighten.c               |  2 +-
>  arch/arm/xen/hypercall.S               |  4 ++--
>  arch/arm64/include/asm/xen/hypercall.h | 28 ++++++++++++++++++++++++++
>  arch/arm64/xen/hypercall.S             | 19 ++---------------
>  include/xen/arm/hypercall.h            | 12 +++++------
>  7 files changed, 50 insertions(+), 27 deletions(-)
> 
> diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
> index 99929122dad7..8e9262a0f016 100644
> --- a/arch/arm/include/asm/assembler.h
> +++ b/arch/arm/include/asm/assembler.h
> @@ -480,7 +480,7 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
>  	.macro	uaccess_disable, tmp, isb=1
>  #ifdef CONFIG_CPU_SW_DOMAIN_PAN
>  	/*
> -	 * Whenever we re-enter userspace, the domains should always be
> +	 * Whenever we re-enter kernel, the domains should always be
>  	 * set appropriately.
>  	 */
>  	mov	\tmp, #DACR_UACCESS_DISABLE
> diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
> index 3522cbaed316..cac5bd9ef519 100644
> --- a/arch/arm/include/asm/xen/hypercall.h
> +++ b/arch/arm/include/asm/xen/hypercall.h
> @@ -1 +1,11 @@
> +#ifndef _ASM_ARM_XEN_HYPERCALL_H
> +#define _ASM_ARM_XEN_HYPERCALL_H
>  #include <xen/arm/hypercall.h>
> +
> +static inline long privcmd_call(unsigned int call, unsigned long a1,
> +				unsigned long a2, unsigned long a3,
> +				unsigned long a4, unsigned long a5)
> +{
> +	return arch_privcmd_call(call, a1, a2, a3, a4, a5);
> +}
> +#endif /* _ASM_ARM_XEN_HYPERCALL_H */
> diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
> index dd6804a64f1a..e87280c6d25d 100644
> --- a/arch/arm/xen/enlighten.c
> +++ b/arch/arm/xen/enlighten.c
> @@ -440,4 +440,4 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
>  EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
>  EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
>  EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
> -EXPORT_SYMBOL_GPL(privcmd_call);
> +EXPORT_SYMBOL_GPL(arch_privcmd_call);
> diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
> index b11bba542fac..277078c7da49 100644
> --- a/arch/arm/xen/hypercall.S
> +++ b/arch/arm/xen/hypercall.S
> @@ -94,7 +94,7 @@ HYPERCALL2(multicall);
>  HYPERCALL2(vm_assist);
>  HYPERCALL3(dm_op);
>  
> -ENTRY(privcmd_call)
> +ENTRY(arch_privcmd_call)
>  	stmdb sp!, {r4}
>  	mov r12, r0
>  	mov r0, r1
> @@ -119,4 +119,4 @@ ENTRY(privcmd_call)
>  
>  	ldm sp!, {r4}
>  	ret lr
> -ENDPROC(privcmd_call);
> +ENDPROC(arch_privcmd_call);
> diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
> index 3522cbaed316..1a74fb28607f 100644
> --- a/arch/arm64/include/asm/xen/hypercall.h
> +++ b/arch/arm64/include/asm/xen/hypercall.h
> @@ -1 +1,29 @@
> +#ifndef _ASM_ARM64_XEN_HYPERCALL_H
> +#define _ASM_ARM64_XEN_HYPERCALL_H
>  #include <xen/arm/hypercall.h>
> +#include <linux/uaccess.h>
> +
> +static inline long privcmd_call(unsigned int call, unsigned long a1,
> +				unsigned long a2, unsigned long a3,
> +				unsigned long a4, unsigned long a5)
> +{
> +	long rv;
> +
> +	/*
> +	 * Privcmd calls are issued by the userspace. The kernel needs to
> +	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
> +	 * translations to user memory via AT instructions. Since AT
> +	 * instructions are not affected by the PAN bit (ARMv8.1), we only
> +	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
> +	 * is enabled (it implies that hardware UAO and PAN disabled).
> +	 */
> +	uaccess_ttbr0_enable();
> +	rv = arch_privcmd_call(call, a1, a2, a3, a4, a5);
> +	/*
> +	 * Disable userspace access from kernel once the hyp call completed.
> +	 */
> +	uaccess_ttbr0_disable();
> +
> +	return rv;
> +}
> +#endif /* _ASM_ARM64_XEN_HYPERCALL_H */
> diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
> index c5f05c4a4d00..921611778d2a 100644
> --- a/arch/arm64/xen/hypercall.S
> +++ b/arch/arm64/xen/hypercall.S
> @@ -49,7 +49,6 @@
>  
>  #include <linux/linkage.h>
>  #include <asm/assembler.h>
> -#include <asm/asm-uaccess.h>
>  #include <xen/interface/xen.h>
>  
>  
> @@ -86,27 +85,13 @@ HYPERCALL2(multicall);
>  HYPERCALL2(vm_assist);
>  HYPERCALL3(dm_op);
>  
> -ENTRY(privcmd_call)
> +ENTRY(arch_privcmd_call)
>  	mov x16, x0
>  	mov x0, x1
>  	mov x1, x2
>  	mov x2, x3
>  	mov x3, x4
>  	mov x4, x5
> -	/*
> -	 * Privcmd calls are issued by the userspace. The kernel needs to
> -	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
> -	 * translations to user memory via AT instructions. Since AT
> -	 * instructions are not affected by the PAN bit (ARMv8.1), we only
> -	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
> -	 * is enabled (it implies that hardware UAO and PAN disabled).
> -	 */
> -	uaccess_ttbr0_enable x6, x7, x8
>  	hvc XEN_IMM
> -
> -	/*
> -	 * Disable userspace access from kernel once the hyp call completed.
> -	 */
> -	uaccess_ttbr0_disable x6, x7
>  	ret
> -ENDPROC(privcmd_call);
> +ENDPROC(arch_privcmd_call);
> diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
> index b40485e54d80..624c8ad7e42a 100644
> --- a/include/xen/arm/hypercall.h
> +++ b/include/xen/arm/hypercall.h
> @@ -30,8 +30,8 @@
>   * IN THE SOFTWARE.
>   */
>  
> -#ifndef _ASM_ARM_XEN_HYPERCALL_H
> -#define _ASM_ARM_XEN_HYPERCALL_H
> +#ifndef _ARM_XEN_HYPERCALL_H
> +#define _ARM_XEN_HYPERCALL_H
>  
>  #include <linux/bug.h>
>  
> @@ -41,9 +41,9 @@
>  
>  struct xen_dm_op_buf;
>  
> -long privcmd_call(unsigned call, unsigned long a1,
> -		unsigned long a2, unsigned long a3,
> -		unsigned long a4, unsigned long a5);
> +long arch_privcmd_call(unsigned int call, unsigned long a1,
> +		       unsigned long a2, unsigned long a3,
> +		       unsigned long a4, unsigned long a5);
>  int HYPERVISOR_xen_version(int cmd, void *arg);
>  int HYPERVISOR_console_io(int cmd, int count, char *str);
>  int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
> @@ -88,4 +88,4 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
>  	BUG();
>  }
>  
> -#endif /* _ASM_ARM_XEN_HYPERCALL_H */
> +#endif /* _ARM_XEN_HYPERCALL_H */
> -- 
> 2.24.0
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 0/3] Use C inlines for uaccess
  2019-11-22  2:24 [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin
                   ` (2 preceding siblings ...)
  2019-11-22  2:24 ` [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h Pavel Tatashin
@ 2019-11-26 13:50 ` Pavel Tatashin
  3 siblings, 0 replies; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-26 13:50 UTC (permalink / raw)
  To: Pavel Tatashin, James Morris, Sasha Levin, LKML, Catalin Marinas,
	Will Deacon, steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Mark Rutland, Thomas Gleixner,
	Greg Kroah-Hartman, allison, info, alexios.zavras,
	Stefano Stabellini, boris.ostrovsky, jgross, Stefan Agner,
	Masahiro Yamada, xen-devel, Russell King - ARM Linux admin,
	Kees Cook

Kees Cook mentioned that it is a good idea to assert the PAN state
during disable/enable. Since, with this change everything is moved to
the same C place, if this hardening is something others also want to
see, I could add it in the next revision of this series. Here are the
options to choose from:
1. Do something similar to what is done in preempt with
CONFIG_PREEMPT_COUNT:  keep a boolean (could be optionally enabled by
a config) that is checked when uaccess_enable()/uaccess_disable() are
called. This way we will always check that state even on processors
with hardware PAN and UAO, however, there is going to be this extra
overhead of checking/storing the variable on userland enter/exits even
on systems which have these macros set to nothing otherwise.
2. Check only in __uaccess_ttbr0_disable()/__uaccess_ttbr0_enable()
that ttbr0_el1 is in the expected state, or add another boolean  for
this purpose to thread_info.
3. Keep as is, and do not add extra overhead for this hardening.

Thank you,
Pasha

On Thu, Nov 21, 2019 at 9:24 PM Pavel Tatashin
<pasha.tatashin@soleen.com> wrote:
>
> Changelog
> v2:
>         - Addressed Russell King's concern by not adding
>           uaccess_* to ARM.
>         - Removed the accidental change to xtensa
>
> Convert the remaining uaccess_* calls from ASM macros to C inlines.
>
> These patches apply against linux-next. I boot tested ARM64, and
> compile tested ARM changes.
>
> Pavel Tatashin (3):
>   arm/arm64/xen: use C inlines for privcmd_call
>   arm64: remove uaccess_ttbr0 asm macros from cache functions
>   arm64: remove the rest of asm-uaccess.h
>
>  arch/arm/include/asm/assembler.h       |  2 +-
>  arch/arm/include/asm/xen/hypercall.h   | 10 +++++
>  arch/arm/xen/enlighten.c               |  2 +-
>  arch/arm/xen/hypercall.S               |  4 +-
>  arch/arm64/include/asm/asm-uaccess.h   | 60 --------------------------
>  arch/arm64/include/asm/cacheflush.h    | 38 ++++++++++++++--
>  arch/arm64/include/asm/xen/hypercall.h | 28 ++++++++++++
>  arch/arm64/kernel/entry.S              |  6 +--
>  arch/arm64/lib/clear_user.S            |  2 +-
>  arch/arm64/lib/copy_from_user.S        |  2 +-
>  arch/arm64/lib/copy_in_user.S          |  2 +-
>  arch/arm64/lib/copy_to_user.S          |  2 +-
>  arch/arm64/mm/cache.S                  | 31 +++++--------
>  arch/arm64/mm/context.c                | 12 ++++++
>  arch/arm64/mm/flush.c                  |  2 +-
>  arch/arm64/xen/hypercall.S             | 19 +-------
>  include/xen/arm/hypercall.h            | 12 +++---
>  17 files changed, 115 insertions(+), 119 deletions(-)
>  delete mode 100644 arch/arm64/include/asm/asm-uaccess.h
>
> --
> 2.24.0
>

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions
  2019-11-22  2:24 ` [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions Pavel Tatashin
@ 2019-11-27 15:01   ` Mark Rutland
  2019-11-27 15:10     ` Pavel Tatashin
  0 siblings, 1 reply; 15+ messages in thread
From: Mark Rutland @ 2019-11-27 15:01 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: jmorris, sashal, linux-kernel, catalin.marinas, will,
	steve.capper, linux-arm-kernel, marc.zyngier, james.morse,
	vladimir.murzin, tglx, gregkh, allison, info, alexios.zavras,
	sstabellini, boris.ostrovsky, jgross, stefan, yamada.masahiro,
	xen-devel, linux

Hi Pavel,

On Thu, Nov 21, 2019 at 09:24:05PM -0500, Pavel Tatashin wrote:
> Replace the uaccess_ttbr0_disable/uaccess_ttbr0_enable via
> inline variants, and remove asm macros.

A commit message should provide rationale, rather than just a
description of the patch. Something like:

| We currently duplicate the logic to enable/disable uaccess via TTBR0,
| with C functions and assembly macros. This is a maintenance burden
| and is liable to lead to subtle bugs, so let's get rid of the assembly
| macros, and always use the C functions. This requires refactoring
| some assembly functions to have a C wrapper.

[...]

> +static inline int invalidate_icache_range(unsigned long start,
> +					  unsigned long end)
> +{
> +	int rv;
> +#if ARM64_HAS_CACHE_DIC
> +	rv = arch_invalidate_icache_range(start, end);
> +#else
> +	uaccess_ttbr0_enable();
> +	rv = arch_invalidate_icache_range(start, end);
> +	uaccess_ttbr0_disable();
> +#endif
> +	return rv;
> +}

This ifdeffery is not the same as an alternative_if, and even if it were
the ARM64_HAS_CACHE_DIC behaviour is not the same as the existing
assembly.

This should be:

static inline int invalidate_icache_range(unsigned long start,
					  unsigned long end)
{
	int ret;

	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
		isb();
		return 0;
	}
	
	uaccess_ttbr0_enable();
	ret = arch_invalidate_icache_range(start, end);
	uaccess_ttbr0_disable();

	return ret;
}

The 'arch_' prefix should probably be 'asm_' (or have an '_asm' suffix),
since this is entirely local to the arch code, and even then should only
be called from the C wrappers.

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions
  2019-11-27 15:01   ` Mark Rutland
@ 2019-11-27 15:10     ` Pavel Tatashin
  2019-11-27 15:14       ` Mark Rutland
  0 siblings, 1 reply; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-27 15:10 UTC (permalink / raw)
  To: Mark Rutland
  Cc: James Morris, Sasha Levin, LKML, Catalin Marinas, Will Deacon,
	steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Thomas Gleixner, Greg Kroah-Hartman, allison,
	info, alexios.zavras, Stefano Stabellini, boris.ostrovsky,
	jgross, Stefan Agner, Masahiro Yamada, xen-devel,
	Russell King - ARM Linux admin

Hi Mark,

Thank you for reviewing this work.

> A commit message should provide rationale, rather than just a
> description of the patch. Something like:
>
> | We currently duplicate the logic to enable/disable uaccess via TTBR0,
> | with C functions and assembly macros. This is a maintenance burden
> | and is liable to lead to subtle bugs, so let's get rid of the assembly
> | macros, and always use the C functions. This requires refactoring
> | some assembly functions to have a C wrapper.

Thank you for suggestion, I will fix my commit log.
>
> [...]
>
> > +static inline int invalidate_icache_range(unsigned long start,
> > +                                       unsigned long end)
> > +{
> > +     int rv;
> > +#if ARM64_HAS_CACHE_DIC
> > +     rv = arch_invalidate_icache_range(start, end);
> > +#else
> > +     uaccess_ttbr0_enable();
> > +     rv = arch_invalidate_icache_range(start, end);
> > +     uaccess_ttbr0_disable();
> > +#endif
> > +     return rv;
> > +}
>
> This ifdeffery is not the same as an alternative_if, and even if it were
> the ARM64_HAS_CACHE_DIC behaviour is not the same as the existing
> assembly.
>
> This should be:
>
> static inline int invalidate_icache_range(unsigned long start,
>                                           unsigned long end)
> {
>         int ret;
>
>         if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) {
>                 isb();
>                 return 0;
>         }
>
>         uaccess_ttbr0_enable();
>         ret = arch_invalidate_icache_range(start, end);
>         uaccess_ttbr0_disable();
>
>         return ret;
> }

I will fix it, thanks.

>
> The 'arch_' prefix should probably be 'asm_' (or have an '_asm' suffix),
> since this is entirely local to the arch code, and even then should only
> be called from the C wrappers.

Sure, I can change it to asm_*, I was using arch_* to be consistent
with __arch_copy_from_user() and friends.

Thank you,
Pasha

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h
  2019-11-22  2:24 ` [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h Pavel Tatashin
@ 2019-11-27 15:11   ` Mark Rutland
  2019-11-27 15:31     ` Pavel Tatashin
  0 siblings, 1 reply; 15+ messages in thread
From: Mark Rutland @ 2019-11-27 15:11 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: jmorris, sashal, linux-kernel, catalin.marinas, will,
	steve.capper, linux-arm-kernel, marc.zyngier, james.morse,
	vladimir.murzin, tglx, gregkh, allison, info, alexios.zavras,
	sstabellini, boris.ostrovsky, jgross, stefan, yamada.masahiro,
	xen-devel, linux

On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable,
> are the last two macros defined in asm-uaccess.h.
> 
> Replace them with C wrappers and call C functions from
> kernel_entry and kernel_exit.

For now, please leave those as-is.

I don't think we want to have out-of-line C wrappers in the middle of
the entry assembly where we don't have a complete kernel environment.
The use in entry code can also assume non-preemptibility, while the C
functions have to explicitly disable that.

We can certainly remove the includes of <asm/asm-uaccess.h> elsewhere,
and maybe fold the macros into entry.S if it's not too crowded.

Thanks,
Mark.

> 
> Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
> ---
>  arch/arm64/include/asm/asm-uaccess.h | 38 ----------------------------
>  arch/arm64/kernel/entry.S            |  6 ++---
>  arch/arm64/lib/clear_user.S          |  2 +-
>  arch/arm64/lib/copy_from_user.S      |  2 +-
>  arch/arm64/lib/copy_in_user.S        |  2 +-
>  arch/arm64/lib/copy_to_user.S        |  2 +-
>  arch/arm64/mm/cache.S                |  1 -
>  arch/arm64/mm/context.c              | 12 +++++++++
>  8 files changed, 19 insertions(+), 46 deletions(-)
>  delete mode 100644 arch/arm64/include/asm/asm-uaccess.h
> 
> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
> deleted file mode 100644
> index 8f763e5b41b1..000000000000
> --- a/arch/arm64/include/asm/asm-uaccess.h
> +++ /dev/null
> @@ -1,38 +0,0 @@
> -/* SPDX-License-Identifier: GPL-2.0 */
> -#ifndef __ASM_ASM_UACCESS_H
> -#define __ASM_ASM_UACCESS_H
> -
> -#include <asm/alternative.h>
> -#include <asm/kernel-pgtable.h>
> -#include <asm/mmu.h>
> -#include <asm/sysreg.h>
> -#include <asm/assembler.h>
> -
> -/*
> - * User access enabling/disabling macros.
> - */
> -#ifdef CONFIG_ARM64_SW_TTBR0_PAN
> -	.macro	__uaccess_ttbr0_disable, tmp1
> -	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
> -	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
> -	sub	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE	// reserved_ttbr0 just before swapper_pg_dir
> -	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
> -	isb
> -	add	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE
> -	msr	ttbr1_el1, \tmp1		// set reserved ASID
> -	isb
> -	.endm
> -
> -	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
> -	get_current_task \tmp1
> -	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
> -	mrs	\tmp2, ttbr1_el1
> -	extr    \tmp2, \tmp2, \tmp1, #48
> -	ror     \tmp2, \tmp2, #16
> -	msr	ttbr1_el1, \tmp2		// set the active ASID
> -	isb
> -	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
> -	isb
> -	.endm
> -#endif
> -#endif
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 583f71abbe98..c7b571e6d0f2 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -22,8 +22,8 @@
>  #include <asm/mmu.h>
>  #include <asm/processor.h>
>  #include <asm/ptrace.h>
> +#include <asm/kernel-pgtable.h>
>  #include <asm/thread_info.h>
> -#include <asm/asm-uaccess.h>
>  #include <asm/unistd.h>
>  
>  /*
> @@ -219,7 +219,7 @@ alternative_else_nop_endif
>  	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
>  	.endif
>  
> -	__uaccess_ttbr0_disable x21
> +	bl __uaccess_ttbr0_disable_c
>  1:
>  #endif
>  
> @@ -293,7 +293,7 @@ alternative_else_nop_endif
>  	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
>  	.endif
>  
> -	__uaccess_ttbr0_enable x0, x1
> +	bl	__uaccess_ttbr0_enable_c
>  
>  	.if	\el == 0
>  	/*
> diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
> index aeafc03e961a..b0b4a86a09e2 100644
> --- a/arch/arm64/lib/clear_user.S
> +++ b/arch/arm64/lib/clear_user.S
> @@ -6,7 +6,7 @@
>   */
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  
>  	.text
> diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
> index ebb3c06cbb5d..142bc7505518 100644
> --- a/arch/arm64/lib/copy_from_user.S
> +++ b/arch/arm64/lib/copy_from_user.S
> @@ -5,7 +5,7 @@
>  
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  #include <asm/cache.h>
>  
> diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
> index 3d8153a1ebce..04dc48ca26f7 100644
> --- a/arch/arm64/lib/copy_in_user.S
> +++ b/arch/arm64/lib/copy_in_user.S
> @@ -7,7 +7,7 @@
>  
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  #include <asm/cache.h>
>  
> diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
> index 357eae2c18eb..8f3218ae88ab 100644
> --- a/arch/arm64/lib/copy_to_user.S
> +++ b/arch/arm64/lib/copy_to_user.S
> @@ -5,7 +5,7 @@
>  
>  #include <linux/linkage.h>
>  
> -#include <asm/asm-uaccess.h>
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  #include <asm/cache.h>
>  
> diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> index 408d317a47d2..7940d6ef5da5 100644
> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -12,7 +12,6 @@
>  #include <asm/assembler.h>
>  #include <asm/cpufeature.h>
>  #include <asm/alternative.h>
> -#include <asm/asm-uaccess.h>
>  
>  /*
>   *	__arch_flush_icache_range(start,end)
> diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
> index b5e329fde2dd..4fc32c504dea 100644
> --- a/arch/arm64/mm/context.c
> +++ b/arch/arm64/mm/context.c
> @@ -237,6 +237,18 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
>  		cpu_switch_mm(mm->pgd, mm);
>  }
>  
> +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
> +asmlinkage void __uaccess_ttbr0_enable_c(void)
> +{
> +	__uaccess_ttbr0_enable();
> +}
> +
> +asmlinkage void __uaccess_ttbr0_disable_c(void)
> +{
> +	__uaccess_ttbr0_disable();
> +}
> +#endif
> +
>  /* Errata workaround post TTBRx_EL1 update. */
>  asmlinkage void post_ttbr_update_workaround(void)
>  {
> -- 
> 2.24.0
> 

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions
  2019-11-27 15:10     ` Pavel Tatashin
@ 2019-11-27 15:14       ` Mark Rutland
  0 siblings, 0 replies; 15+ messages in thread
From: Mark Rutland @ 2019-11-27 15:14 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: James Morris, Sasha Levin, LKML, Catalin Marinas, Will Deacon,
	steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Thomas Gleixner, Greg Kroah-Hartman, allison,
	info, alexios.zavras, Stefano Stabellini, boris.ostrovsky,
	jgross, Stefan Agner, Masahiro Yamada, xen-devel,
	Russell King - ARM Linux admin

On Wed, Nov 27, 2019 at 10:10:07AM -0500, Pavel Tatashin wrote:
> Hi Mark,
> 
> Thank you for reviewing this work.
 
> > The 'arch_' prefix should probably be 'asm_' (or have an '_asm' suffix),
> > since this is entirely local to the arch code, and even then should only
> > be called from the C wrappers.
> 
> Sure, I can change it to asm_*, I was using arch_* to be consistent
> with __arch_copy_from_user() and friends.

FWIW, that naming was from before the common uaccess code took on the
raw_* naming for the arch functions, and I was expecting that the arch_*
functions would end up being called from core code.

For now it's probably too churny to change that existing case.

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h
  2019-11-27 15:11   ` Mark Rutland
@ 2019-11-27 15:31     ` Pavel Tatashin
  2019-11-27 16:03       ` Mark Rutland
  0 siblings, 1 reply; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-27 15:31 UTC (permalink / raw)
  To: Mark Rutland
  Cc: James Morris, Sasha Levin, LKML, Catalin Marinas, Will Deacon,
	steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Thomas Gleixner, Greg Kroah-Hartman, allison,
	info, alexios.zavras, Stefano Stabellini, boris.ostrovsky,
	jgross, Stefan Agner, Masahiro Yamada, xen-devel,
	Russell King - ARM Linux admin

On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable,
> > are the last two macros defined in asm-uaccess.h.
> >
> > Replace them with C wrappers and call C functions from
> > kernel_entry and kernel_exit.
>
> For now, please leave those as-is.
>
> I don't think we want to have out-of-line C wrappers in the middle of
> the entry assembly where we don't have a complete kernel environment.
> The use in entry code can also assume non-preemptibility, while the C
> functions have to explicitly disable that.

I do not understand: if a C function is called from a non-preemptible
context it stays non-preemptible. kernel_exit already may call C
functions around the time __uaccess_ttbr0_enable is called (it may
call post_ttbr_update_workaround), and that C function does not do
explicit preempt disable.

> We can certainly remove the includes of <asm/asm-uaccess.h> elsewhere,
> and maybe fold the macros into entry.S if it's not too crowded.

I can do this as a separate patch.

Thank you,
Pasha

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h
  2019-11-27 15:31     ` Pavel Tatashin
@ 2019-11-27 16:03       ` Mark Rutland
  2019-11-27 16:09         ` Pavel Tatashin
  0 siblings, 1 reply; 15+ messages in thread
From: Mark Rutland @ 2019-11-27 16:03 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: James Morris, Sasha Levin, LKML, Catalin Marinas, Will Deacon,
	steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Thomas Gleixner, Greg Kroah-Hartman, allison,
	info, alexios.zavras, Stefano Stabellini, boris.ostrovsky,
	jgross, Stefan Agner, Masahiro Yamada, xen-devel,
	Russell King - ARM Linux admin

On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> >
> > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable,
> > > are the last two macros defined in asm-uaccess.h.
> > >
> > > Replace them with C wrappers and call C functions from
> > > kernel_entry and kernel_exit.
> >
> > For now, please leave those as-is.
> >
> > I don't think we want to have out-of-line C wrappers in the middle of
> > the entry assembly where we don't have a complete kernel environment.
> > The use in entry code can also assume non-preemptibility, while the C
> > functions have to explicitly disable that.
> 
> I do not understand: if a C function is called from a non-preemptible
> context it stays non-preemptible. kernel_exit already may call C
> functions around the time __uaccess_ttbr0_enable is called (it may
> call post_ttbr_update_workaround), and that C function does not do
> explicit preempt disable.

Sorry, I meant that IRQs are disabled here.

The C wrapper calls __uaccess_ttbr0_enable(), which calls
local_irq_save() and local_irq_restore(). Those are pointless in the
bowels of the entry code, and potentially expensive if IRQ prio masking
is in use.

I'd rather not add more out-of-line C code calls here right now as I'd
prefer to factor out the logic to C in a better way.

> > We can certainly remove the includes of <asm/asm-uaccess.h> elsewhere,
> > and maybe fold the macros into entry.S if it's not too crowded.
> 
> I can do this as a separate patch.

That sounds fine to me,

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h
  2019-11-27 16:03       ` Mark Rutland
@ 2019-11-27 16:09         ` Pavel Tatashin
  2019-11-27 17:01           ` Mark Rutland
  0 siblings, 1 reply; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-27 16:09 UTC (permalink / raw)
  To: Mark Rutland
  Cc: James Morris, Sasha Levin, LKML, Catalin Marinas, Will Deacon,
	steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Thomas Gleixner, Greg Kroah-Hartman, allison,
	info, alexios.zavras, Stefano Stabellini, boris.ostrovsky,
	jgross, Stefan Agner, Masahiro Yamada, xen-devel,
	Russell King - ARM Linux admin

On Wed, Nov 27, 2019 at 11:03 AM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> > On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > >
> > > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > > The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable,
> > > > are the last two macros defined in asm-uaccess.h.
> > > >
> > > > Replace them with C wrappers and call C functions from
> > > > kernel_entry and kernel_exit.
> > >
> > > For now, please leave those as-is.
> > >
> > > I don't think we want to have out-of-line C wrappers in the middle of
> > > the entry assembly where we don't have a complete kernel environment.
> > > The use in entry code can also assume non-preemptibility, while the C
> > > functions have to explicitly disable that.
> >
> > I do not understand: if a C function is called from a non-preemptible
> > context it stays non-preemptible. kernel_exit already may call C
> > functions around the time __uaccess_ttbr0_enable is called (it may
> > call post_ttbr_update_workaround), and that C function does not do
> > explicit preempt disable.
>
> Sorry, I meant that IRQs are disabled here.
>
> The C wrapper calls __uaccess_ttbr0_enable(), which calls
> local_irq_save() and local_irq_restore(). Those are pointless in the
> bowels of the entry code, and potentially expensive if IRQ prio masking
> is in use.
>
> I'd rather not add more out-of-line C code calls here right now as I'd
> prefer to factor out the logic to C in a better way.

Ah, yes, this makes sense. I could certainly factor out C calls in a
better way, or is this something you want to work on?

Without removing these assembly macros I do not think we want to
address this suggestion from Kees Cook:
https://lore.kernel.org/lkml/CA+CK2bCBS2fKOTmTFm13iv3u5TBPwpoCsYeeP352DVE-gs9GJw@mail.gmail.com/

Thank you,
Pasha

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h
  2019-11-27 16:09         ` Pavel Tatashin
@ 2019-11-27 17:01           ` Mark Rutland
  2019-11-27 17:13             ` Pavel Tatashin
  0 siblings, 1 reply; 15+ messages in thread
From: Mark Rutland @ 2019-11-27 17:01 UTC (permalink / raw)
  To: Pavel Tatashin
  Cc: James Morris, Sasha Levin, LKML, Catalin Marinas, Will Deacon,
	steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Thomas Gleixner, Greg Kroah-Hartman, allison,
	info, alexios.zavras, Stefano Stabellini, boris.ostrovsky,
	jgross, Stefan Agner, Masahiro Yamada, xen-devel,
	Russell King - ARM Linux admin

On Wed, Nov 27, 2019 at 11:09:35AM -0500, Pavel Tatashin wrote:
> On Wed, Nov 27, 2019 at 11:03 AM Mark Rutland <mark.rutland@arm.com> wrote:
> >
> > On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> > > On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > > >
> > > > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > > > The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable,
> > > > > are the last two macros defined in asm-uaccess.h.
> > > > >
> > > > > Replace them with C wrappers and call C functions from
> > > > > kernel_entry and kernel_exit.
> > > >
> > > > For now, please leave those as-is.
> > > >
> > > > I don't think we want to have out-of-line C wrappers in the middle of
> > > > the entry assembly where we don't have a complete kernel environment.
> > > > The use in entry code can also assume non-preemptibility, while the C
> > > > functions have to explicitly disable that.
> > >
> > > I do not understand: if a C function is called from a non-preemptible
> > > context it stays non-preemptible. kernel_exit already may call C
> > > functions around the time __uaccess_ttbr0_enable is called (it may
> > > call post_ttbr_update_workaround), and that C function does not do
> > > explicit preempt disable.
> >
> > Sorry, I meant that IRQs are disabled here.
> >
> > The C wrapper calls __uaccess_ttbr0_enable(), which calls
> > local_irq_save() and local_irq_restore(). Those are pointless in the
> > bowels of the entry code, and potentially expensive if IRQ prio masking
> > is in use.
> >
> > I'd rather not add more out-of-line C code calls here right now as I'd
> > prefer to factor out the logic to C in a better way.
> 
> Ah, yes, this makes sense. I could certainly factor out C calls in a
> better way, or is this something you want to work on?

I'm hoping to do that as part of ongoing entry-deasm work, now that a
lot of the prerequisite work was merged in v5.4.

> Without removing these assembly macros I do not think we want to
> address this suggestion from Kees Cook:
> https://lore.kernel.org/lkml/CA+CK2bCBS2fKOTmTFm13iv3u5TBPwpoCsYeeP352DVE-gs9GJw@mail.gmail.com/

In the mean time, we could add checks around addr_limit_user_check(),
and in the context-switch path. I have some preparatory cleanup to allow
for the context-switch check, which I'll send out at -rc1. That was what
I used to detect the case you reported previously.

Thanks,
Mark.

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h
  2019-11-27 17:01           ` Mark Rutland
@ 2019-11-27 17:13             ` Pavel Tatashin
  0 siblings, 0 replies; 15+ messages in thread
From: Pavel Tatashin @ 2019-11-27 17:13 UTC (permalink / raw)
  To: Mark Rutland
  Cc: James Morris, Sasha Levin, LKML, Catalin Marinas, Will Deacon,
	steve.capper, Linux ARM, Marc Zyngier, James Morse,
	Vladimir Murzin, Thomas Gleixner, Greg Kroah-Hartman, allison,
	info, alexios.zavras, Stefano Stabellini, boris.ostrovsky,
	jgross, Stefan Agner, Masahiro Yamada, xen-devel,
	Russell King - ARM Linux admin

On Wed, Nov 27, 2019 at 12:01 PM Mark Rutland <mark.rutland@arm.com> wrote:
>
> On Wed, Nov 27, 2019 at 11:09:35AM -0500, Pavel Tatashin wrote:
> > On Wed, Nov 27, 2019 at 11:03 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > >
> > > On Wed, Nov 27, 2019 at 10:31:54AM -0500, Pavel Tatashin wrote:
> > > > On Wed, Nov 27, 2019 at 10:12 AM Mark Rutland <mark.rutland@arm.com> wrote:
> > > > >
> > > > > On Thu, Nov 21, 2019 at 09:24:06PM -0500, Pavel Tatashin wrote:
> > > > > > The __uaccess_ttbr0_disable and __uaccess_ttbr0_enable,
> > > > > > are the last two macros defined in asm-uaccess.h.
> > > > > >
> > > > > > Replace them with C wrappers and call C functions from
> > > > > > kernel_entry and kernel_exit.
> > > > >
> > > > > For now, please leave those as-is.
> > > > >
> > > > > I don't think we want to have out-of-line C wrappers in the middle of
> > > > > the entry assembly where we don't have a complete kernel environment.
> > > > > The use in entry code can also assume non-preemptibility, while the C
> > > > > functions have to explicitly disable that.
> > > >
> > > > I do not understand: if a C function is called from a non-preemptible
> > > > context it stays non-preemptible. kernel_exit already may call C
> > > > functions around the time __uaccess_ttbr0_enable is called (it may
> > > > call post_ttbr_update_workaround), and that C function does not do
> > > > explicit preempt disable.
> > >
> > > Sorry, I meant that IRQs are disabled here.
> > >
> > > The C wrapper calls __uaccess_ttbr0_enable(), which calls
> > > local_irq_save() and local_irq_restore(). Those are pointless in the
> > > bowels of the entry code, and potentially expensive if IRQ prio masking
> > > is in use.
> > >
> > > I'd rather not add more out-of-line C code calls here right now as I'd
> > > prefer to factor out the logic to C in a better way.
> >
> > Ah, yes, this makes sense. I could certainly factor out C calls in a
> > better way, or is this something you want to work on?
>
> I'm hoping to do that as part of ongoing entry-deasm work, now that a
> lot of the prerequisite work was merged in v5.4.

OK, I will send new patches with what we agreed on, and your comments addressed.

>
> > Without removing these assembly macros I do not think we want to
> > address this suggestion from Kees Cook:
> > https://lore.kernel.org/lkml/CA+CK2bCBS2fKOTmTFm13iv3u5TBPwpoCsYeeP352DVE-gs9GJw@mail.gmail.com/
>
> In the mean time, we could add checks around addr_limit_user_check(),
> and in the context-switch path. I have some preparatory cleanup to allow
> for the context-switch check, which I'll send out at -rc1. That was what
> I used to detect the case you reported previously.

Sounds good.

Thank you,
Pasha

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2019-11-27 17:13 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-22  2:24 [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin
2019-11-22  2:24 ` [PATCH v2 1/3] arm/arm64/xen: use C inlines for privcmd_call Pavel Tatashin
2019-11-22 19:59   ` Stefano Stabellini
2019-11-22  2:24 ` [PATCH v2 2/3] arm64: remove uaccess_ttbr0 asm macros from cache functions Pavel Tatashin
2019-11-27 15:01   ` Mark Rutland
2019-11-27 15:10     ` Pavel Tatashin
2019-11-27 15:14       ` Mark Rutland
2019-11-22  2:24 ` [PATCH v2 3/3] arm64: remove the rest of asm-uaccess.h Pavel Tatashin
2019-11-27 15:11   ` Mark Rutland
2019-11-27 15:31     ` Pavel Tatashin
2019-11-27 16:03       ` Mark Rutland
2019-11-27 16:09         ` Pavel Tatashin
2019-11-27 17:01           ` Mark Rutland
2019-11-27 17:13             ` Pavel Tatashin
2019-11-26 13:50 ` [PATCH v2 0/3] Use C inlines for uaccess Pavel Tatashin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).