From: Al Viro <viro@ZenIV.linux.org.uk>
To: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org
Subject: [PATCH 14/18] mips: propagate the calling convention change down into __csum_partial_copy_..._user()
Date: Tue, 21 Jul 2020 21:25:45 +0100
Message-ID: <20200721202549.4150745-14-viro@ZenIV.linux.org.uk>
In-Reply-To: <20200721202549.4150745-1-viro@ZenIV.linux.org.uk>

From: Al Viro <viro@zeniv.linux.org.uk>

and make the exception handlers simply return 0, which simplifies the
hell out of csum_partial.S.
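
For illustration, a minimal caller-side sketch of the new convention (not
part of the patch): copy_and_csum() and its arguments are hypothetical;
only csum_and_copy_from_user(), declared in the checksum.h hunk below, is
real.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <net/checksum.h>

/*
 * Hypothetical caller: copy len bytes from userspace into kbuf and hand
 * the checksum back through *csum.  Only csum_and_copy_from_user() is
 * real (see the checksum.h hunk below); the rest is scaffolding.
 */
static int copy_and_csum(void *kbuf, const void __user *ubuf, int len,
			 __wsum *csum)
{
	__wsum sum = csum_and_copy_from_user(ubuf, kbuf, len);

	/*
	 * The copy is seeded with ~0U (the "li sum, -1" in the asm below),
	 * so a successful copy never folds down to 0; a 0 return therefore
	 * unambiguously means access_ok() failed or the copy faulted.
	 */
	if (!sum)
		return -EFAULT;

	*csum = sum;
	return 0;
}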

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 arch/mips/include/asm/checksum.h |  26 +---
 arch/mips/lib/csum_partial.S     | 258 +++++++++++++--------------------------
 2 files changed, 89 insertions(+), 195 deletions(-)

diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index bf02d2d3869f..66a86a33339a 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -34,25 +34,17 @@
  */
 __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-__wsum __csum_partial_copy_from_user(const void *src, void *dst,
-				     int len, __wsum sum, int *err_ptr);
-__wsum __csum_partial_copy_to_user(const void *src, void *dst,
-				   int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_from_user(const void __user *src, void *dst, int len);
+__wsum __csum_partial_copy_to_user(const void *src, void __user *dst, int len);
 
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
 __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
 {
-	__wsum sum = ~0U;
-	int err = 0;
-
 	might_fault();
-
 	if (!access_ok(src, len))
 		return 0;
-	sum = __csum_partial_copy_from_user((__force void *)src, dst,
-						     len, sum, &err);
-	return err ? 0 : sum;
+	return __csum_partial_copy_from_user(src, dst, len);
 }
 
 /*
@@ -62,26 +54,20 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
 static inline
 __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
 {
-	int err = 0;
-	__wsum sum = ~0U;
-
 	might_fault();
 	if (!access_ok(dst, len))
 		return 0;
-	sum = __csum_partial_copy_to_user(src,
-					   (__force void *)dst,
-					   len, sum, &err);
-	return err ? 0 : sum;
+	return __csum_partial_copy_to_user(src, dst, len);
 }
 
 /*
  * the same as csum_partial, but copies from user space (but on MIPS
  * we have just one address space, so this is identical to the above)
  */
-__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+__wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len);
 static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
-	return __csum_partial_copy_nocheck(src, dst, len, 0);
+	return __csum_partial_copy_nocheck(src, dst, len);
 }
 #define csum_partial_copy_nocheck csum_partial_copy_nocheck
 
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 983e909c2052..a46db0807195 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -308,8 +308,8 @@ EXPORT_SYMBOL(csum_partial)
 /*
  * checksum and copy routines based on memcpy.S
  *
- *	csum_partial_copy_nocheck(src, dst, len, sum)
- *	__csum_partial_copy_kernel(src, dst, len, sum, errp)
+ *	csum_partial_copy_nocheck(src, dst, len)
+ *	__csum_partial_copy_kernel(src, dst, len)
  *
  * See "Spec" in memcpy.S for details.	Unlike __copy_user, all
  * function in this file use the standard calling convention.
@@ -318,26 +318,11 @@ EXPORT_SYMBOL(csum_partial)
 #define src a0
 #define dst a1
 #define len a2
-#define psum a3
 #define sum v0
 #define odd t8
-#define errptr t9
 
 /*
- * The exception handler for loads requires that:
- *  1- AT contain the address of the byte just past the end of the source
- *     of the copy,
- *  2- src_entry <= src < AT, and
- *  3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by __csum_partial_copy_from_user and maintained by
- *	not writing AT in __csum_partial_copy
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores stores -EFAULT to errptr and return.
- * These handlers do not need to overwrite any data.
+ * All exception handlers simply return 0.
  */
 
 /* Instruction type */
@@ -358,11 +343,11 @@ EXPORT_SYMBOL(csum_partial)
  * addr    : Address
  * handler : Exception handler
  */
-#define EXC(insn, type, reg, addr, handler)	\
+#define EXC(insn, type, reg, addr)		\
 	.if \mode == LEGACY_MODE;		\
 9:		insn reg, addr;			\
 		.section __ex_table,"a";	\
-		PTR	9b, handler;		\
+		PTR	9b, .L_exc;		\
 		.previous;			\
 	/* This is enabled in EVA mode */	\
 	.else;					\
@@ -371,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
 		    ((\to == USEROP) && (type == ST_INSN));	\
 9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
 			.section __ex_table,"a";		\
-			PTR	9b, handler;			\
+			PTR	9b, .L_exc;			\
 			.previous;				\
 		.else;						\
 			/* EVA without exception */		\
@@ -384,14 +369,14 @@ EXPORT_SYMBOL(csum_partial)
 #ifdef USE_DOUBLE
 
 #define LOADK	ld /* No exception */
-#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr)		EXC(ld, LD_INSN, reg, addr)
+#define LOADBU(reg, addr)	EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr)	EXC(ldl, LD_INSN, reg, addr)
+#define LOADR(reg, addr)	EXC(ldr, LD_INSN, reg, addr)
+#define STOREB(reg, addr)	EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr)	EXC(sdl, ST_INSN, reg, addr)
+#define STORER(reg, addr)	EXC(sdr, ST_INSN, reg, addr)
+#define STORE(reg, addr)	EXC(sd, ST_INSN, reg, addr)
 #define ADD    daddu
 #define SUB    dsubu
 #define SRL    dsrl
@@ -404,14 +389,14 @@ EXPORT_SYMBOL(csum_partial)
 #else
 
 #define LOADK	lw /* No exception */
-#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
-#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
-#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
-#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
-#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
-#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
-#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
-#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
+#define LOAD(reg, addr)		EXC(lw, LD_INSN, reg, addr)
+#define LOADBU(reg, addr)	EXC(lbu, LD_INSN, reg, addr)
+#define LOADL(reg, addr)	EXC(lwl, LD_INSN, reg, addr)
+#define LOADR(reg, addr)	EXC(lwr, LD_INSN, reg, addr)
+#define STOREB(reg, addr)	EXC(sb, ST_INSN, reg, addr)
+#define STOREL(reg, addr)	EXC(swl, ST_INSN, reg, addr)
+#define STORER(reg, addr)	EXC(swr, ST_INSN, reg, addr)
+#define STORE(reg, addr)	EXC(sw, ST_INSN, reg, addr)
 #define ADD    addu
 #define SUB    subu
 #define SRL    srl
@@ -450,22 +435,9 @@ EXPORT_SYMBOL(csum_partial)
 	.set	at=v1
 #endif
 
-	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to
 
-	PTR_ADDU	AT, src, len	/* See (1) above. */
-	/* initialize __nocheck if this the first time we execute this
-	 * macro
-	 */
-#ifdef CONFIG_64BIT
-	move	errptr, a4
-#else
-	lw	errptr, 16(sp)
-#endif
-	.if \__nocheck == 1
-	FEXPORT(__csum_partial_copy_nocheck)
-	EXPORT_SYMBOL(__csum_partial_copy_nocheck)
-	.endif
-	move	sum, zero
+	li	sum, -1
 	move	odd, zero
 	/*
 	 * Note: dst & src may be unaligned, len may be 0
@@ -497,31 +469,31 @@ EXPORT_SYMBOL(csum_partial)
 	SUB	len, 8*NBYTES		# subtract here for bgez loop
 	.align	4
 1:
-	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
-	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
-	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
-	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
-	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
-	LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
-	LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
-	LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
+	LOAD(t0, UNIT(0)(src))
+	LOAD(t1, UNIT(1)(src))
+	LOAD(t2, UNIT(2)(src))
+	LOAD(t3, UNIT(3)(src))
+	LOAD(t4, UNIT(4)(src))
+	LOAD(t5, UNIT(5)(src))
+	LOAD(t6, UNIT(6)(src))
+	LOAD(t7, UNIT(7)(src))
 	SUB	len, len, 8*NBYTES
 	ADD	src, src, 8*NBYTES
-	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
+	STORE(t0, UNIT(0)(dst))
 	ADDC(t0, t1)
-	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
+	STORE(t2, UNIT(2)(dst))
 	ADDC(t2, t3)
-	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
-	STORE(t4, UNIT(4)(dst),	.Ls_exc\@)
+	STORE(t4, UNIT(4)(dst))
 	ADDC(t4, t5)
-	STORE(t5, UNIT(5)(dst),	.Ls_exc\@)
+	STORE(t5, UNIT(5)(dst))
 	ADDC(sum, t4)
-	STORE(t6, UNIT(6)(dst),	.Ls_exc\@)
+	STORE(t6, UNIT(6)(dst))
 	ADDC(t6, t7)
-	STORE(t7, UNIT(7)(dst),	.Ls_exc\@)
+	STORE(t7, UNIT(7)(dst))
 	ADDC(sum, t6)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 8*NBYTES
@@ -541,19 +513,19 @@ EXPORT_SYMBOL(csum_partial)
 	/*
 	 * len >= 4*NBYTES
 	 */
-	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
-	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
-	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
-	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+	LOAD(t0, UNIT(0)(src))
+	LOAD(t1, UNIT(1)(src))
+	LOAD(t2, UNIT(2)(src))
+	LOAD(t3, UNIT(3)(src))
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
-	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
+	STORE(t0, UNIT(0)(dst))
 	ADDC(t0, t1)
-	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
+	STORE(t2, UNIT(2)(dst))
 	ADDC(t2, t3)
-	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
@@ -566,10 +538,10 @@ EXPORT_SYMBOL(csum_partial)
 	beq	rem, len, .Lcopy_bytes\@
 	 nop
 1:
-	LOAD(t0, 0(src), .Ll_exc\@)
+	LOAD(t0, 0(src))
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-	STORE(t0, 0(dst), .Ls_exc\@)
+	STORE(t0, 0(dst))
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
@@ -592,10 +564,10 @@ EXPORT_SYMBOL(csum_partial)
 	 ADD	t1, dst, len	# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3	# rem = number of bits to keep
-	LOAD(t0, 0(src), .Ll_exc\@)
+	LOAD(t0, 0(src))
 	SUB	bits, bits, rem # bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
-	STREST(t0, -1(t1), .Ls_exc\@)
+	STREST(t0, -1(t1))
 	SHIFT_DISCARD_REVERT t0, t0, bits
 	.set reorder
 	ADDC(sum, t0)
@@ -612,12 +584,12 @@ EXPORT_SYMBOL(csum_partial)
 	 * Set match = (src and dst have same alignment)
 	 */
 #define match rem
-	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
+	LDFIRST(t3, FIRST(0)(src))
 	ADD	t2, zero, NBYTES
-	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
+	LDREST(t3, REST(0)(src))
 	SUB	t2, t2, t1	# t2 = number of bytes copied
 	xor	match, t0, t1
-	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+	STFIRST(t3, FIRST(0)(dst))
 	SLL	t4, t1, 3		# t4 = number of bits to discard
 	SHIFT_DISCARD t3, t3, t4
 	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
@@ -639,26 +611,26 @@ EXPORT_SYMBOL(csum_partial)
  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
  * are to the same unit (unless src is aligned, but it's not).
  */
-	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
-	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
+	LDFIRST(t0, FIRST(0)(src))
+	LDFIRST(t1, FIRST(1)(src))
 	SUB	len, len, 4*NBYTES
-	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
-	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
-	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
-	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
-	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
-	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+	LDREST(t0, REST(0)(src))
+	LDREST(t1, REST(1)(src))
+	LDFIRST(t2, FIRST(2)(src))
+	LDFIRST(t3, FIRST(3)(src))
+	LDREST(t2, REST(2)(src))
+	LDREST(t3, REST(3)(src))
 	ADD	src, src, 4*NBYTES
 #ifdef CONFIG_CPU_SB1
 	nop				# improves slotting
 #endif
-	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
+	STORE(t0, UNIT(0)(dst))
 	ADDC(t0, t1)
-	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
+	STORE(t1, UNIT(1)(dst))
 	ADDC(sum, t0)
-	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
+	STORE(t2, UNIT(2)(dst))
 	ADDC(t2, t3)
-	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
+	STORE(t3, UNIT(3)(dst))
 	ADDC(sum, t2)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
@@ -671,11 +643,11 @@ EXPORT_SYMBOL(csum_partial)
 	beq	rem, len, .Lcopy_bytes\@
 	 nop
 1:
-	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
-	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	LDFIRST(t0, FIRST(0)(src))
+	LDREST(t0, REST(0)(src))
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-	STORE(t0, 0(dst), .Ls_exc\@)
+	STORE(t0, 0(dst))
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
@@ -696,11 +668,10 @@ EXPORT_SYMBOL(csum_partial)
 #endif
 	move	t2, zero	# partial word
 	li	t3, SHIFT_START # shift
-/* use .Ll_exc_copy here to return correct sum on fault */
 #define COPY_BYTE(N)			\
-	LOADBU(t0, N(src), .Ll_exc_copy\@);	\
+	LOADBU(t0, N(src));		\
 	SUB	len, len, 1;		\
-	STOREB(t0, N(dst), .Ls_exc\@);	\
+	STOREB(t0, N(dst));		\
 	SLLV	t0, t0, t3;		\
 	addu	t3, SHIFT_INC;		\
 	beqz	len, .Lcopy_bytes_done\@; \
@@ -714,9 +685,9 @@ EXPORT_SYMBOL(csum_partial)
 	COPY_BYTE(4)
 	COPY_BYTE(5)
 #endif
-	LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
+	LOADBU(t0, NBYTES-2(src))
 	SUB	len, len, 1
-	STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
+	STOREB(t0, NBYTES-2(dst))
 	SLLV	t0, t0, t3
 	or	t2, t0
 .Lcopy_bytes_done\@:
@@ -753,94 +724,31 @@ EXPORT_SYMBOL(csum_partial)
 #endif
 	.set	pop
 	.set reorder
-	ADDC32(sum, psum)
 	jr	ra
 	.set noreorder
+	.endm
 
-.Ll_exc_copy\@:
-	/*
-	 * Copy bytes from src until faulting load address (or until a
-	 * lb faults)
-	 *
-	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
-	 * may be more than a byte beyond the last address.
-	 * Hence, the lb below may get an exception.
-	 *
-	 * Assumes src < THREAD_BUADDR($28)
-	 */
-	LOADK	t0, TI_TASK($28)
-	 li	t2, SHIFT_START
-	LOADK	t0, THREAD_BUADDR(t0)
-1:
-	LOADBU(t1, 0(src), .Ll_exc\@)
-	ADD	src, src, 1
-	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
-	SLLV	t1, t1, t2
-	addu	t2, SHIFT_INC
-	ADDC(sum, t1)
-	.set	reorder				/* DADDI_WAR */
-	ADD	dst, dst, 1
-	bne	src, t0, 1b
-	.set	noreorder
-.Ll_exc\@:
-	LOADK	t0, TI_TASK($28)
-	 nop
-	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
-	 nop
-	SUB	len, AT, t0		# len number of uncopied bytes
-	/*
-	 * Here's where we rely on src and dst being incremented in tandem,
-	 *   See (3) above.
-	 * dst += (fault addr - src) to put dst at first byte to clear
-	 */
-	ADD	dst, t0			# compute start address in a1
-	SUB	dst, src
-	/*
-	 * Clear len bytes starting at dst.  Can't call __bzero because it
-	 * might modify len.  An inefficient loop for these rare times...
-	 */
-	.set	reorder				/* DADDI_WAR */
-	SUB	src, len, 1
-	beqz	len, .Ldone\@
-	.set	noreorder
-1:	sb	zero, 0(dst)
-	ADD	dst, dst, 1
-	.set	push
-	.set	noat
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-	bnez	src, 1b
-	 SUB	src, src, 1
-#else
-	li	v1, 1
-	bnez	src, 1b
-	 SUB	src, src, v1
-#endif
-	li	v1, -EFAULT
-	b	.Ldone\@
-	 sw	v1, (errptr)
-
-.Ls_exc\@:
-	li	v0, -1 /* invalid checksum */
-	li	v1, -EFAULT
+	.set noreorder
+.L_exc:
 	jr	ra
-	 sw	v1, (errptr)
-	.set	pop
-	.endm
+	 li	v0, 0
 
+FEXPORT(__csum_partial_copy_nocheck)
+EXPORT_SYMBOL(__csum_partial_copy_nocheck)
 #ifndef CONFIG_EVA
 FEXPORT(__csum_partial_copy_to_user)
 EXPORT_SYMBOL(__csum_partial_copy_to_user)
 FEXPORT(__csum_partial_copy_from_user)
 EXPORT_SYMBOL(__csum_partial_copy_from_user)
 #endif
-__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP
 
 #ifdef CONFIG_EVA
 LEAF(__csum_partial_copy_to_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP
 END(__csum_partial_copy_to_user)
 
 LEAF(__csum_partial_copy_from_user)
-__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP
 END(__csum_partial_copy_from_user)
 #endif
-- 
2.11.0
