From: Paul Burton <paul.burton@imgtec.com> To: <linux-mips@linux-mips.org> Cc: Ralf Baechle <ralf@linux-mips.org>, Paul Burton <paul.burton@imgtec.com> Subject: [PATCH 4/7] MIPS: memcpy: Return uncopied bytes from __copy_user*() in v0 Date: Mon, 7 Nov 2016 11:17:59 +0000 [thread overview] Message-ID: <20161107111802.12071-5-paul.burton@imgtec.com> (raw) In-Reply-To: <20161107111802.12071-1-paul.burton@imgtec.com> The __copy_user*() functions have thus far returned the number of uncopied bytes in the $a2 register used as the argument providing the length of the memory region to be copied. As part of moving to use the standard calling convention, return the number of uncopied bytes in v0 instead. Signed-off-by: Paul Burton <paul.burton@imgtec.com> --- arch/mips/cavium-octeon/octeon-memcpy.S | 18 +++++++++--------- arch/mips/include/asm/uaccess.h | 30 ++++++++++++++++++++---------- arch/mips/lib/memcpy.S | 26 +++++++++++++------------- 3 files changed, 42 insertions(+), 32 deletions(-) diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 944f8f5..6f312a2 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -141,7 +141,7 @@ .set noreorder .set noat - .macro __BUILD_COPY_USER mode + .macro __BUILD_COPY_USER mode, uncopied /* * Note: dst & src may be unaligned, len may be 0 * Temps @@ -358,12 +358,12 @@ EXC( sb t0, N(dst), s_exc_p1) COPY_BYTE(4) COPY_BYTE(5) EXC( lb t0, NBYTES-2(src), l_exc) - SUB len, len, 1 + SUB \uncopied, len, 1 jr ra EXC( sb t0, NBYTES-2(dst), s_exc_p1) .Ldone\@: jr ra - nop + move \uncopied, len /* memcpy shouldn't generate exceptions */ .if \mode != MEMCPY_MODE @@ -410,13 +410,13 @@ l_exc: bnez src, 1b SUB src, src, 1 2: jr ra - nop + move \uncopied, len #define SEXC(n) \ s_exc_p ## n ## u: \ jr ra; \ - ADD len, len, n*NBYTES + ADD \uncopied, len, n*NBYTES SEXC(16) SEXC(15) @@ -437,10 +437,10 @@ SEXC(1) s_exc_p1: jr ra - ADD len, len, 1 + ADD 
\uncopied, len, 1 s_exc: jr ra - nop + move \uncopied, len .endif /* \mode != MEMCPY_MODE */ .endm @@ -458,7 +458,7 @@ s_exc: LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ - __BUILD_COPY_USER MEMCPY_MODE + __BUILD_COPY_USER MEMCPY_MODE len END(memcpy) /* @@ -475,7 +475,7 @@ LEAF(__copy_user) EXPORT_SYMBOL(__copy_user) li t7, 0 /* not inatomic */ __copy_user_common: - __BUILD_COPY_USER COPY_USER_MODE + __BUILD_COPY_USER COPY_USER_MODE v0 END(__copy_user) /* diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 89fa5c0b..81d632f 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -814,6 +814,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); #ifndef CONFIG_EVA #define __invoke_copy_to_user(to, from, n) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void __user *__cu_to_r __asm__("$4"); \ register const void *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -823,11 +824,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_len_r = (n); \ __asm__ __volatile__( \ __MODULE_JAL(__copy_user) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) #define __invoke_copy_to_kernel(to, from, n) \ @@ -963,6 +965,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user(to, from, n) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -977,11 +980,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); __UA_ADDU "\t$1, %1, %2\n\t" \ 
".set\tat\n\t" \ ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) #define __invoke_copy_from_kernel(to, from, n) \ @@ -997,6 +1001,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user_inatomic(to, from, n) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -1011,11 +1016,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); __UA_ADDU "\t$1, %1, %2\n\t" \ ".set\tat\n\t" \ ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) #define __invoke_copy_from_kernel_inatomic(to, from, n) \ @@ -1035,6 +1041,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -1049,15 +1056,17 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __UA_ADDU "\t$1, %1, %2\n\t" \ ".set\tat\n\t" \ ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; 
\ + __cu_ret_r; \ }) #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -1067,11 +1076,12 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_len_r = (n); \ __asm__ __volatile__( \ __MODULE_JAL(func_ptr) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) /* diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index bfbe23c..052f7a1 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -262,7 +262,7 @@ * from : Source operand. USEROP or KERNELOP * to : Destination operand. USEROP or KERNELOP */ - .macro __BUILD_COPY_USER mode, from, to + .macro __BUILD_COPY_USER mode, from, to, uncopied /* * Note: dst & src may be unaligned, len may be 0 @@ -398,7 +398,7 @@ SHIFT_DISCARD t0, t0, bits STREST(t0, -1(t1), .Ls_exc\@) jr ra - move len, zero + move \uncopied, zero .Ldst_unaligned\@: /* * dst is unaligned @@ -500,12 +500,12 @@ COPY_BYTE(5) #endif LOADB(t0, NBYTES-2(src), .Ll_exc\@) - SUB len, len, 1 + SUB \uncopied, len, 1 jr ra STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: jr ra - nop + move \uncopied, len #ifdef CONFIG_CPU_MIPSR6 .Lcopy_unaligned_bytes\@: @@ -584,13 +584,13 @@ .set pop #endif jr ra - nop + move \uncopied, len #define SEXC(n) \ .set reorder; /* DADDI_WAR */ \ .Ls_exc_p ## n ## u\@: \ - ADD len, len, n*NBYTES; \ + ADD \uncopied, len, n*NBYTES; \ jr ra; \ .set noreorder @@ -605,12 +605,12 @@ SEXC(1) .Ls_exc_p1\@: .set reorder /* DADDI_WAR */ - ADD len, len, 1 + ADD \uncopied, len, 1 jr ra .set noreorder .Ls_exc\@: jr ra - nop + move \uncopied, len .endif /* \mode != 
MEMCPY_MODE */ .endm @@ -632,7 +632,7 @@ EXPORT_SYMBOL(memcpy) .L__memcpy: li t6, 0 /* not inatomic */ /* Legacy Mode, user <-> user */ - __BUILD_COPY_USER MEMCPY_MODE USEROP USEROP + __BUILD_COPY_USER MEMCPY_MODE USEROP USEROP len END(memcpy) /* @@ -651,7 +651,7 @@ EXPORT_SYMBOL(__copy_user) li t6, 0 /* not inatomic */ __copy_user_common: /* Legacy Mode, user <-> user */ - __BUILD_COPY_USER LEGACY_MODE USEROP USEROP + __BUILD_COPY_USER LEGACY_MODE USEROP USEROP v0 END(__copy_user) /* @@ -686,7 +686,7 @@ LEAF(__copy_from_user_eva) EXPORT_SYMBOL(__copy_from_user_eva) li t6, 0 /* not inatomic */ __copy_from_user_common: - __BUILD_COPY_USER EVA_MODE USEROP KERNELOP + __BUILD_COPY_USER EVA_MODE USEROP KERNELOP v0 END(__copy_from_user_eva) @@ -697,7 +697,7 @@ END(__copy_from_user_eva) LEAF(__copy_to_user_eva) EXPORT_SYMBOL(__copy_to_user_eva) -__BUILD_COPY_USER EVA_MODE KERNELOP USEROP +__BUILD_COPY_USER EVA_MODE KERNELOP USEROP v0 END(__copy_to_user_eva) /* @@ -706,7 +706,7 @@ END(__copy_to_user_eva) LEAF(__copy_in_user_eva) EXPORT_SYMBOL(__copy_in_user_eva) -__BUILD_COPY_USER EVA_MODE USEROP USEROP +__BUILD_COPY_USER EVA_MODE USEROP USEROP v0 END(__copy_in_user_eva) #endif -- 2.10.2
WARNING: multiple messages have this Message-ID (diff)
From: Paul Burton <paul.burton@imgtec.com> To: linux-mips@linux-mips.org Cc: Ralf Baechle <ralf@linux-mips.org>, Paul Burton <paul.burton@imgtec.com> Subject: [PATCH 4/7] MIPS: memcpy: Return uncopied bytes from __copy_user*() in v0 Date: Mon, 7 Nov 2016 11:17:59 +0000 [thread overview] Message-ID: <20161107111802.12071-5-paul.burton@imgtec.com> (raw) Message-ID: <20161107111759.bLi9crkhxAYXMPsq2F-ho2QqDkwKEV0y6YAi_2Z0xDo@z> (raw) In-Reply-To: <20161107111802.12071-1-paul.burton@imgtec.com> The __copy_user*() functions have thus far returned the number of uncopied bytes in the $a2 register used as the argument providing the length of the memory region to be copied. As part of moving to use the standard calling convention, return the number of uncopied bytes in v0 instead. Signed-off-by: Paul Burton <paul.burton@imgtec.com> --- arch/mips/cavium-octeon/octeon-memcpy.S | 18 +++++++++--------- arch/mips/include/asm/uaccess.h | 30 ++++++++++++++++++++---------- arch/mips/lib/memcpy.S | 26 +++++++++++++------------- 3 files changed, 42 insertions(+), 32 deletions(-) diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 944f8f5..6f312a2 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -141,7 +141,7 @@ .set noreorder .set noat - .macro __BUILD_COPY_USER mode + .macro __BUILD_COPY_USER mode, uncopied /* * Note: dst & src may be unaligned, len may be 0 * Temps @@ -358,12 +358,12 @@ EXC( sb t0, N(dst), s_exc_p1) COPY_BYTE(4) COPY_BYTE(5) EXC( lb t0, NBYTES-2(src), l_exc) - SUB len, len, 1 + SUB \uncopied, len, 1 jr ra EXC( sb t0, NBYTES-2(dst), s_exc_p1) .Ldone\@: jr ra - nop + move \uncopied, len /* memcpy shouldn't generate exceptions */ .if \mode != MEMCPY_MODE @@ -410,13 +410,13 @@ l_exc: bnez src, 1b SUB src, src, 1 2: jr ra - nop + move \uncopied, len #define SEXC(n) \ s_exc_p ## n ## u: \ jr ra; \ - ADD len, len, n*NBYTES + ADD \uncopied, len, n*NBYTES SEXC(16) SEXC(15) 
@@ -437,10 +437,10 @@ SEXC(1) s_exc_p1: jr ra - ADD len, len, 1 + ADD \uncopied, len, 1 s_exc: jr ra - nop + move \uncopied, len .endif /* \mode != MEMCPY_MODE */ .endm @@ -458,7 +458,7 @@ s_exc: LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ - __BUILD_COPY_USER MEMCPY_MODE + __BUILD_COPY_USER MEMCPY_MODE len END(memcpy) /* @@ -475,7 +475,7 @@ LEAF(__copy_user) EXPORT_SYMBOL(__copy_user) li t7, 0 /* not inatomic */ __copy_user_common: - __BUILD_COPY_USER COPY_USER_MODE + __BUILD_COPY_USER COPY_USER_MODE v0 END(__copy_user) /* diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 89fa5c0b..81d632f 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -814,6 +814,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); #ifndef CONFIG_EVA #define __invoke_copy_to_user(to, from, n) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void __user *__cu_to_r __asm__("$4"); \ register const void *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -823,11 +824,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_len_r = (n); \ __asm__ __volatile__( \ __MODULE_JAL(__copy_user) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) #define __invoke_copy_to_kernel(to, from, n) \ @@ -963,6 +965,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user(to, from, n) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -977,11 +980,12 @@ extern size_t __copy_user_inatomic(void 
*__to, const void *__from, size_t __n); __UA_ADDU "\t$1, %1, %2\n\t" \ ".set\tat\n\t" \ ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) #define __invoke_copy_from_kernel(to, from, n) \ @@ -997,6 +1001,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user_inatomic(to, from, n) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -1011,11 +1016,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); __UA_ADDU "\t$1, %1, %2\n\t" \ ".set\tat\n\t" \ ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) #define __invoke_copy_from_kernel_inatomic(to, from, n) \ @@ -1035,6 +1041,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -1049,15 +1056,17 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __UA_ADDU "\t$1, %1, %2\n\t" \ ".set\tat\n\t" \ ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", 
"$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \ ({ \ + register long __cu_ret_r __asm__("$2"); \ register void *__cu_to_r __asm__("$4"); \ register const void __user *__cu_from_r __asm__("$5"); \ register long __cu_len_r __asm__("$6"); \ @@ -1067,11 +1076,12 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_len_r = (n); \ __asm__ __volatile__( \ __MODULE_JAL(func_ptr) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : "=r"(__cu_ret_r), "+r" (__cu_to_r), \ + "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ + __cu_ret_r; \ }) /* diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index bfbe23c..052f7a1 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -262,7 +262,7 @@ * from : Source operand. USEROP or KERNELOP * to : Destination operand. 
USEROP or KERNELOP */ - .macro __BUILD_COPY_USER mode, from, to + .macro __BUILD_COPY_USER mode, from, to, uncopied /* * Note: dst & src may be unaligned, len may be 0 @@ -398,7 +398,7 @@ SHIFT_DISCARD t0, t0, bits STREST(t0, -1(t1), .Ls_exc\@) jr ra - move len, zero + move \uncopied, zero .Ldst_unaligned\@: /* * dst is unaligned @@ -500,12 +500,12 @@ COPY_BYTE(5) #endif LOADB(t0, NBYTES-2(src), .Ll_exc\@) - SUB len, len, 1 + SUB \uncopied, len, 1 jr ra STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) .Ldone\@: jr ra - nop + move \uncopied, len #ifdef CONFIG_CPU_MIPSR6 .Lcopy_unaligned_bytes\@: @@ -584,13 +584,13 @@ .set pop #endif jr ra - nop + move \uncopied, len #define SEXC(n) \ .set reorder; /* DADDI_WAR */ \ .Ls_exc_p ## n ## u\@: \ - ADD len, len, n*NBYTES; \ + ADD \uncopied, len, n*NBYTES; \ jr ra; \ .set noreorder @@ -605,12 +605,12 @@ SEXC(1) .Ls_exc_p1\@: .set reorder /* DADDI_WAR */ - ADD len, len, 1 + ADD \uncopied, len, 1 jr ra .set noreorder .Ls_exc\@: jr ra - nop + move \uncopied, len .endif /* \mode != MEMCPY_MODE */ .endm @@ -632,7 +632,7 @@ EXPORT_SYMBOL(memcpy) .L__memcpy: li t6, 0 /* not inatomic */ /* Legacy Mode, user <-> user */ - __BUILD_COPY_USER MEMCPY_MODE USEROP USEROP + __BUILD_COPY_USER MEMCPY_MODE USEROP USEROP len END(memcpy) /* @@ -651,7 +651,7 @@ EXPORT_SYMBOL(__copy_user) li t6, 0 /* not inatomic */ __copy_user_common: /* Legacy Mode, user <-> user */ - __BUILD_COPY_USER LEGACY_MODE USEROP USEROP + __BUILD_COPY_USER LEGACY_MODE USEROP USEROP v0 END(__copy_user) /* @@ -686,7 +686,7 @@ LEAF(__copy_from_user_eva) EXPORT_SYMBOL(__copy_from_user_eva) li t6, 0 /* not inatomic */ __copy_from_user_common: - __BUILD_COPY_USER EVA_MODE USEROP KERNELOP + __BUILD_COPY_USER EVA_MODE USEROP KERNELOP v0 END(__copy_from_user_eva) @@ -697,7 +697,7 @@ END(__copy_from_user_eva) LEAF(__copy_to_user_eva) EXPORT_SYMBOL(__copy_to_user_eva) -__BUILD_COPY_USER EVA_MODE KERNELOP USEROP +__BUILD_COPY_USER EVA_MODE KERNELOP USEROP v0 END(__copy_to_user_eva) /* @@ 
-706,7 +706,7 @@ END(__copy_to_user_eva) LEAF(__copy_in_user_eva) EXPORT_SYMBOL(__copy_in_user_eva) -__BUILD_COPY_USER EVA_MODE USEROP USEROP +__BUILD_COPY_USER EVA_MODE USEROP USEROP v0 END(__copy_in_user_eva) #endif -- 2.10.2
next prev parent reply other threads:[~2016-11-07 11:20 UTC|newest] Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top 2016-11-07 11:17 [PATCH 0/7] MIPS: Standard calling convention usercopy & memcpy Paul Burton 2016-11-07 11:17 ` Paul Burton 2016-11-07 11:17 ` [PATCH 1/7] MIPS: lib: Split lib-y to a line per file Paul Burton 2016-11-07 11:17 ` Paul Burton 2016-11-07 11:17 ` [PATCH 2/7] MIPS: lib: Implement memmove in C Paul Burton 2016-11-07 11:17 ` Paul Burton 2016-11-07 11:17 ` [PATCH 3/7] MIPS: memcpy: Split __copy_user & memcpy Paul Burton 2016-11-07 11:17 ` Paul Burton 2016-11-07 11:17 ` Paul Burton [this message] 2016-11-07 11:17 ` [PATCH 4/7] MIPS: memcpy: Return uncopied bytes from __copy_user*() in v0 Paul Burton 2016-11-07 11:18 ` [PATCH 5/7] MIPS: memcpy: Use ta* instead of manually defining t4-t7 Paul Burton 2016-11-07 11:18 ` Paul Burton 2016-11-07 11:18 ` [PATCH 6/7] MIPS: memcpy: Use a3/$7 for source end address Paul Burton 2016-11-07 11:18 ` Paul Burton 2016-11-14 14:47 ` Maciej W. Rozycki 2016-11-14 14:47 ` Maciej W. Rozycki 2016-11-07 11:18 ` [PATCH 7/7] MIPS: uaccess: Use standard __user_copy* function calls Paul Burton 2016-11-07 11:18 ` Paul Burton 2017-06-27 22:33 ` James Hogan 2017-06-27 22:33 ` James Hogan
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20161107111802.12071-5-paul.burton@imgtec.com \ --to=paul.burton@imgtec.com \ --cc=linux-mips@linux-mips.org \ --cc=ralf@linux-mips.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.