All of lore.kernel.org
 help / color / mirror / Atom feed
From: Paul Burton <paul.burton@imgtec.com>
To: <linux-mips@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>, Paul Burton <paul.burton@imgtec.com>
Subject: [PATCH 6/7] MIPS: memcpy: Use a3/$7 for source end address
Date: Mon, 7 Nov 2016 11:18:01 +0000	[thread overview]
Message-ID: <20161107111802.12071-7-paul.burton@imgtec.com> (raw)
In-Reply-To: <20161107111802.12071-1-paul.burton@imgtec.com>

Instead of using the at/$1 register (which does not form part of the
typical calling convention) to provide the end of the source region to
__copy_user* functions, use the a3/$7 register. This prepares us for
being able to call __copy_user* with a standard function call.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
---

 arch/mips/cavium-octeon/octeon-memcpy.S |  8 ++++----
 arch/mips/include/asm/uaccess.h         | 21 ++++++++++++---------
 arch/mips/lib/memcpy.S                  |  8 ++++----
 3 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index db49fca..9316ab1 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -57,13 +57,13 @@
 
 /*
  * The exception handler for loads requires that:
- *  1- AT contain the address of the byte just past the end of the source
+ *  1- a3 contain the address of the byte just past the end of the source
  *     of the copy,
- *  2- src_entry <= src < AT, and
+ *  2- src_entry <= src < a3, and
  *  3- (dst - src) == (dst_entry - src_entry),
  * The _entry suffix denotes values when __copy_user was called.
  *
- * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user
+ * (1) is set up by uaccess.h and maintained by not writing a3 in copy_user
  * (2) is met by incrementing src by the number of bytes copied
  * (3) is met by not doing loads between a pair of increments of dst and src
  *
@@ -386,7 +386,7 @@ EXC(	lb	t1, 0(src),	l_exc)
 l_exc:
 	LOAD	t0, TI_TASK($28)
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
-	SUB	len, AT, t0		# len number of uncopied bytes
+	SUB	len, a3, t0		# len number of uncopied bytes
 	bnez	ta0, 2f		/* Skip the zeroing out part if inatomic */
 	/*
 	 * Here's where we rely on src and dst being incremented in tandem,
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 81d632f..562ad49 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -809,7 +809,8 @@ extern void __put_user_unaligned_unknown(void);
 #define DADDI_SCRATCH "$0"
 #endif
 
-extern size_t __copy_user(void *__to, const void *__from, size_t __n);
+extern size_t __copy_user(void *__to, const void *__from, size_t __n,
+			  const void *__from_end);
 
 #ifndef CONFIG_EVA
 #define __invoke_copy_to_user(to, from, n)				\
@@ -874,7 +875,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len;							\
 })
 
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n,
+				   const void *__from_end);
 
 #define __copy_to_user_inatomic(to, from, n)				\
 ({									\
@@ -977,7 +979,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	".set\tnoreorder\n\t"						\
 	__MODULE_JAL(__copy_user)					\
 	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	__UA_ADDU "\t$7, %1, %2\n\t"					\
 	".set\tat\n\t"							\
 	".set\treorder"							\
 	: "=r"(__cu_ret_r), "+r" (__cu_to_r),				\
@@ -1013,7 +1015,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	".set\tnoreorder\n\t"						\
 	__MODULE_JAL(__copy_user_inatomic)				\
 	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	__UA_ADDU "\t$7, %1, %2\n\t"					\
 	".set\tat\n\t"							\
 	".set\treorder"							\
 	: "=r"(__cu_ret_r), "+r" (__cu_to_r),				\
@@ -1032,12 +1034,13 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 /* EVA specific functions */
 
 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
-				       size_t __n);
+				       size_t __n, const void *__from_end);
 extern size_t __copy_from_user_eva(void *__to, const void *__from,
-				   size_t __n);
+				   size_t __n, const void *__from_end);
 extern size_t __copy_to_user_eva(void *__to, const void *__from,
-				 size_t __n);
-extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+				 size_t __n, const void *__from_end);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n,
+				 const void *__from_end);
 
 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
 ({									\
@@ -1053,7 +1056,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	".set\tnoreorder\n\t"						\
 	__MODULE_JAL(func_ptr)						\
 	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	__UA_ADDU "\t$7, %1, %2\n\t"					\
 	".set\tat\n\t"							\
 	".set\treorder"							\
 	: "=r"(__cu_ret_r), "+r" (__cu_to_r),				\
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 48684c4..5af9f03 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -70,13 +70,13 @@
 
 /*
  * The exception handler for loads requires that:
- *  1- AT contain the address of the byte just past the end of the source
+ *  1- a3 contain the address of the byte just past the end of the source
  *     of the copy,
- *  2- src_entry <= src < AT, and
+ *  2- src_entry <= src < a3, and
  *  3- (dst - src) == (dst_entry - src_entry),
  * The _entry suffix denotes values when __copy_user was called.
  *
- * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user
+ * (1) is set up by uaccess.h and maintained by not writing a3 in copy_user
  * (2) is met by incrementing src by the number of bytes copied
  * (3) is met by not doing loads between a pair of increments of dst and src
  *
@@ -549,7 +549,7 @@
 	 nop
 	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	 nop
-	SUB	len, AT, t0		# len number of uncopied bytes
+	SUB	len, a3, t0		# len number of uncopied bytes
 	bnez	ta2, .Ldone\@	/* Skip the zeroing part if inatomic */
 	/*
 	 * Here's where we rely on src and dst being incremented in tandem,
-- 
2.10.2

WARNING: multiple messages have this Message-ID (diff)
From: Paul Burton <paul.burton@imgtec.com>
To: linux-mips@linux-mips.org
Cc: Ralf Baechle <ralf@linux-mips.org>, Paul Burton <paul.burton@imgtec.com>
Subject: [PATCH 6/7] MIPS: memcpy: Use a3/$7 for source end address
Date: Mon, 7 Nov 2016 11:18:01 +0000	[thread overview]
Message-ID: <20161107111802.12071-7-paul.burton@imgtec.com> (raw)
Message-ID: <20161107111801.k7pxR07tpVVfvTeIfXryA8VNjcU14BHgIqCI9LByNZU@z> (raw)
In-Reply-To: <20161107111802.12071-1-paul.burton@imgtec.com>

Instead of using the at/$1 register (which does not form part of the
typical calling convention) to provide the end of the source region to
__copy_user* functions, use the a3/$7 register. This prepares us for
being able to call __copy_user* with a standard function call.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
---

 arch/mips/cavium-octeon/octeon-memcpy.S |  8 ++++----
 arch/mips/include/asm/uaccess.h         | 21 ++++++++++++---------
 arch/mips/lib/memcpy.S                  |  8 ++++----
 3 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index db49fca..9316ab1 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -57,13 +57,13 @@
 
 /*
  * The exception handler for loads requires that:
- *  1- AT contain the address of the byte just past the end of the source
+ *  1- a3 contain the address of the byte just past the end of the source
  *     of the copy,
- *  2- src_entry <= src < AT, and
+ *  2- src_entry <= src < a3, and
  *  3- (dst - src) == (dst_entry - src_entry),
  * The _entry suffix denotes values when __copy_user was called.
  *
- * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user
+ * (1) is set up by uaccess.h and maintained by not writing a3 in copy_user
  * (2) is met by incrementing src by the number of bytes copied
  * (3) is met by not doing loads between a pair of increments of dst and src
  *
@@ -386,7 +386,7 @@ EXC(	lb	t1, 0(src),	l_exc)
 l_exc:
 	LOAD	t0, TI_TASK($28)
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
-	SUB	len, AT, t0		# len number of uncopied bytes
+	SUB	len, a3, t0		# len number of uncopied bytes
 	bnez	ta0, 2f		/* Skip the zeroing out part if inatomic */
 	/*
 	 * Here's where we rely on src and dst being incremented in tandem,
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 81d632f..562ad49 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -809,7 +809,8 @@ extern void __put_user_unaligned_unknown(void);
 #define DADDI_SCRATCH "$0"
 #endif
 
-extern size_t __copy_user(void *__to, const void *__from, size_t __n);
+extern size_t __copy_user(void *__to, const void *__from, size_t __n,
+			  const void *__from_end);
 
 #ifndef CONFIG_EVA
 #define __invoke_copy_to_user(to, from, n)				\
@@ -874,7 +875,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len;							\
 })
 
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n,
+				   const void *__from_end);
 
 #define __copy_to_user_inatomic(to, from, n)				\
 ({									\
@@ -977,7 +979,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	".set\tnoreorder\n\t"						\
 	__MODULE_JAL(__copy_user)					\
 	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	__UA_ADDU "\t$7, %1, %2\n\t"					\
 	".set\tat\n\t"							\
 	".set\treorder"							\
 	: "=r"(__cu_ret_r), "+r" (__cu_to_r),				\
@@ -1013,7 +1015,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 	".set\tnoreorder\n\t"						\
 	__MODULE_JAL(__copy_user_inatomic)				\
 	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	__UA_ADDU "\t$7, %1, %2\n\t"					\
 	".set\tat\n\t"							\
 	".set\treorder"							\
 	: "=r"(__cu_ret_r), "+r" (__cu_to_r),				\
@@ -1032,12 +1034,13 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
 /* EVA specific functions */
 
 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
-				       size_t __n);
+				       size_t __n, const void *__from_end);
 extern size_t __copy_from_user_eva(void *__to, const void *__from,
-				   size_t __n);
+				   size_t __n, const void *__from_end);
 extern size_t __copy_to_user_eva(void *__to, const void *__from,
-				 size_t __n);
-extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+				 size_t __n, const void *__from_end);
+extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n,
+				 const void *__from_end);
 
 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
 ({									\
@@ -1053,7 +1056,7 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
 	".set\tnoreorder\n\t"						\
 	__MODULE_JAL(func_ptr)						\
 	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	__UA_ADDU "\t$7, %1, %2\n\t"					\
 	".set\tat\n\t"							\
 	".set\treorder"							\
 	: "=r"(__cu_ret_r), "+r" (__cu_to_r),				\
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 48684c4..5af9f03 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -70,13 +70,13 @@
 
 /*
  * The exception handler for loads requires that:
- *  1- AT contain the address of the byte just past the end of the source
+ *  1- a3 contain the address of the byte just past the end of the source
  *     of the copy,
- *  2- src_entry <= src < AT, and
+ *  2- src_entry <= src < a3, and
  *  3- (dst - src) == (dst_entry - src_entry),
  * The _entry suffix denotes values when __copy_user was called.
  *
- * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user
+ * (1) is set up by uaccess.h and maintained by not writing a3 in copy_user
  * (2) is met by incrementing src by the number of bytes copied
  * (3) is met by not doing loads between a pair of increments of dst and src
  *
@@ -549,7 +549,7 @@
 	 nop
 	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	 nop
-	SUB	len, AT, t0		# len number of uncopied bytes
+	SUB	len, a3, t0		# len number of uncopied bytes
 	bnez	ta2, .Ldone\@	/* Skip the zeroing part if inatomic */
 	/*
 	 * Here's where we rely on src and dst being incremented in tandem,
-- 
2.10.2

  parent reply	other threads:[~2016-11-07 11:21 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-11-07 11:17 [PATCH 0/7] MIPS: Standard calling convention usercopy & memcpy Paul Burton
2016-11-07 11:17 ` Paul Burton
2016-11-07 11:17 ` [PATCH 1/7] MIPS: lib: Split lib-y to a line per file Paul Burton
2016-11-07 11:17   ` Paul Burton
2016-11-07 11:17 ` [PATCH 2/7] MIPS: lib: Implement memmove in C Paul Burton
2016-11-07 11:17   ` Paul Burton
2016-11-07 11:17 ` [PATCH 3/7] MIPS: memcpy: Split __copy_user & memcpy Paul Burton
2016-11-07 11:17   ` Paul Burton
2016-11-07 11:17 ` [PATCH 4/7] MIPS: memcpy: Return uncopied bytes from __copy_user*() in v0 Paul Burton
2016-11-07 11:17   ` Paul Burton
2016-11-07 11:18 ` [PATCH 5/7] MIPS: memcpy: Use ta* instead of manually defining t4-t7 Paul Burton
2016-11-07 11:18   ` Paul Burton
2016-11-07 11:18 ` Paul Burton [this message]
2016-11-07 11:18   ` [PATCH 6/7] MIPS: memcpy: Use a3/$7 for source end address Paul Burton
2016-11-14 14:47   ` Maciej W. Rozycki
2016-11-14 14:47     ` Maciej W. Rozycki
2016-11-07 11:18 ` [PATCH 7/7] MIPS: uaccess: Use standard __user_copy* function calls Paul Burton
2016-11-07 11:18   ` Paul Burton
2017-06-27 22:33   ` James Hogan
2017-06-27 22:33     ` James Hogan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20161107111802.12071-7-paul.burton@imgtec.com \
    --to=paul.burton@imgtec.com \
    --cc=linux-mips@linux-mips.org \
    --cc=ralf@linux-mips.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.