linux-kernel.vger.kernel.org archive mirror
From: Alexey Dobriyan <adobriyan@gmail.com>
To: x86@kernel.org, tglx@linutronix.de, mingo@redhat.com, hpa@zytor.com
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH 3/5] x86_64: rename clear_page() and copy_user() variants
Date: Wed, 26 Apr 2017 21:30:47 +0300
Message-ID: <20170426183047.GC5069@avx2>
In-Reply-To: <20170426182806.GB5069@avx2>

The patch renames the clear_page() and copy_user() variants: marketing-ish
acronyms like ERMS and chatty names are replaced with consistent, shorter
suffixes that name the instruction each variant actually uses:

	xxx_mov
	xxx_rep_stosq	xxx_rep_movsq
	xxx_rep_stosb	xxx_rep_movsb
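
Concretely, the diff below applies the following mapping:

	clear_page_orig			-> clear_page_mov
	clear_page_rep			-> clear_page_rep_stosq
	clear_page_erms			-> clear_page_rep_stosb
	copy_user_generic_unrolled	-> copy_user_mov
	copy_user_generic_string	-> copy_user_rep_movsq
	copy_user_enhanced_fast_string	-> copy_user_rep_movsb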

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
---

 arch/x86/include/asm/page_64.h    |   12 ++++++------
 arch/x86/include/asm/uaccess_64.h |   18 +++++++++---------
 arch/x86/lib/clear_page_64.S      |   18 +++++++++---------
 arch/x86/lib/copy_user_64.S       |   20 ++++++++++----------
 tools/perf/ui/browsers/annotate.c |    2 +-
 5 files changed, 35 insertions(+), 35 deletions(-)

--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -35,15 +35,15 @@ extern unsigned long __phys_addr_symbol(unsigned long);
 #define pfn_valid(pfn)          ((pfn) < max_pfn)
 #endif
 
-void clear_page_orig(void *page);
-void clear_page_rep(void *page);
-void clear_page_erms(void *page);
+void clear_page_mov(void *page);
+void clear_page_rep_stosq(void *page);
+void clear_page_rep_stosb(void *page);
 
 static inline void clear_page(void *page)
 {
-	alternative_call_2(clear_page_orig,
-			   clear_page_rep, X86_FEATURE_REP_GOOD,
-			   clear_page_erms, X86_FEATURE_ERMS,
+	alternative_call_2(clear_page_mov,
+			   clear_page_rep_stosq, X86_FEATURE_REP_GOOD,
+			   clear_page_rep_stosb, X86_FEATURE_ERMS,
 			   "=D" (page),
 			   "0" (page)
 			   : "memory", "rax", "rcx");
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -18,11 +18,11 @@
 
 /* Handles exceptions in both to and from, but doesn't do access_ok */
 __must_check unsigned long
-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+copy_user_rep_movsb(void *to, const void *from, unsigned len);
 __must_check unsigned long
-copy_user_generic_string(void *to, const void *from, unsigned len);
+copy_user_rep_movsq(void *to, const void *from, unsigned len);
 __must_check unsigned long
-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
+copy_user_mov(void *to, const void *from, unsigned len);
 
 static __always_inline __must_check unsigned long
 copy_user_generic(void *to, const void *from, unsigned len)
@@ -30,14 +30,14 @@ copy_user_generic(void *to, const void *from, unsigned len)
 	unsigned ret;
 
 	/*
-	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
-	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
-	 * Otherwise, use copy_user_generic_unrolled.
+	 * If CPU has ERMS feature, use copy_user_rep_movsb.
+	 * Otherwise, if CPU has rep_good feature, use copy_user_rep_movsq.
+	 * Otherwise, use copy_user_mov.
 	 */
-	alternative_call_2(copy_user_generic_unrolled,
-			 copy_user_generic_string,
+	alternative_call_2(copy_user_mov,
+			 copy_user_rep_movsq,
 			 X86_FEATURE_REP_GOOD,
-			 copy_user_enhanced_fast_string,
+			 copy_user_rep_movsb,
 			 X86_FEATURE_ERMS,
 			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
 				     "=d" (len)),
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -14,15 +14,15 @@
  * Zero a page.
  * %rdi	- page
  */
-ENTRY(clear_page_rep)
+ENTRY(clear_page_rep_stosq)
 	movl $4096/8,%ecx
 	xorl %eax,%eax
 	rep stosq
 	ret
-ENDPROC(clear_page_rep)
-EXPORT_SYMBOL_GPL(clear_page_rep)
+ENDPROC(clear_page_rep_stosq)
+EXPORT_SYMBOL_GPL(clear_page_rep_stosq)
 
-ENTRY(clear_page_orig)
+ENTRY(clear_page_mov)
 	xorl   %eax,%eax
 	movl   $4096/64,%ecx
 	.p2align 4
@@ -41,13 +41,13 @@ ENTRY(clear_page_orig)
 	jnz	.Lloop
 	nop
 	ret
-ENDPROC(clear_page_orig)
-EXPORT_SYMBOL_GPL(clear_page_orig)
+ENDPROC(clear_page_mov)
+EXPORT_SYMBOL_GPL(clear_page_mov)
 
-ENTRY(clear_page_erms)
+ENTRY(clear_page_rep_stosb)
 	movl $4096,%ecx
 	xorl %eax,%eax
 	rep stosb
 	ret
-ENDPROC(clear_page_erms)
-EXPORT_SYMBOL_GPL(clear_page_erms)
+ENDPROC(clear_page_rep_stosb)
+EXPORT_SYMBOL_GPL(clear_page_rep_stosb)
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -17,7 +17,7 @@
 #include <asm/export.h>
 
 /*
- * copy_user_generic_unrolled - memory copy with exception handling.
+ * copy_user_mov - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
  * code for rep movsq
  *
@@ -29,7 +29,7 @@
  * Output:
  * eax uncopied bytes or 0 if successful.
  */
-ENTRY(copy_user_generic_unrolled)
+ENTRY(copy_user_mov)
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
@@ -111,8 +111,8 @@ ENTRY(copy_user_generic_unrolled)
 	_ASM_EXTABLE(19b,40b)
 	_ASM_EXTABLE(21b,50b)
 	_ASM_EXTABLE(22b,50b)
-ENDPROC(copy_user_generic_unrolled)
-EXPORT_SYMBOL(copy_user_generic_unrolled)
+ENDPROC(copy_user_mov)
+EXPORT_SYMBOL(copy_user_mov)
 
 /* Some CPUs run faster using the string copy instructions.
  * This is also a lot simpler. Use them when possible.
@@ -132,7 +132,7 @@ EXPORT_SYMBOL(copy_user_generic_unrolled)
  * Output:
  * eax uncopied bytes or 0 if successful.
  */
-ENTRY(copy_user_generic_string)
+ENTRY(copy_user_rep_movsq)
 	ASM_STAC
 	cmpl $8,%edx
 	jb 2f		/* less than 8 bytes, go to byte copy loop */
@@ -157,8 +157,8 @@ ENTRY(copy_user_generic_string)
 
 	_ASM_EXTABLE(1b,11b)
 	_ASM_EXTABLE(3b,12b)
-ENDPROC(copy_user_generic_string)
-EXPORT_SYMBOL(copy_user_generic_string)
+ENDPROC(copy_user_rep_movsq)
+EXPORT_SYMBOL(copy_user_rep_movsq)
 
 /*
  * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -172,7 +172,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
  * Output:
  * eax uncopied bytes or 0 if successful.
  */
-ENTRY(copy_user_enhanced_fast_string)
+ENTRY(copy_user_rep_movsb)
 	ASM_STAC
 	movl %edx,%ecx
 1:	rep
@@ -187,8 +187,8 @@ ENTRY(copy_user_enhanced_fast_string)
 	.previous
 
 	_ASM_EXTABLE(1b,12b)
-ENDPROC(copy_user_enhanced_fast_string)
-EXPORT_SYMBOL(copy_user_enhanced_fast_string)
+ENDPROC(copy_user_rep_movsb)
+EXPORT_SYMBOL(copy_user_rep_movsb)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -1084,7 +1084,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
 			 * routines that comes with labels in the same column
 			 * as the address in objdump, sigh.
 			 *
-			 * E.g. copy_user_generic_unrolled
+			 * E.g. copy_user_mov
  			 */
 			if (pos->offset < (s64)size)
 				browser.offsets[pos->offset] = pos;
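
For readers unfamiliar with the selection logic these routines sit behind, a
minimal userspace sketch of the same idea follows. It is not part of the patch
and does not use the kernel's alternatives mechanism (alternative_call_2()
patches the call site once at boot); it simply picks a REP STOSB or REP STOSQ
page clear at runtime based on the ERMS CPUID bit. All *_sketch helper names
are hypothetical and exist only for illustration.

/*
 * Illustrative sketch only -- not kernel code.  Chooses between a
 * byte-granular REP STOSB clear (when the CPU reports ERMS) and a
 * quadword REP STOSQ clear, mirroring the clear_page() selection
 * the patch renames.
 */
#include <cpuid.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static void clear_page_rep_stosb_sketch(void *page)
{
	unsigned long cnt = PAGE_SIZE;

	/* Byte-granular clear; fast on CPUs with Enhanced REP MOVSB/STOSB. */
	asm volatile("rep stosb"
		     : "+D" (page), "+c" (cnt)
		     : "a" (0UL)
		     : "memory");
}

static void clear_page_rep_stosq_sketch(void *page)
{
	unsigned long cnt = PAGE_SIZE / 8;

	/* Quadword-granular clear, as in the non-ERMS rep path. */
	asm volatile("rep stosq"
		     : "+D" (page), "+c" (cnt)
		     : "a" (0UL)
		     : "memory");
}

static int cpu_has_erms(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* ERMS is reported in CPUID.(EAX=7,ECX=0):EBX bit 9. */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
		return 0;
	return (ebx >> 9) & 1;
}

static void clear_page_sketch(void *page)
{
	if (cpu_has_erms())
		clear_page_rep_stosb_sketch(page);
	else
		clear_page_rep_stosq_sketch(page);
}

int main(void)
{
	void *page = aligned_alloc(4096, PAGE_SIZE);

	if (!page)
		return 1;
	clear_page_sketch(page);
	free(page);
	return 0;
}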

Thread overview: 9+ messages
2017-04-26 18:23 [PATCH 1/5] x86_64: use REP MOVSB in copy_page() Alexey Dobriyan
2017-04-26 18:28 ` [PATCH 2/5] x86_64: inline copy_page() at call site Alexey Dobriyan
2017-04-26 18:30   ` Alexey Dobriyan [this message]
2017-04-26 18:34     ` [PATCH 4/5] x86_64: clobber "cc" in inlined clear_page() Alexey Dobriyan
2017-04-26 18:35       ` [PATCH 5/5] x86_64: garbage collect headers in clear_page.S Alexey Dobriyan
2017-05-05 16:58     ` [PATCH 3/5] x86_64: rename clear_page() and copy_user() variants Borislav Petkov
2017-04-28 21:04   ` [PATCH 2/5] x86_64: inline copy_page() at call site Borislav Petkov
2017-05-02 11:49     ` Alexey Dobriyan
2017-05-02 11:59       ` Borislav Petkov
