x86_64: new and improved memset()

Message ID 20190914103345.GA5856@avx2
State New
Series
  • x86_64: new and improved memset()

Commit Message

Alexey Dobriyan Sept. 14, 2019, 10:33 a.m. UTC
The current memset() implementation does silly things:
* multiplication to get a register-wide constant:
	a waste of cycles if the filler is known at compile time,

* REP STOSQ followed by REP STOSB:
	REP STOSB setup overhead is very high because the trailing length
	is very low (< 8),

* suboptimal calling convention:
	REP STOSB/STOSQ favours (rdi, rcx), while the ABI gives (rdi, rsi, rdx).
	Shuffling registers is free, but rcx and rdx are equivalent
	code-generation-wise.

* memset_orig():
	memset(..., 0, ...) can be done with 3 registers and
	memset(..., c != 0, ...) with 4; anything else is a waste.
	CPUs that required unrolling are hopefully gone by now.

The new implementation is based on the following observations:
* c == 0 is the most common form:
	the filler can be generated with "xor eax, eax" and pushed into
	memset(), saving 2 bytes per call site and the multiplication,

* "len" divisible by 8 is the most common form:
	all it takes is one pointer or unsigned long inside structure,
	dispatch at compile time to code without those ugly "lets fill
	at most 7 bytes" tails,

* the multiplication that widens the filler value can be done at compile
  time for "c != 0", costing at most 1 insn/10 bytes and saving the runtime
  multiplication (see the sketch after this list).

Note: "memset0" name is chosen because "bzero" is officially deprecated.

Note: the memset(,0,) code is interleaved into the memset(,c,) code to save
space (the memset0_* entry points set up the zero filler and fall through
into the memsetx_* bodies below).

TODO:
	CONFIG_FORTIFY_SOURCE is enabled by distros
	inline "xor eax, eax; rep stosb"
	benchmarks
	testing

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
---

 arch/x86/boot/compressed/Makefile     |    1 
 arch/x86/include/asm/string_64.h      |  104 ++++++++++++++++++++++++++++++++++
 arch/x86/lib/Makefile                 |    1 
 arch/x86/lib/memset0_64.S             |   86 ++++++++++++++++++++++++++++
 drivers/firmware/efi/libstub/Makefile |    2 
 5 files changed, 193 insertions(+), 1 deletion(-)

Comments

Borislav Petkov Sept. 14, 2019, 11:37 a.m. UTC | #1
On Sat, Sep 14, 2019 at 01:33:45PM +0300, Alexey Dobriyan wrote:
> --- a/arch/x86/include/asm/string_64.h
> +++ b/arch/x86/include/asm/string_64.h
> @@ -15,7 +15,111 @@ extern void *memcpy(void *to, const void *from, size_t len);
>  extern void *__memcpy(void *to, const void *from, size_t len);
>  
>  #define __HAVE_ARCH_MEMSET
> +#if defined(_ARCH_X86_BOOT) || defined(CONFIG_FORTIFY_SOURCE)
>  void *memset(void *s, int c, size_t n);
> +#else
> +#include <asm/alternative.h>
> +#include <asm/cpufeatures.h>
> +
> +/* Internal, do not use. */
> +static __always_inline void memset0(void *s, size_t n)
> +{
> +	/* Internal, do not use. */
> +	void _memset0_mov(void);
> +	void _memset0_rep_stosq(void);
> +	void memset0_mov(void);
> +	void memset0_rep_stosq(void);
> +	void memset0_rep_stosb(void);
> +
> +	if (__builtin_constant_p(n) && n == 0) {
> +	} else if (__builtin_constant_p(n) && n == 1) {
> +		*(uint8_t *)s = 0;
> +	} else if (__builtin_constant_p(n) && n == 2) {
> +		*(uint16_t *)s = 0;
> +	} else if (__builtin_constant_p(n) && n == 4) {
> +		*(uint32_t *)s = 0;
> +	} else if (__builtin_constant_p(n) && n == 6) {
> +		*(uint32_t *)s = 0;
> +		*(uint16_t *)(s + 4) = 0;
> +	} else if (__builtin_constant_p(n) && n == 8) {
> +		*(uint64_t *)s = 0;
> +	} else if (__builtin_constant_p(n) && (n & 7) == 0) {
> +		alternative_call_2(
> +			_memset0_mov,
> +			_memset0_rep_stosq, X86_FEATURE_REP_GOOD,
> +			memset0_rep_stosb, X86_FEATURE_ERMS,
> +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> +			"D" (s), "c" (n)
> +			: "rax", "cc", "memory"
> +		);
> +	} else {
> +		alternative_call_2(
> +			memset0_mov,
> +			memset0_rep_stosq, X86_FEATURE_REP_GOOD,
> +			memset0_rep_stosb, X86_FEATURE_ERMS,
> +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> +			"D" (s), "c" (n)
> +			: "rax", "rsi", "cc", "memory"
> +		);
> +	}
> +}
> +
> +/* Internal, do not use. */
> +static __always_inline void memsetx(void *s, int c, size_t n)
> +{
> +	/* Internal, do not use. */
> +	void _memsetx_mov(void);
> +	void _memsetx_rep_stosq(void);
> +	void memsetx_mov(void);
> +	void memsetx_rep_stosq(void);
> +	void memsetx_rep_stosb(void);
> +
> +	const uint64_t ccc = (uint8_t)c * 0x0101010101010101ULL;
> +
> +	if (__builtin_constant_p(n) && n == 0) {
> +	} else if (__builtin_constant_p(n) && n == 1) {
> +		*(uint8_t *)s = ccc;
> +	} else if (__builtin_constant_p(n) && n == 2) {
> +		*(uint16_t *)s = ccc;
> +	} else if (__builtin_constant_p(n) && n == 4) {
> +		*(uint32_t *)s = ccc;
> +	} else if (__builtin_constant_p(n) && n == 8) {
> +		*(uint64_t *)s = ccc;
> +	} else if (__builtin_constant_p(n) && (n & 7) == 0) {
> +		alternative_call_2(
> +			_memsetx_mov,
> +			_memsetx_rep_stosq, X86_FEATURE_REP_GOOD,
> +			memsetx_rep_stosb, X86_FEATURE_ERMS,
> +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> +			"D" (s), "c" (n), "a" (ccc)
> +			: "cc", "memory"
> +		);
> +	} else {
> +		alternative_call_2(
> +			memsetx_mov,
> +			memsetx_rep_stosq, X86_FEATURE_REP_GOOD,
> +			memsetx_rep_stosb, X86_FEATURE_ERMS,
> +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> +			"D" (s), "c" (n), "a" (ccc)
> +			: "rsi", "cc", "memory"
> +		);
> +	}
> +}
> +
> +static __always_inline void *memset(void *s, int c, size_t n)
> +{
> +	if (__builtin_constant_p(c)) {
> +		if (c == 0) {
> +			memset0(s, n);
> +		} else {
> +			memsetx(s, c, n);
> +		}
> +		return s;
> +	} else {
> +		return __builtin_memset(s, c, n);
> +	}
> +}

I'm willing to take something like that only when such complexity is
justified by numbers. I.e., I'm much more inclined to capping it under
32 and 64 byte sizes and keeping it simple.
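
Roughly, the kind of capped dispatch meant here (a hypothetical sketch,
not from the patch; __memset() is the existing out-of-line version
declared in string_64.h):

	/* Hypothetical: inline only small constant sizes, where the
	 * compiler emits direct stores anyway, and send everything else
	 * to the existing out-of-line __memset(). */
	static __always_inline void *memset_capped(void *s, int c, size_t n)
	{
		if (__builtin_constant_p(n) && n <= 64)
			return __builtin_memset(s, c, n);
		return __memset(s, c, n);
	}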

...

> +ENTRY(_memset0_mov)
> +	xor	eax, eax
> +.globl _memsetx_mov
> +_memsetx_mov:
> +	add	rcx, rdi
> +	cmp	rdi, rcx
> +	je	1f
> +2:
> +	mov	[rdi], rax
> +	add	rdi, 8
> +	cmp	rdi, rcx
> +	jne	2b
> +1:
> +	ret
> +ENDPROC(_memset0_mov)
> +ENDPROC(_memsetx_mov)
> +EXPORT_SYMBOL(_memset0_mov)
> +EXPORT_SYMBOL(_memsetx_mov)
> +
> +ENTRY(memset0_mov)
> +	xor	eax, eax
> +.globl memsetx_mov
> +memsetx_mov:
> +	lea	rsi, [rdi + rcx]
> +	cmp	rdi, rsi
> +	je	1f
> +2:
> +	mov	[rdi], al
> +	add	rdi, 1
> +	cmp	rdi, rsi
> +	jne	2b
> +1:
> +	ret

Say what now? Intel syntax? You must be joking...

> +ENDPROC(memset0_mov)
> +ENDPROC(memsetx_mov)
> +EXPORT_SYMBOL(memset0_mov)
> +EXPORT_SYMBOL(memsetx_mov)

Too many exported symbols. Again, I'd much prefer a cleaner,
smaller solution over one where readability suffers greatly at the
expense of *maybe* getting a bit better performance.

> --- a/drivers/firmware/efi/libstub/Makefile
> +++ b/drivers/firmware/efi/libstub/Makefile
> @@ -28,7 +28,7 @@ KBUILD_CFLAGS			:= $(cflags-y) -DDISABLE_BRANCH_PROFILING \
>  				   -D__NO_FORTIFY \
>  				   $(call cc-option,-ffreestanding) \
>  				   $(call cc-option,-fno-stack-protector) \
> -				   -D__DISABLE_EXPORTS
> +				   -D__DISABLE_EXPORTS -D_ARCH_X86_BOOT

Yeah, something like that is inevitable, I've come to realize too. ;-\
Alexey Dobriyan Sept. 14, 2019, 3:15 p.m. UTC | #2
On Sat, Sep 14, 2019 at 01:37:17PM +0200, Borislav Petkov wrote:
> On Sat, Sep 14, 2019 at 01:33:45PM +0300, Alexey Dobriyan wrote:
> > --- a/arch/x86/include/asm/string_64.h
> > +++ b/arch/x86/include/asm/string_64.h
> > @@ -15,7 +15,111 @@ extern void *memcpy(void *to, const void *from, size_t len);
> >  extern void *__memcpy(void *to, const void *from, size_t len);
> >  
> >  #define __HAVE_ARCH_MEMSET
> > +#if defined(_ARCH_X86_BOOT) || defined(CONFIG_FORTIFY_SOURCE)
> >  void *memset(void *s, int c, size_t n);
> > +#else
> > +#include <asm/alternative.h>
> > +#include <asm/cpufeatures.h>
> > +
> > +/* Internal, do not use. */
> > +static __always_inline void memset0(void *s, size_t n)
> > +{
> > +	/* Internal, do not use. */
> > +	void _memset0_mov(void);
> > +	void _memset0_rep_stosq(void);
> > +	void memset0_mov(void);
> > +	void memset0_rep_stosq(void);
> > +	void memset0_rep_stosb(void);
> > +
> > +	if (__builtin_constant_p(n) && n == 0) {
> > +	} else if (__builtin_constant_p(n) && n == 1) {
> > +		*(uint8_t *)s = 0;
> > +	} else if (__builtin_constant_p(n) && n == 2) {
> > +		*(uint16_t *)s = 0;
> > +	} else if (__builtin_constant_p(n) && n == 4) {
> > +		*(uint32_t *)s = 0;
> > +	} else if (__builtin_constant_p(n) && n == 6) {
> > +		*(uint32_t *)s = 0;
> > +		*(uint16_t *)(s + 4) = 0;
> > +	} else if (__builtin_constant_p(n) && n == 8) {
> > +		*(uint64_t *)s = 0;
> > +	} else if (__builtin_constant_p(n) && (n & 7) == 0) {
> > +		alternative_call_2(
> > +			_memset0_mov,
> > +			_memset0_rep_stosq, X86_FEATURE_REP_GOOD,
> > +			memset0_rep_stosb, X86_FEATURE_ERMS,
> > +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> > +			"D" (s), "c" (n)
> > +			: "rax", "cc", "memory"
> > +		);
> > +	} else {
> > +		alternative_call_2(
> > +			memset0_mov,
> > +			memset0_rep_stosq, X86_FEATURE_REP_GOOD,
> > +			memset0_rep_stosb, X86_FEATURE_ERMS,
> > +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> > +			"D" (s), "c" (n)
> > +			: "rax", "rsi", "cc", "memory"
> > +		);
> > +	}
> > +}
> > +
> > +/* Internal, do not use. */
> > +static __always_inline void memsetx(void *s, int c, size_t n)
> > +{
> > +	/* Internal, do not use. */
> > +	void _memsetx_mov(void);
> > +	void _memsetx_rep_stosq(void);
> > +	void memsetx_mov(void);
> > +	void memsetx_rep_stosq(void);
> > +	void memsetx_rep_stosb(void);
> > +
> > +	const uint64_t ccc = (uint8_t)c * 0x0101010101010101ULL;
> > +
> > +	if (__builtin_constant_p(n) && n == 0) {
> > +	} else if (__builtin_constant_p(n) && n == 1) {
> > +		*(uint8_t *)s = ccc;
> > +	} else if (__builtin_constant_p(n) && n == 2) {
> > +		*(uint16_t *)s = ccc;
> > +	} else if (__builtin_constant_p(n) && n == 4) {
> > +		*(uint32_t *)s = ccc;
> > +	} else if (__builtin_constant_p(n) && n == 8) {
> > +		*(uint64_t *)s = ccc;
> > +	} else if (__builtin_constant_p(n) && (n & 7) == 0) {
> > +		alternative_call_2(
> > +			_memsetx_mov,
> > +			_memsetx_rep_stosq, X86_FEATURE_REP_GOOD,
> > +			memsetx_rep_stosb, X86_FEATURE_ERMS,
> > +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> > +			"D" (s), "c" (n), "a" (ccc)
> > +			: "cc", "memory"
> > +		);
> > +	} else {
> > +		alternative_call_2(
> > +			memsetx_mov,
> > +			memsetx_rep_stosq, X86_FEATURE_REP_GOOD,
> > +			memsetx_rep_stosb, X86_FEATURE_ERMS,
> > +			ASM_OUTPUT2("=D" (s), "=c" (n)),
> > +			"D" (s), "c" (n), "a" (ccc)
> > +			: "rsi", "cc", "memory"
> > +		);
> > +	}
> > +}
> > +
> > +static __always_inline void *memset(void *s, int c, size_t n)
> > +{
> > +	if (__builtin_constant_p(c)) {
> > +		if (c == 0) {
> > +			memset0(s, n);
> > +		} else {
> > +			memsetx(s, c, n);
> > +		}
> > +		return s;
> > +	} else {
> > +		return __builtin_memset(s, c, n);
> > +	}
> > +}
> 
> I'm willing to take something like that only when such complexity is
> justified by numbers. I.e., I'm much more inclined to capping it under
> 32 and 64 byte sizes and keeping it simple.

OK. Those small lengths were indeed annoying.

> > +ENTRY(_memset0_mov)
> > +	xor	eax, eax
> > +.globl _memsetx_mov
> > +_memsetx_mov:
> > +	add	rcx, rdi
> > +	cmp	rdi, rcx
> > +	je	1f
> > +2:
> > +	mov	[rdi], rax
> > +	add	rdi, 8
> > +	cmp	rdi, rcx
> > +	jne	2b
> > +1:
> > +	ret
> > +ENDPROC(_memset0_mov)
> > +ENDPROC(_memsetx_mov)
> > +EXPORT_SYMBOL(_memset0_mov)
> > +EXPORT_SYMBOL(_memsetx_mov)
> > +
> > +ENTRY(memset0_mov)
> > +	xor	eax, eax
> > +.globl memsetx_mov
> > +memsetx_mov:
> > +	lea	rsi, [rdi + rcx]
> > +	cmp	rdi, rsi
> > +	je	1f
> > +2:
> > +	mov	[rdi], al
> > +	add	rdi, 1
> > +	cmp	rdi, rsi
> > +	jne	2b
> > +1:
> > +	ret
> 
> Say what now? Intel syntax? You must be joking...

It is the best thing in the x86 assembler universe.

> > +ENDPROC(memset0_mov)
> > +ENDPROC(memsetx_mov)
> > +EXPORT_SYMBOL(memset0_mov)
> > +EXPORT_SYMBOL(memsetx_mov)
> 
> Too many exported symbols.

Those are technical exports. memset() remains the only developer-visible
interface.

> Again, I'd much prefer a cleaner,
> smaller solution over one where readability suffers greatly at the
> expense of *maybe* getting a bit better performance.

Readability is a red herring; I, for one, find AT&T syntax unreadable.
David Laight Sept. 16, 2019, 2:18 p.m. UTC | #3
From: Alexey Dobriyan
> Sent: 14 September 2019 11:34
...
> +ENTRY(memset0_rep_stosq)
> +	xor	eax, eax
> +.globl memsetx_rep_stosq
> +memsetx_rep_stosq:
> +	lea	rsi, [rdi + rcx]
> +	shr	rcx, 3
> +	rep stosq
> +	cmp	rdi, rsi
> +	je	1f
> +2:
> +	mov	[rdi], al
> +	add	rdi, 1
> +	cmp	rdi, rsi
> +	jne	2b
> +1:
> +	ret

You can do the 'trailing bytes' first with a potentially misaligned store.
Something like (modulo asm syntax and argument ordering):
	lea	rsi, [rdi + rdx]
	shr	rcx, 3
	jcxz	1f		# Short buffer
	mov	-8[rsi], rax
	rep stosq
	ret
1:
	mov	[rdi], al
	add	rdi, 1
	cmp	rdi, rsi
	jne	1b
	ret

The final loop can be one instruction shorter by arranging to do:
1:
	mov	[rdi+rxx], al
	add	rdi, 1
	jnz	1b
	ret
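
In C, that counted-up-to-zero loop looks roughly like this (a sketch
assuming n >= 1; "fill_bytes" and the variable names are made up):

	#include <stddef.h>

	/* The index runs from -n up to 0, so the increment's "became zero"
	 * result terminates the loop and the separate compare disappears. */
	static void fill_bytes(unsigned char *p, unsigned char c, size_t n)
	{
		unsigned char *end = p + n;	/* fixed base, like rxx above */
		ptrdiff_t i = -(ptrdiff_t)n;

		do {
			end[i] = c;
		} while (++i != 0);
	}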

Last I looked, 'jcxz' was OK on all recent AMD and Intel CPUs.
OTOH 'loop' is horrid on Intel ones.

The same applies to the other versions.
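
And a C rendering of the tail-first trick itself, assuming n >= 8 and a
zero fill ("memset0_tail_first" is a made-up name; memcpy() stands in
for a possibly misaligned 8-byte store):

	#include <stdint.h>
	#include <string.h>

	static void memset0_tail_first(void *s, size_t n)
	{
		unsigned char *p = s;
		const uint64_t zero = 0;
		size_t i;

		/* Store the last (possibly misaligned) 8 bytes up front... */
		memcpy(p + n - 8, &zero, 8);
		/* ...then cover [0, n/8*8) with quadword stores; the overlap
		 * with the tail store makes a byte loop unnecessary. */
		for (i = 0; i < n / 8; i++)
			memcpy(p + i * 8, &zero, 8);
	}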

I suspect it isn't worth optimising to realign misaligned buffers;
they are unlikely to happen often enough.

I also think that gcc's __builtin version already does some of the
short-buffer optimisations.
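
For example (hypothetical struct, but typical of what compilers do with
a small constant length):

	#include <stdint.h>
	#include <string.h>

	struct hdr {			/* hypothetical 8-byte structure */
		uint32_t magic;
		uint32_t flags;
	};

	static void hdr_init(struct hdr *h)
	{
		/* At -O2 this typically becomes a single 8-byte store
		 * rather than a call to memset(). */
		memset(h, 0, sizeof(*h));
	}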

	David


Patch

--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -38,6 +38,7 @@  KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
 KBUILD_CFLAGS += -Wno-pointer-sign
+KBUILD_CFLAGS += -D_ARCH_X86_BOOT
 
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -15,7 +15,111 @@  extern void *memcpy(void *to, const void *from, size_t len);
 extern void *__memcpy(void *to, const void *from, size_t len);
 
 #define __HAVE_ARCH_MEMSET
+#if defined(_ARCH_X86_BOOT) || defined(CONFIG_FORTIFY_SOURCE)
 void *memset(void *s, int c, size_t n);
+#else
+#include <asm/alternative.h>
+#include <asm/cpufeatures.h>
+
+/* Internal, do not use. */
+static __always_inline void memset0(void *s, size_t n)
+{
+	/* Internal, do not use. */
+	void _memset0_mov(void);
+	void _memset0_rep_stosq(void);
+	void memset0_mov(void);
+	void memset0_rep_stosq(void);
+	void memset0_rep_stosb(void);
+
+	if (__builtin_constant_p(n) && n == 0) {
+	} else if (__builtin_constant_p(n) && n == 1) {
+		*(uint8_t *)s = 0;
+	} else if (__builtin_constant_p(n) && n == 2) {
+		*(uint16_t *)s = 0;
+	} else if (__builtin_constant_p(n) && n == 4) {
+		*(uint32_t *)s = 0;
+	} else if (__builtin_constant_p(n) && n == 6) {
+		*(uint32_t *)s = 0;
+		*(uint16_t *)(s + 4) = 0;
+	} else if (__builtin_constant_p(n) && n == 8) {
+		*(uint64_t *)s = 0;
+	} else if (__builtin_constant_p(n) && (n & 7) == 0) {
+		alternative_call_2(
+			_memset0_mov,
+			_memset0_rep_stosq, X86_FEATURE_REP_GOOD,
+			memset0_rep_stosb, X86_FEATURE_ERMS,
+			ASM_OUTPUT2("=D" (s), "=c" (n)),
+			"D" (s), "c" (n)
+			: "rax", "cc", "memory"
+		);
+	} else {
+		alternative_call_2(
+			memset0_mov,
+			memset0_rep_stosq, X86_FEATURE_REP_GOOD,
+			memset0_rep_stosb, X86_FEATURE_ERMS,
+			ASM_OUTPUT2("=D" (s), "=c" (n)),
+			"D" (s), "c" (n)
+			: "rax", "rsi", "cc", "memory"
+		);
+	}
+}
+
+/* Internal, do not use. */
+static __always_inline void memsetx(void *s, int c, size_t n)
+{
+	/* Internal, do not use. */
+	void _memsetx_mov(void);
+	void _memsetx_rep_stosq(void);
+	void memsetx_mov(void);
+	void memsetx_rep_stosq(void);
+	void memsetx_rep_stosb(void);
+
+	const uint64_t ccc = (uint8_t)c * 0x0101010101010101ULL;
+
+	if (__builtin_constant_p(n) && n == 0) {
+	} else if (__builtin_constant_p(n) && n == 1) {
+		*(uint8_t *)s = ccc;
+	} else if (__builtin_constant_p(n) && n == 2) {
+		*(uint16_t *)s = ccc;
+	} else if (__builtin_constant_p(n) && n == 4) {
+		*(uint32_t *)s = ccc;
+	} else if (__builtin_constant_p(n) && n == 8) {
+		*(uint64_t *)s = ccc;
+	} else if (__builtin_constant_p(n) && (n & 7) == 0) {
+		alternative_call_2(
+			_memsetx_mov,
+			_memsetx_rep_stosq, X86_FEATURE_REP_GOOD,
+			memsetx_rep_stosb, X86_FEATURE_ERMS,
+			ASM_OUTPUT2("=D" (s), "=c" (n)),
+			"D" (s), "c" (n), "a" (ccc)
+			: "cc", "memory"
+		);
+	} else {
+		alternative_call_2(
+			memsetx_mov,
+			memsetx_rep_stosq, X86_FEATURE_REP_GOOD,
+			memsetx_rep_stosb, X86_FEATURE_ERMS,
+			ASM_OUTPUT2("=D" (s), "=c" (n)),
+			"D" (s), "c" (n), "a" (ccc)
+			: "rsi", "cc", "memory"
+		);
+	}
+}
+
+static __always_inline void *memset(void *s, int c, size_t n)
+{
+	if (__builtin_constant_p(c)) {
+		if (c == 0) {
+			memset0(s, n);
+		} else {
+			memsetx(s, c, n);
+		}
+		return s;
+	} else {
+		return __builtin_memset(s, c, n);
+	}
+}
+#endif
 void *__memset(void *s, int c, size_t n);
 
 #define __HAVE_ARCH_MEMSET16
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -58,6 +58,7 @@  else
         lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
         lib-y += clear_page_64.o copy_page_64.o
         lib-y += memmove_64.o memset_64.o
+	lib-y += memset0_64.o
         lib-y += copy_user_64.o
 	lib-y += cmpxchg16b_emu.o
 endif
new file mode 100644
--- /dev/null
+++ b/arch/x86/lib/memset0_64.S
@@ -0,0 +1,86 @@ 
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+.intel_syntax noprefix
+
+ENTRY(memset0_rep_stosb)
+	xor	eax, eax
+.globl memsetx_rep_stosb
+memsetx_rep_stosb:
+	rep stosb
+	ret
+ENDPROC(memset0_rep_stosb)
+ENDPROC(memsetx_rep_stosb)
+EXPORT_SYMBOL(memset0_rep_stosb)
+EXPORT_SYMBOL(memsetx_rep_stosb)
+
+ENTRY(_memset0_rep_stosq)
+	xor	eax, eax
+.globl _memsetx_rep_stosq
+_memsetx_rep_stosq:
+	shr	rcx, 3
+	rep stosq
+	ret
+ENDPROC(_memset0_rep_stosq)
+ENDPROC(_memsetx_rep_stosq)
+EXPORT_SYMBOL(_memset0_rep_stosq)
+EXPORT_SYMBOL(_memsetx_rep_stosq)
+
+ENTRY(memset0_rep_stosq)
+	xor	eax, eax
+.globl memsetx_rep_stosq
+memsetx_rep_stosq:
+	lea	rsi, [rdi + rcx]
+	shr	rcx, 3
+	rep stosq
+	cmp	rdi, rsi
+	je	1f
+2:
+	mov	[rdi], al
+	add	rdi, 1
+	cmp	rdi, rsi
+	jne	2b
+1:
+	ret
+ENDPROC(memset0_rep_stosq)
+ENDPROC(memsetx_rep_stosq)
+EXPORT_SYMBOL(memset0_rep_stosq)
+EXPORT_SYMBOL(memsetx_rep_stosq)
+
+ENTRY(_memset0_mov)
+	xor	eax, eax
+.globl _memsetx_mov
+_memsetx_mov:
+	add	rcx, rdi
+	cmp	rdi, rcx
+	je	1f
+2:
+	mov	[rdi], rax
+	add	rdi, 8
+	cmp	rdi, rcx
+	jne	2b
+1:
+	ret
+ENDPROC(_memset0_mov)
+ENDPROC(_memsetx_mov)
+EXPORT_SYMBOL(_memset0_mov)
+EXPORT_SYMBOL(_memsetx_mov)
+
+ENTRY(memset0_mov)
+	xor	eax, eax
+.globl memsetx_mov
+memsetx_mov:
+	lea	rsi, [rdi + rcx]
+	cmp	rdi, rsi
+	je	1f
+2:
+	mov	[rdi], al
+	add	rdi, 1
+	cmp	rdi, rsi
+	jne	2b
+1:
+	ret
+ENDPROC(memset0_mov)
+ENDPROC(memsetx_mov)
+EXPORT_SYMBOL(memset0_mov)
+EXPORT_SYMBOL(memsetx_mov)
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@  KBUILD_CFLAGS			:= $(cflags-y) -DDISABLE_BRANCH_PROFILING \
 				   -D__NO_FORTIFY \
 				   $(call cc-option,-ffreestanding) \
 				   $(call cc-option,-fno-stack-protector) \
-				   -D__DISABLE_EXPORTS
+				   -D__DISABLE_EXPORTS -D_ARCH_X86_BOOT
 
 GCOV_PROFILE			:= n
 KASAN_SANITIZE			:= n