From: Tony Luck <tony.luck@intel.com> To: Ingo Molnar <mingo@kernel.org> Cc: Borislav Petkov <bp@alien8.de>, Andrew Morton <akpm@linux-foundation.org>, Andy Lutomirski <luto@kernel.org>, Dan Williams <dan.j.williams@intel.com>, <elliott@hpe.com>, linux-kernel@vger.kernel.org, linux-mm@kvack.org, linux-nvdimm@ml01.01.org, x86@kernel.org Subject: [PATCH v8 3/3] x86, mce: Add __mcsafe_copy() Date: Fri, 8 Jan 2016 13:18:03 -0800 [thread overview] Message-ID: <19f6403f2b04d3448ed2ac958e656645d8b6e70c.1452297867.git.tony.luck@intel.com> (raw) In-Reply-To: <cover.1452297867.git.tony.luck@intel.com> Make use of the EXTABLE_FAULT exception table entries. This routine returns a structure to indicate the result of the copy: struct mcsafe_ret { u64 trapnr; u64 remain; }; If the copy is successful, then both 'trapnr' and 'remain' are zero. If we faulted during the copy, then 'trapnr' will say which type of trap (X86_TRAP_PF or X86_TRAP_MC) and 'remain' says how many bytes were not copied. Signed-off-by: Tony Luck <tony.luck@intel.com> --- arch/x86/include/asm/string_64.h | 8 +++ arch/x86/kernel/x8664_ksyms_64.c | 2 + arch/x86/lib/memcpy_64.S | 133 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+) diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index ff8b9a17dc4b..5b24039463a4 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -78,6 +78,14 @@ int strcmp(const char *cs, const char *ct); #define memset(s, c, n) __memset(s, c, n) #endif +struct mcsafe_ret { + u64 trapnr; + u64 remain; +}; + +struct mcsafe_ret __mcsafe_copy(void *dst, const void __user *src, size_t cnt); +extern void __mcsafe_copy_end(void); + #endif /* __KERNEL__ */ #endif /* _ASM_X86_STRING_64_H */ diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index a0695be19864..96434edd7430 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -37,6 +37,8 @@ 
EXPORT_SYMBOL(__copy_user_nocache); EXPORT_SYMBOL(_copy_from_user); EXPORT_SYMBOL(_copy_to_user); +EXPORT_SYMBOL(__mcsafe_copy); + EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 16698bba87de..195ff0144152 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -177,3 +177,136 @@ ENTRY(memcpy_orig) .Lend: retq ENDPROC(memcpy_orig) + +/* + * __mcsafe_copy - memory copy with machine check exception handling + * Note that we only catch machine checks when reading the source addresses. + * Writes to target are posted and don't generate machine checks. + */ +ENTRY(__mcsafe_copy) + cmpl $8,%edx + jb 20f /* less then 8 bytes, go to byte copy loop */ + + /* check for bad alignment of source */ + movl %esi,%ecx + andl $7,%ecx + jz 102f /* already aligned */ + subl $8,%ecx + negl %ecx + subl %ecx,%edx +0: movb (%rsi),%al + movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 0b +102: + movl %edx,%ecx + andl $63,%edx + shrl $6,%ecx + jz 17f +1: movq (%rsi),%r8 +2: movq 1*8(%rsi),%r9 +3: movq 2*8(%rsi),%r10 +4: movq 3*8(%rsi),%r11 + mov %r8,(%rdi) + mov %r9,1*8(%rdi) + mov %r10,2*8(%rdi) + mov %r11,3*8(%rdi) +9: movq 4*8(%rsi),%r8 +10: movq 5*8(%rsi),%r9 +11: movq 6*8(%rsi),%r10 +12: movq 7*8(%rsi),%r11 + mov %r8,4*8(%rdi) + mov %r9,5*8(%rdi) + mov %r10,6*8(%rdi) + mov %r11,7*8(%rdi) + leaq 64(%rsi),%rsi + leaq 64(%rdi),%rdi + decl %ecx + jnz 1b +17: movl %edx,%ecx + andl $7,%edx + shrl $3,%ecx + jz 20f +18: movq (%rsi),%r8 + mov %r8,(%rdi) + leaq 8(%rsi),%rsi + leaq 8(%rdi),%rdi + decl %ecx + jnz 18b +20: andl %edx,%edx + jz 23f + movl %edx,%ecx +21: movb (%rsi),%al + movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 21b +23: xorq %rax, %rax + xorq %rdx, %rdx + sfence + /* copy successful. 
return 0 */ + ret + + .section .fixup,"ax" + /* fixups for machine check */ +30: + add %ecx,%edx + jmp 100f +31: + shl $6,%ecx + add %ecx,%edx + jmp 100f +32: + shl $6,%ecx + lea -8(%ecx,%edx),%edx + jmp 100f +33: + shl $6,%ecx + lea -16(%ecx,%edx),%edx + jmp 100f +34: + shl $6,%ecx + lea -24(%ecx,%edx),%edx + jmp 100f +35: + shl $6,%ecx + lea -32(%ecx,%edx),%edx + jmp 100f +36: + shl $6,%ecx + lea -40(%ecx,%edx),%edx + jmp 100f +37: + shl $6,%ecx + lea -48(%ecx,%edx),%edx + jmp 100f +38: + shl $6,%ecx + lea -56(%ecx,%edx),%edx + jmp 100f +39: + lea (%rdx,%rcx,8),%rdx + jmp 100f +40: + mov %ecx,%edx +100: + sfence + + /* %rax set the fault number in fixup_exception() */ + ret + .previous + + _ASM_EXTABLE_FAULT(0b,30b) + _ASM_EXTABLE_FAULT(1b,31b) + _ASM_EXTABLE_FAULT(2b,32b) + _ASM_EXTABLE_FAULT(3b,33b) + _ASM_EXTABLE_FAULT(4b,34b) + _ASM_EXTABLE_FAULT(9b,35b) + _ASM_EXTABLE_FAULT(10b,36b) + _ASM_EXTABLE_FAULT(11b,37b) + _ASM_EXTABLE_FAULT(12b,38b) + _ASM_EXTABLE_FAULT(18b,39b) + _ASM_EXTABLE_FAULT(21b,40b) -- 2.1.4
WARNING: multiple messages have this Message-ID (diff)
From: Tony Luck <tony.luck@intel.com> To: Ingo Molnar <mingo@kernel.org> Cc: Borislav Petkov <bp@alien8.de>, Andrew Morton <akpm@linux-foundation.org>, Andy Lutomirski <luto@kernel.org>, Dan Williams <dan.j.williams@intel.com>, elliott@hpe.com, linux-kernel@vger.kernel.org, linux-mm@kvack.org, linux-nvdimm@ml01.01.org, x86@kernel.org Subject: [PATCH v8 3/3] x86, mce: Add __mcsafe_copy() Date: Fri, 8 Jan 2016 13:18:03 -0800 [thread overview] Message-ID: <19f6403f2b04d3448ed2ac958e656645d8b6e70c.1452297867.git.tony.luck@intel.com> (raw) In-Reply-To: <cover.1452297867.git.tony.luck@intel.com> Make use of the EXTABLE_FAULT exception table entries. This routine returns a structure to indicate the result of the copy: struct mcsafe_ret { u64 trapnr; u64 remain; }; If the copy is successful, then both 'trapnr' and 'remain' are zero. If we faulted during the copy, then 'trapnr' will say which type of trap (X86_TRAP_PF or X86_TRAP_MC) and 'remain' says how many bytes were not copied. Signed-off-by: Tony Luck <tony.luck@intel.com> --- arch/x86/include/asm/string_64.h | 8 +++ arch/x86/kernel/x8664_ksyms_64.c | 2 + arch/x86/lib/memcpy_64.S | 133 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+) diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index ff8b9a17dc4b..5b24039463a4 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -78,6 +78,14 @@ int strcmp(const char *cs, const char *ct); #define memset(s, c, n) __memset(s, c, n) #endif +struct mcsafe_ret { + u64 trapnr; + u64 remain; +}; + +struct mcsafe_ret __mcsafe_copy(void *dst, const void __user *src, size_t cnt); +extern void __mcsafe_copy_end(void); + #endif /* __KERNEL__ */ #endif /* _ASM_X86_STRING_64_H */ diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index a0695be19864..96434edd7430 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -37,6 +37,8 @@ 
EXPORT_SYMBOL(__copy_user_nocache); EXPORT_SYMBOL(_copy_from_user); EXPORT_SYMBOL(_copy_to_user); +EXPORT_SYMBOL(__mcsafe_copy); + EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 16698bba87de..195ff0144152 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -177,3 +177,136 @@ ENTRY(memcpy_orig) .Lend: retq ENDPROC(memcpy_orig) + +/* + * __mcsafe_copy - memory copy with machine check exception handling + * Note that we only catch machine checks when reading the source addresses. + * Writes to target are posted and don't generate machine checks. + */ +ENTRY(__mcsafe_copy) + cmpl $8,%edx + jb 20f /* less then 8 bytes, go to byte copy loop */ + + /* check for bad alignment of source */ + movl %esi,%ecx + andl $7,%ecx + jz 102f /* already aligned */ + subl $8,%ecx + negl %ecx + subl %ecx,%edx +0: movb (%rsi),%al + movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 0b +102: + movl %edx,%ecx + andl $63,%edx + shrl $6,%ecx + jz 17f +1: movq (%rsi),%r8 +2: movq 1*8(%rsi),%r9 +3: movq 2*8(%rsi),%r10 +4: movq 3*8(%rsi),%r11 + mov %r8,(%rdi) + mov %r9,1*8(%rdi) + mov %r10,2*8(%rdi) + mov %r11,3*8(%rdi) +9: movq 4*8(%rsi),%r8 +10: movq 5*8(%rsi),%r9 +11: movq 6*8(%rsi),%r10 +12: movq 7*8(%rsi),%r11 + mov %r8,4*8(%rdi) + mov %r9,5*8(%rdi) + mov %r10,6*8(%rdi) + mov %r11,7*8(%rdi) + leaq 64(%rsi),%rsi + leaq 64(%rdi),%rdi + decl %ecx + jnz 1b +17: movl %edx,%ecx + andl $7,%edx + shrl $3,%ecx + jz 20f +18: movq (%rsi),%r8 + mov %r8,(%rdi) + leaq 8(%rsi),%rsi + leaq 8(%rdi),%rdi + decl %ecx + jnz 18b +20: andl %edx,%edx + jz 23f + movl %edx,%ecx +21: movb (%rsi),%al + movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 21b +23: xorq %rax, %rax + xorq %rdx, %rdx + sfence + /* copy successful. 
return 0 */ + ret + + .section .fixup,"ax" + /* fixups for machine check */ +30: + add %ecx,%edx + jmp 100f +31: + shl $6,%ecx + add %ecx,%edx + jmp 100f +32: + shl $6,%ecx + lea -8(%ecx,%edx),%edx + jmp 100f +33: + shl $6,%ecx + lea -16(%ecx,%edx),%edx + jmp 100f +34: + shl $6,%ecx + lea -24(%ecx,%edx),%edx + jmp 100f +35: + shl $6,%ecx + lea -32(%ecx,%edx),%edx + jmp 100f +36: + shl $6,%ecx + lea -40(%ecx,%edx),%edx + jmp 100f +37: + shl $6,%ecx + lea -48(%ecx,%edx),%edx + jmp 100f +38: + shl $6,%ecx + lea -56(%ecx,%edx),%edx + jmp 100f +39: + lea (%rdx,%rcx,8),%rdx + jmp 100f +40: + mov %ecx,%edx +100: + sfence + + /* %rax set the fault number in fixup_exception() */ + ret + .previous + + _ASM_EXTABLE_FAULT(0b,30b) + _ASM_EXTABLE_FAULT(1b,31b) + _ASM_EXTABLE_FAULT(2b,32b) + _ASM_EXTABLE_FAULT(3b,33b) + _ASM_EXTABLE_FAULT(4b,34b) + _ASM_EXTABLE_FAULT(9b,35b) + _ASM_EXTABLE_FAULT(10b,36b) + _ASM_EXTABLE_FAULT(11b,37b) + _ASM_EXTABLE_FAULT(12b,38b) + _ASM_EXTABLE_FAULT(18b,39b) + _ASM_EXTABLE_FAULT(21b,40b) -- 2.1.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2016-01-09 0:19 UTC|newest] Thread overview: 72+ messages / expand[flat|nested] mbox.gz Atom feed top 2016-01-09 0:04 [PATCH v8 0/3] Machine check recovery when kernel accesses poison Tony Luck 2016-01-09 0:04 ` Tony Luck 2015-12-31 19:40 ` [PATCH v8 2/3] x86, mce: Check for faults tagged in EXTABLE_CLASS_FAULT exception table entries Tony Luck 2015-12-31 19:40 ` Tony Luck 2016-01-08 20:49 ` [PATCH v8 1/3] x86: Expand exception table to allow new handling options Tony Luck 2016-01-08 20:49 ` Tony Luck 2016-01-09 1:52 ` Andy Lutomirski 2016-01-09 1:52 ` Andy Lutomirski 2016-01-09 3:39 ` Brian Gerst 2016-01-09 3:39 ` Brian Gerst 2016-01-09 4:31 ` Brian Gerst 2016-01-09 4:31 ` Brian Gerst 2016-01-09 6:36 ` Andy Lutomirski 2016-01-09 6:36 ` Andy Lutomirski 2016-01-11 23:09 ` Brian Gerst 2016-01-11 23:09 ` Brian Gerst 2016-01-11 23:22 ` Andy Lutomirski 2016-01-11 23:22 ` Andy Lutomirski 2016-01-11 23:48 ` Luck, Tony 2016-01-11 23:48 ` Luck, Tony 2016-01-09 17:45 ` Tony Luck 2016-01-09 17:45 ` Tony Luck 2016-01-09 18:00 ` Andy Lutomirski 2016-01-09 18:00 ` Andy Lutomirski 2016-01-09 19:51 ` Tony Luck 2016-01-09 19:51 ` Tony Luck 2016-01-09 22:32 ` Andy Lutomirski 2016-01-09 22:32 ` Andy Lutomirski 2016-01-10 1:15 ` Tony Luck 2016-01-10 1:15 ` Tony Luck 2016-01-11 0:25 ` Luck, Tony 2016-01-11 0:25 ` Luck, Tony 2016-01-08 21:18 ` Tony Luck [this message] 2016-01-08 21:18 ` [PATCH v8 3/3] x86, mce: Add __mcsafe_copy() Tony Luck 2016-01-09 1:49 ` Andy Lutomirski 2016-01-09 1:49 ` Andy Lutomirski 2016-01-09 17:48 ` Tony Luck 2016-01-09 17:48 ` Tony Luck 2016-01-09 17:57 ` Andy Lutomirski 2016-01-09 17:57 ` Andy Lutomirski 2016-01-09 19:39 ` Tony Luck 2016-01-09 19:39 ` Tony Luck 2016-01-09 22:15 ` Dan Williams 2016-01-09 22:15 ` Dan Williams 2016-01-09 22:33 ` Andy Lutomirski 2016-01-09 22:33 ` Andy Lutomirski 2016-01-10 0:23 ` Dan Williams 2016-01-10 0:23 ` Dan Williams 2016-01-10 1:40 ` Tony Luck 2016-01-10 1:40 ` Tony Luck 2016-01-10 
11:26 ` Borislav Petkov 2016-01-10 11:26 ` Borislav Petkov 2016-01-11 10:44 ` Ingo Molnar 2016-01-11 10:44 ` Ingo Molnar 2016-01-13 23:22 ` Tony Luck 2016-01-13 23:22 ` Tony Luck 2016-01-14 4:39 ` Borislav Petkov 2016-01-14 4:39 ` Borislav Petkov 2016-01-30 0:35 ` Tony Luck 2016-01-30 0:35 ` Tony Luck 2016-01-30 10:28 ` Borislav Petkov 2016-01-30 10:28 ` Borislav Petkov 2016-02-01 23:10 ` Tony Luck 2016-02-01 23:10 ` Tony Luck 2016-02-01 23:16 ` Dan Williams 2016-02-01 23:16 ` Dan Williams 2016-01-12 0:26 ` Luck, Tony 2016-01-12 0:26 ` Luck, Tony 2016-01-12 0:30 ` Andy Lutomirski 2016-01-12 0:30 ` Andy Lutomirski 2016-01-12 0:37 ` Andy Lutomirski 2016-01-12 0:37 ` Andy Lutomirski
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=19f6403f2b04d3448ed2ac958e656645d8b6e70c.1452297867.git.tony.luck@intel.com \ --to=tony.luck@intel.com \ --cc=akpm@linux-foundation.org \ --cc=bp@alien8.de \ --cc=dan.j.williams@intel.com \ --cc=elliott@hpe.com \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=linux-nvdimm@ml01.01.org \ --cc=luto@kernel.org \ --cc=mingo@kernel.org \ --cc=x86@kernel.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.