From: Joerg Roedel
Subject: [PATCH 13/39] x86/entry/32: Add PTI cr3 switches to NMI handler code
Date: Wed, 18 Jul 2018 11:40:50 +0200
Message-Id: <1531906876-13451-14-git-send-email-joro@8bytes.org>
In-Reply-To: <1531906876-13451-1-git-send-email-joro@8bytes.org>
References: <1531906876-13451-1-git-send-email-joro@8bytes.org>
To: Thomas Gleixner, Ingo Molnar, "H. Peter Anvin"
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org,
    Linus Torvalds, Andy Lutomirski, Dave Hansen, Josh Poimboeuf,
    Juergen Gross, Peter Zijlstra, Borislav Petkov, Jiri Kosina,
    Boris Ostrovsky, Brian Gerst, David Laight, Denys Vlasenko,
    Eduardo Valentin, Greg KH, Will Deacon, aliguori@amazon.com,
    daniel.gruss@iaik.tugraz.at, hughd@google.com, keescook@google.com,
    Andrea Arcangeli, Waiman Long, Pavel Machek, "David H. Gutteridge",
    jroedel@suse.de, joro@8bytes.org

The NMI handler is special: it needs to leave with the same cr3 it was
entered with. This is required because the NMI handler can be entered
from kernel code while the user cr3 is already loaded.

Signed-off-by: Joerg Roedel
---
 arch/x86/entry/entry_32.S | 39 +++++++++++++++++++++++++++++++++------
 1 file changed, 33 insertions(+), 6 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 60b28df..b1541c7 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -210,8 +210,19 @@
 .endm
 
-.macro SAVE_ALL_NMI
+.macro SAVE_ALL_NMI cr3_reg:req
 	SAVE_ALL
+
+	/*
+	 * Now switch the CR3 when PTI is enabled.
+	 *
+	 * We can enter with either user or kernel cr3; the code stores
+	 * the old cr3 in \cr3_reg and switches to the kernel cr3 if
+	 * necessary.
+	 */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
+
+.Lend_\@:
 .endm
 
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
@@ -259,7 +270,23 @@
 	POP_GS_EX
 .endm
 
-.macro RESTORE_ALL_NMI pop=0
+.macro RESTORE_ALL_NMI cr3_reg:req pop=0
+	/*
+	 * Now switch the CR3 when PTI is enabled.
+	 *
+	 * We enter with the kernel cr3 and switch cr3 to the value
+	 * stored in \cr3_reg, which is either a user or a kernel cr3.
+	 */
+	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
+
+	testl	$PTI_SWITCH_MASK, \cr3_reg
+	jz	.Lswitched_\@
+
+	/* User cr3 in \cr3_reg - write it to hardware cr3 */
+	movl	\cr3_reg, %cr3
+
+.Lswitched_\@:
+
 	RESTORE_REGS pop=\pop
 .endm
 
@@ -1331,7 +1358,7 @@ ENTRY(nmi)
 #endif
 
 	pushl	%eax				# pt_regs->orig_ax
-	SAVE_ALL_NMI
+	SAVE_ALL_NMI cr3_reg=%edi
 	ENCODE_FRAME_POINTER
 	xorl	%edx, %edx			# zero error code
 	movl	%esp, %eax			# pt_regs pointer
@@ -1359,7 +1386,7 @@ ENTRY(nmi)
 
 .Lnmi_return:
 	CHECK_AND_APPLY_ESPFIX
-	RESTORE_ALL_NMI pop=4
+	RESTORE_ALL_NMI cr3_reg=%edi pop=4
 	jmp	.Lirq_return
 
 #ifdef CONFIG_X86_ESPFIX32
@@ -1375,12 +1402,12 @@ ENTRY(nmi)
 	pushl	16(%esp)
 	.endr
 	pushl	%eax
-	SAVE_ALL_NMI
+	SAVE_ALL_NMI cr3_reg=%edi
 	ENCODE_FRAME_POINTER
 	FIXUP_ESPFIX_STACK			# %eax == %esp
 	xorl	%edx, %edx			# zero error code
 	call	do_nmi
-	RESTORE_ALL_NMI
+	RESTORE_ALL_NMI cr3_reg=%edi
 	lss	12+4(%esp), %esp		# back to espfix stack
 	jmp	.Lirq_return
 #endif
-- 
2.7.4
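
For completeness, below is a minimal standalone C model of the rule the two
macros enforce: the NMI handler runs on the kernel cr3 and leaves with exactly
the cr3 it was entered with. This is an illustrative sketch, not kernel code;
the helper names, the value chosen for PTI_SWITCH_MASK, and the sample cr3
values are assumptions made only for the demonstration.

/*
 * Standalone model (not kernel code) of the cr3 bookkeeping done by
 * SAVE_ALL_NMI and RESTORE_ALL_NMI.  PTI_SWITCH_MASK is assumed to be
 * the bit that distinguishes a user cr3 from the kernel cr3; its value
 * here is illustrative only.
 */
#include <assert.h>
#include <stdio.h>

#define PTI_SWITCH_MASK 0x1000UL	/* assumed marker bit for a user cr3 */

static unsigned long hw_cr3;		/* stands in for the %cr3 register */

/* Entry side: save the current cr3, switch to the kernel cr3 if needed */
static unsigned long nmi_entry_switch_cr3(void)
{
	unsigned long saved = hw_cr3;		/* kept in \cr3_reg (%edi) */

	if (saved & PTI_SWITCH_MASK)		/* entered with a user cr3 */
		hw_cr3 = saved & ~PTI_SWITCH_MASK;

	return saved;
}

/* Exit side: write the saved value back only if it was a user cr3 */
static void nmi_exit_restore_cr3(unsigned long saved)
{
	if (saved & PTI_SWITCH_MASK)
		hw_cr3 = saved;
	/* else: we entered on the kernel cr3, which is still loaded */
}

int main(void)
{
	unsigned long saved;

	/* Case 1: NMI hits while a user cr3 is loaded */
	hw_cr3 = 0x10000UL | PTI_SWITCH_MASK;
	saved = nmi_entry_switch_cr3();
	assert(!(hw_cr3 & PTI_SWITCH_MASK));	/* handler runs on kernel cr3 */
	nmi_exit_restore_cr3(saved);
	assert(hw_cr3 == saved);		/* leaves with the entry cr3 */

	/* Case 2: NMI hits while the kernel cr3 is loaded */
	hw_cr3 = 0x20000UL;
	saved = nmi_entry_switch_cr3();
	nmi_exit_restore_cr3(saved);
	assert(hw_cr3 == 0x20000UL);

	printf("entry cr3 preserved in both cases\n");
	return 0;
}

Compiling and running the model with any C compiler exercises both the
user-cr3 and kernel-cr3 entry cases and checks that the entry cr3 is restored.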