[1/4] x86/paravirt: Dont patch flush_tlb_single

Message ID 20171127204257.497387357@linutronix.de
State New, archived
Series
  • x86/kaiser: Paravirt support and various fixlets

Commit Message

Thomas Gleixner Nov. 27, 2017, 8:34 p.m. UTC
native_flush_tlb_single() is not just INVLPG anymore. With
X86_FEATURE_INVPCID_SINGLE and KAISER enabled it also flushes the shadow
mapping. But even with KAISER disabled, flushing the particular ASID is
the right thing to do.

Remove the paravirt patching for it.

Fixes: 1fde25dc8ef4 ("x86/mm/kaiser: Use PCID feature to make user and kernel switches faster")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/paravirt_patch_64.c |    2 --
 1 file changed, 2 deletions(-)
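
For readers wondering why the INVLPG shortcut is no longer valid, here is a
minimal sketch of the shape the single-page flush now has. This is
illustrative only, not the kernel's actual code; kaiser_enabled() and
flush_user_asid_for() are hypothetical stand-ins for the real helpers.

/*
 * Illustrative sketch only -- not the kernel implementation. Shows why a
 * single-page flush is more than one INVLPG once KAISER/PCID is involved.
 */
#include <stdbool.h>

static bool kaiser_enabled(void)                 { return true; } /* placeholder */
static void flush_user_asid_for(unsigned long a) { (void)a; }     /* placeholder */

static inline void sketch_flush_tlb_single(unsigned long addr)
{
	/* Flush the address in the current (kernel) address space. */
	asm volatile("invlpg (%0)" :: "r" (addr) : "memory");

	/*
	 * With KAISER the user-visible page tables live under a separate
	 * ASID/PCID (the "shadow" mapping), so that translation has to be
	 * invalidated as well -- via INVPCID when X86_FEATURE_INVPCID_SINGLE
	 * is available, or by marking the user ASID stale otherwise.
	 * Patching the call site down to a bare INVLPG silently skips this.
	 */
	if (kaiser_enabled())
		flush_user_asid_for(addr);
}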

Comments

Peter Zijlstra Nov. 27, 2017, 9:01 p.m. UTC | #1
On Mon, Nov 27, 2017 at 09:34:17PM +0100, Thomas Gleixner wrote:
> native_flush_tlb_single() is not just INVLPG anymore. With
> X86_FEATURE_INVPCID_SINGLE and KAISER enabled it also flushes the shadow
> mapping. But even with KAISER disabled, flushing the particular ASID is
> the right thing to do.
> 
> Remove the paravirt patching for it.
> 
> Fixes: 1fde25dc8ef4 ("x86/mm/kaiser: Use PCID feature to make user and kernel switches faster")
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Argh!! Yes quite.

ACK

> ---
>  arch/x86/kernel/paravirt_patch_64.c |    2 --
>  1 file changed, 2 deletions(-)
> 
> --- a/arch/x86/kernel/paravirt_patch_64.c
> +++ b/arch/x86/kernel/paravirt_patch_64.c
> @@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq;
>  DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
>  DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
>  DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
> -DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
>  DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
>  
>  DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
> @@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobb
>  		PATCH_SITE(pv_mmu_ops, read_cr2);
>  		PATCH_SITE(pv_mmu_ops, read_cr3);
>  		PATCH_SITE(pv_mmu_ops, write_cr3);
> -		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
>  		PATCH_SITE(pv_cpu_ops, wbinvd);
>  #if defined(CONFIG_PARAVIRT_SPINLOCKS)
>  		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
> 
>
Josh Poimboeuf Nov. 27, 2017, 9:13 p.m. UTC | #2
On Mon, Nov 27, 2017 at 09:34:17PM +0100, Thomas Gleixner wrote:
> native_flush_tlb_single() is not just INVLPG anymore. With
> X86_FEATURE_INVPCID_SINGLE and KAISER enabled it also flushes the shadow
> mapping. But even with KAISER disabled, flushing the particular ASID is
> the right thing to do.
> 
> Remove the paravirt patching for it.
> 
> Fixes: 1fde25dc8ef4 ("x86/mm/kaiser: Use PCID feature to make user and kernel switches faster")
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
>  arch/x86/kernel/paravirt_patch_64.c |    2 --
>  1 file changed, 2 deletions(-)
> 
> --- a/arch/x86/kernel/paravirt_patch_64.c
> +++ b/arch/x86/kernel/paravirt_patch_64.c
> @@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq;
>  DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
>  DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
>  DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
> -DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
>  DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
>  
>  DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
> @@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobb
>  		PATCH_SITE(pv_mmu_ops, read_cr2);
>  		PATCH_SITE(pv_mmu_ops, read_cr3);
>  		PATCH_SITE(pv_mmu_ops, write_cr3);
> -		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
>  		PATCH_SITE(pv_cpu_ops, wbinvd);
>  #if defined(CONFIG_PARAVIRT_SPINLOCKS)
>  		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):

Nice find!

Reviewed-and-tested-by: Josh Poimboeuf <jpoimboe@redhat.com>

Patch

--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq;
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobb
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
-		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
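
To make the effect of the deletion concrete: DEF_NATIVE() records a literal
native instruction sequence for a pv op, and PATCH_SITE() lets native_patch()
copy that sequence over the call site at boot when it fits. With the two lines
removed, flush_tlb_single call sites are no longer shrunk to a bare
"invlpg (%rdi)" and instead keep calling native_flush_tlb_single(), which also
takes care of the user/shadow ASID. A simplified sketch of the patching idea
follows; it is not the kernel's actual DEF_NATIVE()/PATCH_SITE() code.

/*
 * Simplified illustration of boot-time paravirt patching -- not the
 * kernel's real implementation.
 */
#include <string.h>

struct pv_native_insns {
	const unsigned char *insns;	/* raw native instruction bytes   */
	unsigned int len;		/* length of the replacement code */
};

/*
 * Overwrite a pv-op call site with the native instructions, but only if
 * they fit in the space the call occupied; otherwise the call is left
 * alone and keeps going through the pv-op function pointer.
 */
static unsigned int patch_call_site(void *site, unsigned int site_len,
				    const struct pv_native_insns *native)
{
	if (native->len > site_len)
		return 0;
	memcpy(site, native->insns, native->len);
	return native->len;
}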