* [PATCH v2 0/2] x86/pti: KVM: fixes and optimizations for IBRS
@ 2018-02-22 15:43 Paolo Bonzini
  2018-02-22 15:43 ` [PATCH v2 1/2] KVM: x86: use native MSR ops for SPEC_CTRL Paolo Bonzini
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Paolo Bonzini @ 2018-02-22 15:43 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: x86, Radim Krčmář,
	KarimAllah Ahmed, David Woodhouse, Jim Mattson, Thomas Gleixner,
	Ingo Molnar, stable

Two tiny patches for the IBRS code.  They should go in
through the x86/pti tree and should apply to both 4.9 and 4.14 trees.

Thanks,

Paolo

v1->v2: removed patch 2; the same bug has already been fixed

Paolo Bonzini (2):
  KVM: x86: use native MSR ops for SPEC_CTRL
  KVM: VMX: mark RDMSR path as unlikely

 arch/x86/kvm/svm.c |  9 +++++----
 arch/x86/kvm/vmx.c |  9 +++++----
 2 files changed, 10 insertions(+), 8 deletions(-)

-- 
1.8.3.1

* [PATCH v2 1/2] KVM: x86: use native MSR ops for SPEC_CTRL
  2018-02-22 15:43 [PATCH v2 0/2] x86/pti: KVM: fixes and optimizations for IBRS Paolo Bonzini
@ 2018-02-22 15:43 ` Paolo Bonzini
  2018-02-23  8:26   ` [tip:x86/pti] KVM/x86: Remove indirect MSR op calls from SPEC_CTRL tip-bot for Paolo Bonzini
  2018-02-22 15:43 ` [PATCH v2 2/2] KVM: VMX: mark RDMSR path as unlikely Paolo Bonzini
  2018-02-23  7:26 ` [PATCH v2 0/2] x86/pti: KVM: fixes and optimizations for IBRS Ingo Molnar
  2 siblings, 1 reply; 6+ messages in thread
From: Paolo Bonzini @ 2018-02-22 15:43 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: x86, Radim Krčmář,
	KarimAllah Ahmed, David Woodhouse, Jim Mattson, Thomas Gleixner,
	Ingo Molnar, stable

Having a paravirt indirect call in the IBRS restore path is not a
good idea, since we are trying to protect from speculative execution
of bogus indirect branch targets.  It is also slower, so use
native_wrmsrl() on the vmentry path too.
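
For illustration, a simplified sketch of the two call paths (assumed
shapes only; the real definitions live in arch/x86/include/asm/paravirt.h
and arch/x86/include/asm/msr.h and differ in detail):

/*
 * Sketch: the paravirt wrmsrl() dispatches through a pv_ops function
 * pointer, i.e. an indirect call -- exactly the kind of branch that
 * IBRS is meant to constrain, and that a retpoline makes slow.
 */
static inline void sketch_pv_wrmsrl(unsigned int msr, u64 val)
{
	pv_cpu_ops.write_msr(msr, (u32)val, (u32)(val >> 32));
}

/*
 * Sketch: native_wrmsrl() boils down to a direct WRMSR instruction,
 * with no indirect branch on the way.
 */
static inline void sketch_native_wrmsrl(unsigned int msr, u64 val)
{
	asm volatile("wrmsr"
		     : /* no outputs */
		     : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))
		     : "memory");
}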

Fixes: d28b387fb74d ("KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL")
Cc: x86@kernel.org
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Jim Mattson <jmattson@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: stable@vger.kernel.org
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 7 ++++---
 arch/x86/kvm/vmx.c | 7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..1598beeda11c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 67b028d8e726..5caeb8dc5bda 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -9453,7 +9454,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9589,10 +9590,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
-- 
1.8.3.1

* [PATCH v2 2/2] KVM: VMX: mark RDMSR path as unlikely
  2018-02-22 15:43 [PATCH v2 0/2] x86/pti: KVM: fixes and optimizations for IBRS Paolo Bonzini
  2018-02-22 15:43 ` [PATCH v2 1/2] KVM: x86: use native MSR ops for SPEC_CTRL Paolo Bonzini
@ 2018-02-22 15:43 ` Paolo Bonzini
  2018-02-23  8:26   ` [tip:x86/pti] KVM/VMX: Optimize vmx_vcpu_run() and svm_vcpu_run() by marking the RDMSR path as unlikely() tip-bot for Paolo Bonzini
  2018-02-23  7:26 ` [PATCH v2 0/2] x86/pti: KVM: fixes and optimizations for IBRS Ingo Molnar
  2 siblings, 1 reply; 6+ messages in thread
From: Paolo Bonzini @ 2018-02-22 15:43 UTC (permalink / raw)
  To: linux-kernel, kvm
  Cc: x86, Radim Krčmář,
	KarimAllah Ahmed, David Woodhouse, Jim Mattson, Thomas Gleixner,
	Ingo Molnar, stable

vmx_vcpu_run() and svm_vcpu_run() are large functions, and giving the
compiler a branch hint can actually make a substantial cycle difference
by keeping the fast path contiguous in memory.  Without the hint, the
retpoline-guest/retpoline-host case is about 50 cycles slower.
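
For reference, unlikely() is the kernel's wrapper around GCC's
__builtin_expect() (from include/linux/compiler.h):

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

With the hint, the compiler can emit the rarely taken RDMSR branch out
of line, so the hot vmexit path stays contiguous in the instruction
cache instead of jumping over a cold block.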

Cc: x86@kernel.org
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Jim Mattson <jmattson@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: stable@vger.kernel.org
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c | 2 +-
 arch/x86/kvm/vmx.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1598beeda11c..24c9521ebc24 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5465,7 +5465,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index af89d377681d..e13fd2a833c4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9589,7 +9589,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
-- 
1.8.3.1

* Re: [PATCH v2 0/2] x86/pti: KVM: fixes and optimizations for IBRS
  2018-02-22 15:43 [PATCH v2 0/2] x86/pti: KVM: fixes and optimizations for IBRS Paolo Bonzini
  2018-02-22 15:43 ` [PATCH v2 1/2] KVM: x86: use native MSR ops for SPEC_CTRL Paolo Bonzini
  2018-02-22 15:43 ` [PATCH v2 2/2] KVM: VMX: mark RDMSR path as unlikely Paolo Bonzini
@ 2018-02-23  7:26 ` Ingo Molnar
  2 siblings, 0 replies; 6+ messages in thread
From: Ingo Molnar @ 2018-02-23  7:26 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: linux-kernel, kvm, x86, Radim Krčmář,
	KarimAllah Ahmed, David Woodhouse, Jim Mattson, Thomas Gleixner,
	stable


* Paolo Bonzini <pbonzini@redhat.com> wrote:

> Two tiny patches for the IBRS code.  They should go in
> through the x86/pti tree and should apply to both 4.9 and 4.14 trees.
> 
> Thanks,
> 
> Paolo
> 
> v1->v2: removed patch 2; the same bug has already been fixed
> 
> Paolo Bonzini (2):
>   KVM: x86: use native MSR ops for SPEC_CTRL
>   KVM: VMX: mark RDMSR path as unlikely
> 
>  arch/x86/kvm/svm.c |  9 +++++----
>  arch/x86/kvm/vmx.c |  9 +++++----
>  2 files changed, 10 insertions(+), 8 deletions(-)

Applied to tip:x86/pti, with minor tweaks to the titles/changelogs.

If all goes fine in testing I will send all pending tip:x86/pti changes to Linus 
later today, so the KVM development tree should be able to pull in these changes 
via upstream pretty soon.

Thanks,

	Ingo

* [tip:x86/pti] KVM/x86: Remove indirect MSR op calls from SPEC_CTRL
  2018-02-22 15:43 ` [PATCH v2 1/2] KVM: x86: use native MSR ops for SPEC_CTRL Paolo Bonzini
@ 2018-02-23  8:26   ` tip-bot for Paolo Bonzini
  0 siblings, 0 replies; 6+ messages in thread
From: tip-bot for Paolo Bonzini @ 2018-02-23  8:26 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: rkrcmar, jmattson, tglx, linux-kernel, karahmed, torvalds, mingo,
	dwmw, peterz, pbonzini, hpa

Commit-ID:  ecb586bd29c99fb4de599dec388658e74388daad
Gitweb:     https://git.kernel.org/tip/ecb586bd29c99fb4de599dec388658e74388daad
Author:     Paolo Bonzini <pbonzini@redhat.com>
AuthorDate: Thu, 22 Feb 2018 16:43:17 +0100
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 23 Feb 2018 08:24:35 +0100

KVM/x86: Remove indirect MSR op calls from SPEC_CTRL

Having a paravirt indirect call in the IBRS restore path is not a
good idea, since we are trying to protect from speculative execution
of bogus indirect branch targets.  It is also slower, so use
native_wrmsrl() on the vmentry path too.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Fixes: d28b387fb74d ("KVM/VMX: Allow direct access to MSR_IA32_SPEC_CTRL")
Link: http://lkml.kernel.org/r/20180222154318.20361-2-pbonzini@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/kvm/svm.c | 7 ++++---
 arch/x86/kvm/vmx.c | 7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..1598beeda11c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3dec126aa302..0927be315965 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -9452,7 +9453,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9588,10 +9589,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();

* [tip:x86/pti] KVM/VMX: Optimize vmx_vcpu_run() and svm_vcpu_run() by marking the RDMSR path as unlikely()
  2018-02-22 15:43 ` [PATCH v2 2/2] KVM: VMX: mark RDMSR path as unlikely Paolo Bonzini
@ 2018-02-23  8:26   ` tip-bot for Paolo Bonzini
  0 siblings, 0 replies; 6+ messages in thread
From: tip-bot for Paolo Bonzini @ 2018-02-23  8:26 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: rkrcmar, peterz, jmattson, dwmw, linux-kernel, hpa, torvalds,
	karahmed, pbonzini, tglx, mingo

Commit-ID:  946fbbc13dce68902f64515b610eeb2a6c3d7a64
Gitweb:     https://git.kernel.org/tip/946fbbc13dce68902f64515b610eeb2a6c3d7a64
Author:     Paolo Bonzini <pbonzini@redhat.com>
AuthorDate: Thu, 22 Feb 2018 16:43:18 +0100
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Fri, 23 Feb 2018 08:24:36 +0100

KVM/VMX: Optimize vmx_vcpu_run() and svm_vcpu_run() by marking the RDMSR path as unlikely()

vmx_vcpu_run() and svm_vcpu_run() are large functions, and giving
branch hints to the compiler can actually make a substantial cycle
difference by keeping the fast path contiguous in memory.

With this optimization, the retpoline-guest/retpoline-host case is
about 50 cycles faster.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/20180222154318.20361-3-pbonzini@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 arch/x86/kvm/svm.c | 2 +-
 arch/x86/kvm/vmx.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1598beeda11c..24c9521ebc24 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5465,7 +5465,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0927be315965..7f8401d05939 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9588,7 +9588,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
