From: Julian Stecklina <jsteckli@amazon.de>
To: kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>
Cc: Julian Stecklina <jsteckli@amazon.de>,
	js@alien8.de, linux-kernel@vger.kernel.org
Subject: [PATCH 4/4] kvm, vmx: remove manually coded vmx instructions
Date: Wed, 24 Oct 2018 10:28:59 +0200
Message-ID: <ba8f4a02e90f1dc95a900f6a34caddce40a358d4.1540369608.git.jsteckli@amazon.de>
In-Reply-To: <09986c98c9655f1542768ecfda644ac821e67a57.1540369608.git.jsteckli@amazon.de>

So far, the VMX code has relied on manually assembled (.byte-encoded) VMX
instructions. This was apparently done to ensure compatibility with old
versions of binutils that did not know the VMX mnemonics. The mnemonics were
introduced in binutils 2.19, and the kernel currently requires at least
binutils 2.20, so the workaround is no longer needed.

Remove the manually assembled versions and replace them with proper inline
assembly using the instruction mnemonics. This improves code generation as
well as source code readability.
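
To illustrate the pattern, here is a simplified sketch of the conversion
(the helper names are made up for illustration and are not part of this
patch): hand-encoded opcode bytes are dropped and the assembler emits the
encoding from the mnemonic instead.

/* Before: opcode bytes spelled out by hand because old assemblers
 * did not know the VMX mnemonics.
 */
#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"

static inline void example_vmxoff_old(void)
{
	asm volatile (ASM_VMX_VMXOFF : : : "cc");
}

/* After: the assembler emits the same bytes from the mnemonic, and
 * operands can be expressed as ordinary asm constraints.
 */
static inline void example_vmxoff_new(void)
{
	asm volatile ("vmxoff" : : : "cc");
}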

According to the bloat-o-meter, this change removes ~1300 bytes from the
text segment.

Signed-off-by: Julian Stecklina <jsteckli@amazon.de>
Reviewed-by: Jan H. Schönherr <jschoenh@amazon.de>
Reviewed-by: Konrad Jan Miller <kjm@amazon.de>
Reviewed-by: Razvan-Alin Ghitulete <rga@amazon.de>
---
 arch/x86/include/asm/virtext.h |  2 +-
 arch/x86/include/asm/vmx.h     | 13 -------------
 arch/x86/kvm/vmx.c             | 39 ++++++++++++++++++---------------------
 3 files changed, 19 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 0116b2e..c5395b3 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -40,7 +40,7 @@ static inline int cpu_has_vmx(void)
  */
 static inline void cpu_vmxoff(void)
 {
-	asm volatile (ASM_VMX_VMXOFF : : : "cc");
+	asm volatile ("vmxoff" : : : "cc");
 	cr4_clear_bits(X86_CR4_VMXE);
 }
 
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9527ba5..ade0f15 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -503,19 +503,6 @@ enum vmcs_field {
 
 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR		0xfffbc000ul
 
-
-#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
-#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
-#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
-#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
-#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
-#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
-#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_INVEPT		  ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
-#define ASM_VMX_INVVPID		  ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
-
 struct vmx_msr_entry {
 	u32 index;
 	u32 reserved;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 82cfb909..bbbdccb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2077,7 +2077,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 	return -1;
 }
 
-static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+static inline void __invvpid(long ext, u16 vpid, gva_t gva)
 {
 	struct {
 		u64 vpid : 16;
@@ -2086,21 +2086,21 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
 	} operand = { vpid, 0, gva };
 	bool error;
 
-	asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
-		      : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
+	asm volatile ("invvpid %1, %2" CC_SET(na)
+		      : CC_OUT(na) (error) : "m"(operand), "r"(ext)
 		      : "memory");
 	BUG_ON(error);
 }
 
-static inline void __invept(int ext, u64 eptp, gpa_t gpa)
+static inline void __invept(long ext, u64 eptp, gpa_t gpa)
 {
 	struct {
 		u64 eptp, gpa;
 	} operand = {eptp, gpa};
 	bool error;
 
-	asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
-		      : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
+	asm volatile ("invept %1, %2" CC_SET(na)
+		      : CC_OUT(na) (error) : "m" (operand), "r" (ext)
 		      : "memory");
 	BUG_ON(error);
 }
@@ -2120,8 +2120,8 @@ static void vmcs_clear(struct vmcs *vmcs)
 	u64 phys_addr = __pa(vmcs);
 	bool error;
 
-	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
-		      : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+	asm volatile ("vmclear %1" CC_SET(na)
+		      : CC_OUT(na) (error) : "m"(phys_addr)
 		      : "memory");
 	if (unlikely(error))
 		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
@@ -2145,8 +2145,8 @@ static void vmcs_load(struct vmcs *vmcs)
 	if (static_branch_unlikely(&enable_evmcs))
 		return evmcs_load(phys_addr);
 
-	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
-		      : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
+	asm volatile ("vmptrld %1" CC_SET(na)
+		      : CC_OUT(na) (error) : "m"(phys_addr)
 		      : "memory");
 	if (unlikely(error))
 		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
@@ -2323,8 +2323,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
 {
 	unsigned long value;
 
-	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
-		      : "=a"(value) : "d"(field) : "cc");
+	asm volatile ("vmread %1, %0" : "=rm"(value) : "r"(field) : "cc");
 	return value;
 }
 
@@ -2375,8 +2374,8 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val
 {
 	bool error;
 
-	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
-		      : CC_OUT(na) (error) : "a"(value), "d"(field));
+	asm volatile ("vmwrite %1, %2" CC_SET(na)
+		      : CC_OUT(na) (error) : "rm"(value), "r"(field));
 	if (unlikely(error))
 		vmwrite_error(field, value);
 }
@@ -4397,9 +4396,7 @@ static void kvm_cpu_vmxon(u64 addr)
 	cr4_set_bits(X86_CR4_VMXE);
 	intel_pt_handle_vmx(1);
 
-	asm volatile (ASM_VMX_VMXON_RAX
-			: : "a"(&addr), "m"(addr)
-			: "memory", "cc");
+	asm volatile ("vmxon %0" : : "m"(addr) : "memory", "cc");
 }
 
 static int hardware_enable(void)
@@ -4468,7 +4465,7 @@ static void vmclear_local_loaded_vmcss(void)
  */
 static void kvm_cpu_vmxoff(void)
 {
-	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+	asm volatile ("vmxoff" : : : "cc");
 
 	intel_pt_handle_vmx(0);
 	cr4_clear_bits(X86_CR4_VMXE);
@@ -10748,7 +10745,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
 		"jmp 1f \n\t"
 		"2: \n\t"
-		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
+		"vmwrite %%" _ASM_SP ", %%" _ASM_DX "\n\t"
 		"1: \n\t"
 		/* Check if vmlaunch of vmresume is needed */
 		"cmpl $0, %c[launched](%0) \n\t"
@@ -10773,9 +10770,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 		/* Enter guest mode */
 		"jne 1f \n\t"
-		__ex(ASM_VMX_VMLAUNCH) "\n\t"
+		"vmlaunch \n\t"
 		"jmp 2f \n\t"
-		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+		"1: vmresume \n\t"
 		"2: "
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
-- 
2.7.4


Thread overview: 13+ messages
2018-10-24  8:28 [PATCH 1/4] kvm, vmx: move CR2 context switch out of assembly path Julian Stecklina
2018-10-24  8:28 ` [PATCH 2/4] kvm, vmx: move register clearing " Julian Stecklina
2018-10-25 16:55   ` Jim Mattson
2018-10-26 10:31     ` Stecklina, Julian
2018-10-26 15:46   ` Sean Christopherson
2018-10-26 16:30     ` Jim Mattson
2018-10-29 13:47       ` Stecklina, Julian
2018-10-24  8:28 ` [PATCH 3/4] kvm, vmx: fix __invvpid style Julian Stecklina
2018-10-24  8:28 ` Julian Stecklina [this message]
2018-10-24 17:44   ` [PATCH 4/4] kvm, vmx: remove manually coded vmx instructions Eric Northup
2018-10-26 10:46     ` Stecklina, Julian
2018-10-26 14:30       ` Sean Christopherson
2018-10-25 17:02 ` [PATCH 1/4] kvm, vmx: move CR2 context switch out of assembly path Jim Mattson
