* [PATCH 0/5] kvm/svm: implement new DecodeAssist features
@ 2010-12-07 10:59 Andre Przywara
  2010-12-07 10:59 ` [PATCH 1/5] kvm/svm: add new SVM feature bit names Andre Przywara
                   ` (4 more replies)
  0 siblings, 5 replies; 16+ messages in thread
From: Andre Przywara @ 2010-12-07 10:59 UTC (permalink / raw)
  To: avi; +Cc: kvm, mtosatti

Hi,

upcoming AMD CPUs will have an SVM enhancement called DecodeAssist,
which provides more information when certain events are intercepted.
This information allows us to skip the instruction fetch and decode
and to handle the intercept immediately.
This patch set implements all the features documented in the recent
AMD manual (APM Vol. 2). For details see the patches.

Please review and apply.

Regards,
Andre.




* [PATCH 1/5] kvm/svm: add new SVM feature bit names
  2010-12-07 10:59 [PATCH 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
@ 2010-12-07 10:59 ` Andre Przywara
  2010-12-07 10:59 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 16+ messages in thread
From: Andre Przywara @ 2010-12-07 10:59 UTC (permalink / raw)
  To: avi; +Cc: kvm, mtosatti, Andre Przywara

The recent APM Vol. 2 and the recent AMD CPUID specification describe
new CPUID feature bits for SVM. Name them here for later use.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/kvm/svm.c |    4 ++++
 1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ae943bb..c573e2d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV           (1 <<  1)
 #define SVM_FEATURE_SVML           (1 <<  2)
 #define SVM_FEATURE_NRIP           (1 <<  3)
+#define SVM_FEATURE_TSC_RATE       (1 <<  4)
+#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
+#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
+#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
 
 #define NESTED_EXIT_HOST	0	/* Exit handled on host level */
-- 
1.6.4
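
For reference, these flags are reported in CPUID Fn8000_000Ah EDX (the
SVM feature identification leaf, named SVM_CPUID_FUNC in asm/svm.h).
A minimal user-space sketch for probing the new DecodeAssist bit,
assuming that leaf layout:

	#include <stdio.h>
	#include <cpuid.h>	/* GCC's __get_cpuid() wrapper */

	#define SVM_FEATURE_DECODE_ASSIST  (1 << 7)

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Fn8000_000A: SVM revision and feature identification */
		if (!__get_cpuid(0x8000000a, &eax, &ebx, &ecx, &edx))
			return 1;

		printf("DecodeAssist: %s\n",
		       (edx & SVM_FEATURE_DECODE_ASSIST) ? "yes" : "no");
		return 0;
	}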




* [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 10:59 [PATCH 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
  2010-12-07 10:59 ` [PATCH 1/5] kvm/svm: add new SVM feature bit names Andre Przywara
@ 2010-12-07 10:59 ` Andre Przywara
  2010-12-07 13:24   ` Avi Kivity
  2010-12-07 10:59 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 16+ messages in thread
From: Andre Przywara @ 2010-12-07 10:59 UTC (permalink / raw)
  To: avi; +Cc: kvm, mtosatti, Andre Przywara

Newer SVM implementations provide the GPR number in the VMCB, so
that the emulation path is no longer necessary to handle CR
register access intercepts. Implement the handling in svm.c and
use it when the info is provided.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/include/asm/svm.h |    2 +
 arch/x86/kvm/svm.c         |   74 +++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 68 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 11dbca7..589fc25 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define	SVM_EXIT_READ_CR0 	0x000
 #define	SVM_EXIT_READ_CR3 	0x003
 #define	SVM_EXIT_READ_CR4 	0x004
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c573e2d..b7233fd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2594,12 +2594,70 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
+static int cr_interception(struct vcpu_svm *svm)
+{
+	int reg, cr;
+	unsigned long val;
+
+	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST) ||
+	    (svm->vmcb->control.exit_info_1 & (1ULL << 63)) == 0)
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+	if (cr > 15) { /* mov to cr */
+		val = kvm_register_read(&svm->vcpu, reg);
+		switch (cr & 0x0F) {
+		case 0:
+			kvm_set_cr0(&svm->vcpu, val);
+			break;
+		case 3:
+			kvm_set_cr3(&svm->vcpu, val);
+			break;
+		case 4:
+			kvm_set_cr4(&svm->vcpu, val);
+			break;
+		case 8:
+			kvm_set_cr8(&svm->vcpu, val & 0xfUL);
+			break;
+		default:
+			WARN(1, "unhandled write to CR%d", cr & 0x0F);
+			return EMULATE_FAIL;
+		}
+	} else { /* mov from cr */
+		switch (cr & 0x0F) {
+		case 0:
+			val = kvm_read_cr0(&svm->vcpu);
+			break;
+		case 2:
+			val = svm->vcpu.arch.cr2;
+			break;
+		case 3:
+			val = svm->vcpu.arch.cr3;
+			break;
+		case 4:
+			val = kvm_read_cr4(&svm->vcpu);
+			break;
+		case 8:
+			val = kvm_get_cr8(&svm->vcpu);
+			break;
+		default:
+			WARN(1, "unhandled read from CR%d", cr & 0x0F);
+			return EMULATE_FAIL;
+		}
+		kvm_register_write(&svm->vcpu, reg, val);
+	}
+	skip_emulated_instruction(&svm->vcpu);
+
+	return 1;
+}
+
 static int cr0_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	int r;
 
-	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 
 	if (svm->nested.vmexit_rip) {
 		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2617,7 +2675,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, 0, 0, 0);
+	cr_interception(svm);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
 		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 		return 1;
@@ -2864,14 +2922,14 @@ static int pause_interception(struct vcpu_svm *svm)
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
+	[SVM_EXIT_READ_CR0]			= cr_interception,
+	[SVM_EXIT_READ_CR3]			= cr_interception,
+	[SVM_EXIT_READ_CR4]			= cr_interception,
+	[SVM_EXIT_READ_CR8]			= cr_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
-	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR3]			= cr_interception,
+	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
 	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
 	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-- 
1.6.4
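
For reference, cr_interception() above leans on two encodings: the exit
code itself gives direction and CR number (SVM_EXIT_READ_CR0 is 0x000,
SVM_EXIT_WRITE_CR0 is 0x010, hence the "cr > 15" test), and with
DecodeAssist EXITINFO1 carries the operand GPR. A commented sketch of
that decode, with the bit positions as the patch uses them (the APM
Vol. 2 layout is authoritative):

	/*
	 * EXITINFO1 for MOV-CR intercepts with DecodeAssist:
	 *   bit  63   GPR number is valid
	 *   bits 3:0  number of the GPR operand
	 */
	valid = svm->vmcb->control.exit_info_1 & (1ULL << 63);
	reg   = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;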




* [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-07 10:59 [PATCH 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
  2010-12-07 10:59 ` [PATCH 1/5] kvm/svm: add new SVM feature bit names Andre Przywara
  2010-12-07 10:59 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
@ 2010-12-07 10:59 ` Andre Przywara
  2010-12-07 11:02   ` Alexander Graf
  2010-12-07 13:25   ` Avi Kivity
  2010-12-07 10:59 ` [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
  2010-12-07 10:59 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
  4 siblings, 2 replies; 16+ messages in thread
From: Andre Przywara @ 2010-12-07 10:59 UTC (permalink / raw)
  To: avi; +Cc: kvm, mtosatti, Andre Przywara

Newer SVM implementations provide the GPR number in the VMCB, so
that the emulation path is no longer necessary to handle debug
register access intercepts. Implement the handling in svm.c and
use it when the info is provided.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/kvm/svm.c |   55 ++++++++++++++++++++++++++++++++++++---------------
 1 files changed, 39 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b7233fd..369bd85 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2669,6 +2669,29 @@ static int cr0_write_interception(struct vcpu_svm *svm)
 	return r == EMULATE_DONE;
 }
 
+static int dr_interception(struct vcpu_svm *svm)
+{
+	int reg, dr;
+	unsigned long val;
+
+	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+
+	if (dr > 15) { /* mov to DRn */
+		val = kvm_register_read(&svm->vcpu, reg);
+		kvm_set_dr(&svm->vcpu, dr - 16, val);
+	} else {
+		kvm_get_dr(&svm->vcpu, dr, &val);
+		kvm_register_write(&svm->vcpu, reg, val);
+	}
+
+	skip_emulated_instruction(&svm->vcpu);
+	return 1;
+}
+
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
@@ -2931,22 +2954,22 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_WRITE_CR3]			= cr_interception,
 	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
-	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR5]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR6]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR7]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR4]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR6]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR0]			= dr_interception,
+	[SVM_EXIT_READ_DR1]			= dr_interception,
+	[SVM_EXIT_READ_DR2]			= dr_interception,
+	[SVM_EXIT_READ_DR3]			= dr_interception,
+	[SVM_EXIT_READ_DR4]			= dr_interception,
+	[SVM_EXIT_READ_DR5]			= dr_interception,
+	[SVM_EXIT_READ_DR6]			= dr_interception,
+	[SVM_EXIT_READ_DR7]			= dr_interception,
+	[SVM_EXIT_WRITE_DR0]			= dr_interception,
+	[SVM_EXIT_WRITE_DR1]			= dr_interception,
+	[SVM_EXIT_WRITE_DR2]			= dr_interception,
+	[SVM_EXIT_WRITE_DR3]			= dr_interception,
+	[SVM_EXIT_WRITE_DR4]			= dr_interception,
+	[SVM_EXIT_WRITE_DR5]			= dr_interception,
+	[SVM_EXIT_WRITE_DR6]			= dr_interception,
+	[SVM_EXIT_WRITE_DR7]			= dr_interception,
 	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
 	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
-- 
1.6.4
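
For reference, the "dr - 16" arithmetic above works because the DR exit
codes form two contiguous blocks: SVM_EXIT_READ_DR0 is 0x020 and
SVM_EXIT_WRITE_DR0 is 0x030, so after subtracting SVM_EXIT_READ_DR0 the
reads land on 0..15 and the writes on 16..31:

	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
	if (dr > 15)		/* a write intercept */
		dr -= 16;	/* recover the DR number */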




* [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept
  2010-12-07 10:59 [PATCH 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
                   ` (2 preceding siblings ...)
  2010-12-07 10:59 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
@ 2010-12-07 10:59 ` Andre Przywara
  2010-12-07 13:27   ` Avi Kivity
  2010-12-07 10:59 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
  4 siblings, 1 reply; 16+ messages in thread
From: Andre Przywara @ 2010-12-07 10:59 UTC (permalink / raw)
  To: avi; +Cc: kvm, mtosatti, Andre Przywara

When the DecodeAssist feature is available, the linear address
is provided in the VMCB on INVLPG intercepts. Use it directly to
avoid any decoding and emulation.
This is only useful for shadow paging, though.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/kvm/svm.c |    7 ++++++-
 1 files changed, 6 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 369bd85..3cf2cef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2586,7 +2586,12 @@ static int iret_interception(struct vcpu_svm *svm)
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST))
+		return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+
+	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+	skip_emulated_instruction(&svm->vcpu);
+	return 1;
 }
 
 static int emulate_on_interception(struct vcpu_svm *svm)
-- 
1.6.4




* [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB
  2010-12-07 10:59 [PATCH 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
                   ` (3 preceding siblings ...)
  2010-12-07 10:59 ` [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
@ 2010-12-07 10:59 ` Andre Przywara
  2010-12-07 13:33   ` Avi Kivity
  4 siblings, 1 reply; 16+ messages in thread
From: Andre Przywara @ 2010-12-07 10:59 UTC (permalink / raw)
  To: avi; +Cc: kvm, mtosatti, Andre Przywara

In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/include/asm/kvm_host.h |    3 +++
 arch/x86/include/asm/svm.h      |    4 +++-
 arch/x86/kvm/emulate.c          |    1 +
 arch/x86/kvm/svm.c              |   20 ++++++++++++++++++++
 arch/x86/kvm/vmx.c              |    7 +++++++
 5 files changed, 34 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cfbcbfa..3e3a67e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -586,6 +586,9 @@ struct kvm_x86_ops {
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+	int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 589fc25..6d64b1d 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u64 lbr_ctl;
 	u64 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..abff8ff 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -525,6 +525,7 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
 	/* x86 instructions are limited to 15 bytes. */
 	if (eip + size - ctxt->eip > 15)
 		return X86EMUL_UNHANDLEABLE;
+	kvm_x86_ops->prefetch_instruction(ctxt->vcpu);
 	while (size--) {
 		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
 		if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3cf2cef..ed94e9a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	svm_set_interrupt_shadow(vcpu, 0);
 }
 
+static int prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	uint8_t len;
+	struct fetch_cache *fetch;
+
+	len = svm->vmcb->control.insn_len & 0x0F;
+	if (len == 0)
+		return 1;
+
+	fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+	fetch->start = kvm_rip_read(&svm->vcpu);
+	fetch->end = fetch->start + len;
+	memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+	return 0;
+}
+
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -3830,6 +3848,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
+
+	.prefetch_instruction = prefetch_instruction,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 72cfdb7..4825545 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1009,6 +1009,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static int prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -4362,6 +4367,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
+
+	.prefetch_instruction = prefetch_instruction,
 };
 
 static int __init vmx_init(void)
-- 
1.6.4




* Re: [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-07 10:59 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
@ 2010-12-07 11:02   ` Alexander Graf
  2010-12-07 13:14     ` Avi Kivity
  2010-12-07 13:25   ` Avi Kivity
  1 sibling, 1 reply; 16+ messages in thread
From: Alexander Graf @ 2010-12-07 11:02 UTC (permalink / raw)
  To: Andre Przywara; +Cc: avi, kvm, mtosatti


On 07.12.2010, at 11:59, Andre Przywara wrote:

> Newer SVM implementations provide the GPR number in the VMCB, so
> that the emulation path is no longer necessary to handle debug
> register access intercepts. Implement the handling in svm.c and
> use it when the info is provided.
> 
> Signed-off-by: Andre Przywara <andre.przywara@amd.com>
> ---
> arch/x86/kvm/svm.c |   55 ++++++++++++++++++++++++++++++++++++---------------
> 1 files changed, 39 insertions(+), 16 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index b7233fd..369bd85 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2669,6 +2669,29 @@ static int cr0_write_interception(struct vcpu_svm *svm)
> 	return r == EMULATE_DONE;
> }
> 
> +static int dr_interception(struct vcpu_svm *svm)
> +{
> +	int reg, dr;
> +	unsigned long val;
> +
> +	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST))
> +		return emulate_on_interception(svm);

Wouldn't it be better to just change the entry in svm_exit_handlers if we detect that feature on init? No unnecessary branching then.


Alex
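
Concretely, that suggestion would amount to something like the
following hypothetical sketch (svm_patch_exit_handlers() is an invented
name; the feature check is kept from the patch as-is):

	/* run once at init, e.g. from svm_hardware_setup() */
	static void svm_patch_exit_handlers(void)
	{
		int i;

		if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST))
			return;

		for (i = SVM_EXIT_READ_DR0; i <= SVM_EXIT_READ_DR7; i++)
			svm_exit_handlers[i] = dr_interception;
		for (i = SVM_EXIT_WRITE_DR0; i <= SVM_EXIT_WRITE_DR7; i++)
			svm_exit_handlers[i] = dr_interception;
	}

dr_interception() could then drop its runtime branch entirely.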



* Re: [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-07 11:02   ` Alexander Graf
@ 2010-12-07 13:14     ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2010-12-07 13:14 UTC (permalink / raw)
  To: Alexander Graf; +Cc: Andre Przywara, kvm, mtosatti

On 12/07/2010 01:02 PM, Alexander Graf wrote:
> On 07.12.2010, at 11:59, Andre Przywara wrote:
>
> >  Newer SVM implementations provide the GPR number in the VMCB, so
> >  that the emulation path is no longer necessary to handle debug
> >  register access intercepts. Implement the handling in svm.c and
> >  use it when the info is provided.
> >
> >  Signed-off-by: Andre Przywara <andre.przywara@amd.com>
> >  ---
> >  arch/x86/kvm/svm.c |   55 ++++++++++++++++++++++++++++++++++++---------------
> >  1 files changed, 39 insertions(+), 16 deletions(-)
> >
> >  diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> >  index b7233fd..369bd85 100644
> >  --- a/arch/x86/kvm/svm.c
> >  +++ b/arch/x86/kvm/svm.c
> >  @@ -2669,6 +2669,29 @@ static int cr0_write_interception(struct vcpu_svm *svm)
> >  	return r == EMULATE_DONE;
> >  }
> >
> >  +static int dr_interception(struct vcpu_svm *svm)
> >  +{
> >  +	int reg, dr;
> >  +	unsigned long val;
> >  +
> >  +	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST))
> >  +		return emulate_on_interception(svm);
>
> Wouldn't it be better to just change the entry in svm_exit_handlers if we detect that feature on init? No unnecessary branching then.
>

I'd rather make them const instead.  For something performance critical 
(unlike this) we can use static_cpu_has().

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 10:59 ` [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler Andre Przywara
@ 2010-12-07 13:24   ` Avi Kivity
  2010-12-07 14:30     ` Andre Przywara
  0 siblings, 1 reply; 16+ messages in thread
From: Avi Kivity @ 2010-12-07 13:24 UTC (permalink / raw)
  To: Andre Przywara; +Cc: kvm, mtosatti

On 12/07/2010 12:59 PM, Andre Przywara wrote:
> Newer SVM implementations provide the GPR number in the VMCB, so
> that the emulation path is no longer necessary to handle CR
> register access intercepts. Implement the handling in svm.c and
> use it when the info is provided.
>
> Signed-off-by: Andre Przywara <andre.przywara@amd.com>
> ---
>   arch/x86/include/asm/svm.h |    2 +
>   arch/x86/kvm/svm.c         |   74 +++++++++++++++++++++++++++++++++++++++-----
>   2 files changed, 68 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 11dbca7..589fc25 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -256,6 +256,8 @@ struct __attribute__ ((__packed__)) vmcb {
>   #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
>   #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
>
> +#define SVM_EXITINFO_REG_MASK 0x0F
> +
>   #define	SVM_EXIT_READ_CR0 	0x000
>   #define	SVM_EXIT_READ_CR3 	0x003
>   #define	SVM_EXIT_READ_CR4 	0x004
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index c573e2d..b7233fd 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2594,12 +2594,70 @@ static int emulate_on_interception(struct vcpu_svm *svm)
>   	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
>   }
>
> +static int cr_interception(struct vcpu_svm *svm)
> +{
> +	int reg, cr;
> +	unsigned long val;
> +
> +	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST) ||
> +	    (svm->vmcb->control.exit_info_1 & (1ULL << 63)) == 0)
> +		return emulate_on_interception(svm);
> +
> +	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
> +	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
> +	if (cr > 15) { /* mov to cr */
> +		val = kvm_register_read(&svm->vcpu, reg);
> +		switch (cr & 0x0F) {
> +		case 0:
> +			kvm_set_cr0(&svm->vcpu, val);
> +			break;
> +		case 3:
> +			kvm_set_cr3(&svm->vcpu, val);
> +			break;
> +		case 4:
> +			kvm_set_cr4(&svm->vcpu, val);
> +			break;
> +		case 8:
> +			kvm_set_cr8(&svm->vcpu, val & 0xfUL);
> +			break;
> +		default:
> +			WARN(1, "unhandled write to CR%d", cr & 0x0F);
> +			return EMULATE_FAIL;
> +		}
>

...

> +	}
> +	skip_emulated_instruction(&svm->vcpu);
> +
> +	return 1;
> +}
> +

Must not skip_emulated_instruction() if we inject an exception; see how 
vmx does this (handle_cr).
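
What that fix could look like in cr_interception(), assuming the
kvm_set_cr*() helpers report failure through their return value as they
do on the emulator path (a sketch, not the code under review):

	err = 0;
	switch (cr & 0x0F) {
	case 0:
		err = kvm_set_cr0(&svm->vcpu, val);
		break;
	case 3:
		err = kvm_set_cr3(&svm->vcpu, val);
		break;
	case 4:
		err = kvm_set_cr4(&svm->vcpu, val);
		break;
	case 8:
		err = kvm_set_cr8(&svm->vcpu, val & 0xfUL);
		break;
	}
	/* only advance RIP if the access queued no exception */
	if (!err)
		skip_emulated_instruction(&svm->vcpu);
	return 1;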

> @@ -2864,14 +2922,14 @@ static int pause_interception(struct vcpu_svm *svm)
>   }
>
>   static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
> -	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
> -	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
> -	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
> -	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
> +	[SVM_EXIT_READ_CR0]			= cr_interception,
> +	[SVM_EXIT_READ_CR3]			= cr_interception,
> +	[SVM_EXIT_READ_CR4]			= cr_interception,
> +	[SVM_EXIT_READ_CR8]			= cr_interception,
>   	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
>   	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
> -	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
> -	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
> +	[SVM_EXIT_WRITE_CR3]			= cr_interception,
> +	[SVM_EXIT_WRITE_CR4]			= cr_interception,
>   	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,

We could move cr[08]_write_interception into cr_interception, but that 
takes a bit more thought.  Best done later.

>   	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
>   	[SVM_EXIT_READ_DR1]			= emulate_on_interception,


-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 3/5] kvm/svm: enhance mov DR intercept handler
  2010-12-07 10:59 ` [PATCH 3/5] kvm/svm: enhance mov DR " Andre Przywara
  2010-12-07 11:02   ` Alexander Graf
@ 2010-12-07 13:25   ` Avi Kivity
  1 sibling, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2010-12-07 13:25 UTC (permalink / raw)
  To: Andre Przywara; +Cc: kvm, mtosatti

On 12/07/2010 12:59 PM, Andre Przywara wrote:
> Newer SVM implementations provide the GPR number in the VMCB, so
> that the emulation path is no longer necessary to handle debug
> register access intercepts. Implement the handling in svm.c and
> use it when the info is provided.
>
>
>
> +static int dr_interception(struct vcpu_svm *svm)
> +{
> +	int reg, dr;
> +	unsigned long val;
> +
> +	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST))
> +		return emulate_on_interception(svm);
> +
> +	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
> +	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
> +
> +	if (dr > 15) { /* mov to DRn */
> +		val = kvm_register_read(&svm->vcpu, reg);
> +		kvm_set_dr(&svm->vcpu, dr - 16, val);
> +	} else {
> +		kvm_get_dr(&svm->vcpu, dr, &val);
> +		kvm_register_write(&svm->vcpu, reg, val);
> +	}
> +
> +	skip_emulated_instruction(&svm->vcpu);
> +	return 1;
> +}

Again, need to handle faults.


-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept
  2010-12-07 10:59 ` [PATCH 4/5] kvm/svm: implement enhanced INVLPG intercept Andre Przywara
@ 2010-12-07 13:27   ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2010-12-07 13:27 UTC (permalink / raw)
  To: Andre Przywara; +Cc: kvm, mtosatti

On 12/07/2010 12:59 PM, Andre Przywara wrote:
> When the DecodeAssist feature is available, the linear address
> is provided in the VMCB on INVLPG intercepts. Use it directly to
> avoid any decoding and emulation.
> This is only useful for shadow paging, though.
>
> Signed-off-by: Andre Przywara <andre.przywara@amd.com>
> ---
>   arch/x86/kvm/svm.c |    7 ++++++-
>   1 files changed, 6 insertions(+), 1 deletions(-)
>
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 369bd85..3cf2cef 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -2586,7 +2586,12 @@ static int iret_interception(struct vcpu_svm *svm)
>
>   static int invlpg_interception(struct vcpu_svm *svm)
>   {
> -	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
> +	if (!boot_cpu_has(SVM_FEATURE_DECODE_ASSIST))
> +		return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;

static_cpu_has()

> +
> +	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
> +	skip_emulated_instruction(&svm->vcpu);
> +	return 1;
>   }
>
>   static int emulate_on_interception(struct vcpu_svm *svm)


-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB
  2010-12-07 10:59 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
@ 2010-12-07 13:33   ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2010-12-07 13:33 UTC (permalink / raw)
  To: Andre Przywara; +Cc: kvm, mtosatti

On 12/07/2010 12:59 PM, Andre Przywara wrote:
> In case of a nested page fault or an intercepted #PF, newer SVM
> implementations provide a copy of the faulting instruction bytes
> in the VMCB.
> Use these bytes to feed the instruction emulator and avoid the costly
> guest instruction fetch in this case.
>
>
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index cfbcbfa..3e3a67e 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -586,6 +586,9 @@ struct kvm_x86_ops {
>   	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
>
>   	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
> +
> +	int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
> +
>   	const struct trace_print_flags *exit_reasons_str;
>   };

How about adding a byte array/len parameter to x86_decode_insn()?  It 
could be used to prefill the buffer instead of invoking a callback.

>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index 6366735..abff8ff 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -525,6 +525,7 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
>   	/* x86 instructions are limited to 15 bytes. */
>   	if (eip + size - ctxt->eip > 15)
>   		return X86EMUL_UNHANDLEABLE;
> +	kvm_x86_ops->prefetch_instruction(ctxt->vcpu);

Even with the callback, this belongs in x86_decode_insn(), not on every 
fetch.

>   	while (size--) {
>   		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
>   		if (rc != X86EMUL_CONTINUE)
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 3cf2cef..ed94e9a 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
>   	svm_set_interrupt_shadow(vcpu, 0);
>   }
>
> +static int prefetch_instruction(struct kvm_vcpu *vcpu)

svm_prefetch_instruction()

> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +	uint8_t len;
> +	struct fetch_cache *fetch;
> +
> +	len = svm->vmcb->control.insn_len & 0x0F;
> +	if (len == 0)
> +		return 1;
> +
> +	fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
> +	fetch->start = kvm_rip_read(&svm->vcpu);
> +	fetch->end = fetch->start + len;
> +	memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
> +
> +	return 0;
> +}

svm code shouldn't reach this deep into the emulator's privates.

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 13:24   ` Avi Kivity
@ 2010-12-07 14:30     ` Andre Przywara
  2010-12-07 14:41       ` Avi Kivity
  0 siblings, 1 reply; 16+ messages in thread
From: Andre Przywara @ 2010-12-07 14:30 UTC (permalink / raw)
  To: Avi Kivity; +Cc: kvm, mtosatti

Avi Kivity wrote:
> On 12/07/2010 12:59 PM, Andre Przywara wrote:
>> Newer SVM implementations provide the GPR number in the VMCB, so
>> that the emulation path is no longer necessary to handle CR
>> register access intercepts. Implement the handling in svm.c and
>> use it when the info is provided.
>>
>> Signed-off-by: Andre Przywara <andre.przywara@amd.com>
>> ---
>>   arch/x86/include/asm/svm.h |    2 +
>>   arch/x86/kvm/svm.c         |   74 +++++++++++++++++++++++++++++++++++++++-----
>>   2 files changed, 68 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
>> index 11dbca7..589fc25 100644
>> --- a/arch/x86/include/asm/svm.h
>> +++ b/arch/x86/include/asm/svm.h
>> ...
>> @@ -2864,14 +2922,14 @@ static int pause_interception(struct vcpu_svm *svm)
>>   }
>>
>>   static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
>> -	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
>> -	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
>> -	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
>> -	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
>> +	[SVM_EXIT_READ_CR0]			= cr_interception,
>> +	[SVM_EXIT_READ_CR3]			= cr_interception,
>> +	[SVM_EXIT_READ_CR4]			= cr_interception,
>> +	[SVM_EXIT_READ_CR8]			= cr_interception,
>>   	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
>>   	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
>> -	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
>> -	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
>> +	[SVM_EXIT_WRITE_CR3]			= cr_interception,
>> +	[SVM_EXIT_WRITE_CR4]			= cr_interception,
>>   	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
> 
> We could move cr[08]_write_interception into cr_interception, but that 
> takes a bit more thought.  Best done later.
Yes, I thought about that, too. But since we still have to deal with the 
emulation code path, it would not make the code easier. But if we 
overwrite the svm_exit_handlers[] on detecting the SVM feature, this 
could make more sense. I will check this out.

Thanks for the review, I will address your comments.

Regards,
Andre.





* Re: [PATCH 2/5] kvm/svm: enhance MOV CR intercept handler
  2010-12-07 14:30     ` Andre Przywara
@ 2010-12-07 14:41       ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2010-12-07 14:41 UTC (permalink / raw)
  To: Andre Przywara; +Cc: kvm, mtosatti

On 12/07/2010 04:30 PM, Andre Przywara wrote:
> Avi Kivity wrote:
>> On 12/07/2010 12:59 PM, Andre Przywara wrote:
>>> Newer SVM implementations provide the GPR number in the VMCB, so
>>> that the emulation path is no longer necesarry to handle CR
>>> register access intercepts. Implement the handling in svm.c and
>>> use it when the info is provided.
>>>
>>> Signed-off-by: Andre Przywara <andre.przywara@amd.com>
>>> ---
>>>   arch/x86/include/asm/svm.h |    2 +
>>>   arch/x86/kvm/svm.c         |   74 +++++++++++++++++++++++++++++++++++++++-----
>>>   2 files changed, 68 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
>>> index 11dbca7..589fc25 100644
>>> --- a/arch/x86/include/asm/svm.h
>>> +++ b/arch/x86/include/asm/svm.h
>>> ...
>>> @@ -2864,14 +2922,14 @@ static int pause_interception(struct vcpu_svm *svm)
>>>   }
>>>
>>>   static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
>>> -    [SVM_EXIT_READ_CR0]            = emulate_on_interception,
>>> -    [SVM_EXIT_READ_CR3]            = emulate_on_interception,
>>> -    [SVM_EXIT_READ_CR4]            = emulate_on_interception,
>>> -    [SVM_EXIT_READ_CR8]            = emulate_on_interception,
>>> +    [SVM_EXIT_READ_CR0]            = cr_interception,
>>> +    [SVM_EXIT_READ_CR3]            = cr_interception,
>>> +    [SVM_EXIT_READ_CR4]            = cr_interception,
>>> +    [SVM_EXIT_READ_CR8]            = cr_interception,
>>>       [SVM_EXIT_CR0_SEL_WRITE]        = emulate_on_interception,
>>>       [SVM_EXIT_WRITE_CR0]            = cr0_write_interception,
>>> -    [SVM_EXIT_WRITE_CR3]            = emulate_on_interception,
>>> -    [SVM_EXIT_WRITE_CR4]            = emulate_on_interception,
>>> +    [SVM_EXIT_WRITE_CR3]            = cr_interception,
>>> +    [SVM_EXIT_WRITE_CR4]            = cr_interception,
>>>       [SVM_EXIT_WRITE_CR8]            = cr8_write_interception,
>>
>> We could move cr[08]_write_interception into cr_interception, but 
>> that takes a bit more thought.  Best done later.
> Yes, I thought about that, too. But since we still have to deal with 
> the emulation code path, it would not make the code easier. But if we 
> overwrite the svm_exit_handlers[] on detecting the SVM feature, this 
> could make more sense. I will check this out.
>

In fact the correct thing is to make sure kvm_set_cr0() and 
kvm_set_cr8() do the right thing, so that the emulation path invoked 
directly (not through a cr intercept) works.

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB
  2010-12-10 13:51 ` [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB Andre Przywara
@ 2010-12-13 12:24   ` Avi Kivity
  0 siblings, 0 replies; 16+ messages in thread
From: Avi Kivity @ 2010-12-13 12:24 UTC (permalink / raw)
  To: Andre Przywara; +Cc: mtosatti, kvm

On 12/10/2010 03:51 PM, Andre Przywara wrote:
> In case of a nested page fault or an intercepted #PF, newer SVM
> implementations provide a copy of the faulting instruction bytes
> in the VMCB.
> Use these bytes to feed the instruction emulator and avoid the costly
> guest instruction fetch in this case.
>
>
>
> +static int svm_prefetch_instruction(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +	uint8_t len;
> +	struct fetch_cache *fetch;
> +
> +	len = svm->vmcb->control.insn_len & 0x0F;
> +	if (len == 0)
> +		return 1;
> +
> +	fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
> +	fetch->start = kvm_rip_read(&svm->vcpu);
> +	fetch->end = fetch->start + len;
> +	memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
> +
> +	return 0;
> +}

This reaching into the emulator internals from svm code is not very 
good.  It also assumes ->prefetch_instruction() is called immediately 
after an exit; this isn't true in vmx, and for svm emulating multiple 
instructions during the nsvm vmexit sequence was at least considered.

Alternatives are:
- add the insn data to emulate_instruction() and friends (my first 
suggestion)
- add x86_decode_insn_init(), which initializes the decode cache, and 
x86_decode_insn_prefill_cache(), called only if we have the insn data

Another one: teach kvm_fetch_guest_virt() to check if addr/bytes 
intersects with csbase+rip/len; if so, use that instead of doing the 
page table dance.
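
A sketch of the first alternative, i.e. letting callers hand the
pre-fetched bytes to the decoder (the prototype below is illustrative,
not an existing one):

	/* insn/insn_len may be NULL/0, in which case the decoder
	 * fetches from guest memory as before */
	int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
			    void *insn, int insn_len);

	/* inside x86_decode_insn(): prefill the fetch cache */
	if (insn_len > 0) {
		c->fetch.start = c->eip;
		c->fetch.end = c->eip + insn_len;
		memcpy(c->fetch.data, insn, insn_len);
	}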

-- 
error compiling committee.c: too many arguments to function



* [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB
  2010-12-10 13:51 [PATCH -v2 0/5] kvm/svm: implement new DecodeAssist features Andre Przywara
@ 2010-12-10 13:51 ` Andre Przywara
  2010-12-13 12:24   ` Avi Kivity
  0 siblings, 1 reply; 16+ messages in thread
From: Andre Przywara @ 2010-12-10 13:51 UTC (permalink / raw)
  To: avi; +Cc: mtosatti, kvm, Andre Przywara

In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.

Signed-off-by: Andre Przywara <andre.przywara@amd.com>
---
 arch/x86/include/asm/kvm_host.h |    3 +++
 arch/x86/include/asm/svm.h      |    4 +++-
 arch/x86/kvm/emulate.c          |    1 +
 arch/x86/kvm/svm.c              |   20 ++++++++++++++++++++
 arch/x86/kvm/vmx.c              |    7 +++++++
 5 files changed, 34 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2b89195..baaf063 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -586,6 +586,9 @@ struct kvm_x86_ops {
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+	int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
+
 	const struct trace_print_flags *exit_reasons_str;
 };
 
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 589fc25..6d64b1d 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u64 lbr_ctl;
 	u64 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..70385ee 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2623,6 +2623,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 	c->eip = ctxt->eip;
 	c->fetch.start = c->fetch.end = c->eip;
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
+	kvm_x86_ops->prefetch_instruction(ctxt->vcpu);
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 73f1a6d..685b264 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	svm_set_interrupt_shadow(vcpu, 0);
 }
 
+static int svm_prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	uint8_t len;
+	struct fetch_cache *fetch;
+
+	len = svm->vmcb->control.insn_len & 0x0F;
+	if (len == 0)
+		return 1;
+
+	fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+	fetch->start = kvm_rip_read(&svm->vcpu);
+	fetch->end = fetch->start + len;
+	memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+	return 0;
+}
+
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -3848,6 +3866,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
+
+	.prefetch_instruction = svm_prefetch_instruction,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e5ef924..7572751 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1009,6 +1009,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static int vmx_prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -4362,6 +4367,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
+
+	.prefetch_instruction = vmx_prefetch_instruction,
 };
 
 static int __init vmx_init(void)
-- 
1.6.4



