* [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
@ 2012-10-17  2:10 Will Auld
  2012-10-17 10:35 ` Avi Kivity
  0 siblings, 1 reply; 8+ messages in thread
From: Will Auld @ 2012-10-17  2:10 UTC (permalink / raw)
  To: mtosatti, avi, kvm, xiantao.zhang, jinsong.liu, will.auld

Signed-off-by: Will Auld <will.auld@intel.com>
---

Resending to full list

Marcelo,

This patch is what I believe you asked for as foundational for later
patches to address IA32_TSC_ADJUST.
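
For background on why the origin matters: the SDM specifies that a guest
WRMSR to IA32_TSC also adjusts IA32_TSC_ADJUST by the same delta, while
host-initiated writes (e.g. restoring state on migration) must not. A minimal
sketch of how a later patch might consume the flag (ia32_tsc_adjust_msr is a
hypothetical field name):

static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset,
				 bool guest_initiated)
{
	/* Guest WRMSR to IA32_TSC: TSC_ADJUST must track the delta. */
	if (guest_initiated)
		vcpu->arch.ia32_tsc_adjust_msr +=
			offset - vmcs_read64(TSC_OFFSET);
	vmcs_write64(TSC_OFFSET, offset);
}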

Thanks,

Will

 arch/x86/include/asm/kvm_host.h |  8 ++++----
 arch/x86/kvm/svm.c              | 18 ++++++++++--------
 arch/x86/kvm/vmx.c              | 18 ++++++++++--------
 arch/x86/kvm/x86.c              | 18 ++++++++++--------
 arch/x86/kvm/x86.h              |  2 +-
 5 files changed, 35 insertions(+), 29 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09155d6..c06f0d1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -621,7 +621,7 @@ struct kvm_x86_ops {
 	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
 				struct kvm_guest_debug *dbg);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data, bool guest_initiated);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
 	void (*get_segment)(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
@@ -684,7 +684,7 @@ struct kvm_x86_ops {
 	bool (*has_wbinvd_exit)(void);
 
 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
-	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset, bool guest_initiated);
 
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
 	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);
@@ -772,7 +772,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data, bool guest_initiated);
 
 struct x86_emulate_ctxt;
 
@@ -799,7 +799,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool guest_initiated);
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index baead95..424be27 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1012,7 +1012,8 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 	svm->tsc_ratio             = ratio;
 }
 
-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset, 
+				bool guest_initiated)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 g_tsc_offset = 0;
@@ -1255,7 +1256,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
 	init_vmcb(svm);
-	kvm_write_tsc(&svm->vcpu, 0);
+	kvm_write_tsc(&svm->vcpu, 0, false /*Not Guest Initiated*/);
 
 	err = fx_init(&svm->vcpu);
 	if (err)
@@ -3147,13 +3148,14 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
 	return 0;
 }
 
-static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
+static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data, 
+			bool guest_initiated)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	switch (ecx) {
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, data, guest_initiated);
 		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
@@ -3208,12 +3210,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
 	default:
-		return kvm_set_msr_common(vcpu, ecx, data);
+		return kvm_set_msr_common(vcpu, ecx, data, guest_initiated);
 	}
 	return 0;
 }
 
-static int wrmsr_interception(struct vcpu_svm *svm)
+static int wrmsr_interception(struct vcpu_svm *svm, bool guest_initiated)
 {
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
@@ -3221,7 +3223,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
 

 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, ecx, data)) {
+	if (svm_set_msr(&svm->vcpu, ecx, data, guest_initiated)) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
 	} else {
@@ -3234,7 +3236,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
 static int msr_interception(struct vcpu_svm *svm)
 {
 	if (svm->vmcb->control.exit_info_1)
-		return wrmsr_interception(svm);
+		return wrmsr_interception(svm, true /*Guest Initiated*/);
 	else
 		return rdmsr_interception(svm);
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03d..85a9603 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1864,7 +1864,8 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 /*
  * writes 'offset' into guest's timestamp counter offset register
  */
-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset, 
+				bool guest_initiated)
 {
 	if (is_guest_mode(vcpu)) {
 		/*
@@ -2197,7 +2198,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data, 
+			bool guest_initiated)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
@@ -2205,7 +2207,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
 	switch (msr_index) {
 	case MSR_EFER:
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_index, data, guest_initiated);
 		break;
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
@@ -2231,7 +2233,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_TSC:
-		kvm_write_tsc(vcpu, data);
+		kvm_write_tsc(vcpu, data, guest_initiated);
 		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
@@ -2239,7 +2241,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			vcpu->arch.pat = data;
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_index, data, guest_initiated);
 		break;
 	case MSR_TSC_AUX:
 		if (!vmx->rdtscp_enabled)
@@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 			}
 			break;
 		}
-		ret = kvm_set_msr_common(vcpu, msr_index, data);
+		ret = kvm_set_msr_common(vcpu, msr_index, data, guest_initiated);
 	}
 
 	return ret;
@@ -3918,7 +3920,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
 	set_cr4_guest_host_mask(vmx);
 
-	kvm_write_tsc(&vmx->vcpu, 0);
+	kvm_write_tsc(&vmx->vcpu, 0, false /*Not Guest Initiated*/);
 
 	return 0;
 }
@@ -4653,7 +4655,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	if (vmx_set_msr(vcpu, ecx, data) != 0) {
+	if (vmx_set_msr(vcpu, ecx, data, true /*Guest Initiated*/) != 0) {
 		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(vcpu, 0);
 		return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48..9b1263d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -883,9 +883,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data, bool guest_initiated)
 {
-	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
+	return kvm_x86_ops->set_msr(vcpu, msr_index, data, guest_initiated);
 }
 
 /*
@@ -893,7 +893,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
  */
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	return kvm_set_msr(vcpu, index, *data);
+	return kvm_set_msr(vcpu, index, *data, false /*Not Guest Initiated*/);
 }
 
 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
@@ -1043,7 +1043,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 	return tsc;
 }
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data, bool guest_initiated)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
@@ -1126,7 +1126,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
-	kvm_x86_ops->write_tsc_offset(vcpu, offset);
+	kvm_x86_ops->write_tsc_offset(vcpu, offset, guest_initiated);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
 
@@ -1561,7 +1561,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, 
+			bool guest_initiated)
 {
 	bool pr = false;
 
@@ -2324,7 +2325,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		if (check_tsc_unstable()) {
 			u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
 						vcpu->arch.last_guest_tsc);
-			kvm_x86_ops->write_tsc_offset(vcpu, offset);
+			kvm_x86_ops->write_tsc_offset(vcpu, offset, 
+						false /*Not Guest Initiated*/);
 			vcpu->arch.tsc_catchup = 1;
 		}
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -4286,7 +4288,7 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 			    u32 msr_index, u64 data)
 {
-	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data, false /*Not Guest Initiated*/);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 3d1134d..241f62c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data, bool guest_initiated);
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
 	gva_t addr, void *val, unsigned int bytes,
-- 
1.8.0.rc0





* Re: [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
  2012-10-17  2:10 [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere Will Auld
@ 2012-10-17 10:35 ` Avi Kivity
  2012-10-17 14:09   ` Marcelo Tosatti
  0 siblings, 1 reply; 8+ messages in thread
From: Avi Kivity @ 2012-10-17 10:35 UTC (permalink / raw)
  To: will.auld; +Cc: Will Auld, mtosatti, kvm, xiantao.zhang, jinsong.liu

On 10/17/2012 04:10 AM, Will Auld wrote:
> Signed-off-by: Will Auld <will.auld@intel.com>
> ---
> 
> Resending to full list
> 
> Marcelo,
> 
> This patch is what I believe you asked for as foundational for later
> patches to address IA32_TSC_ADJUST.
> 

Please write a changelog to reflect the motivation.

All those bool parameters scattered all over the place aren't very
pretty.  Usually we solve this with helpers that embed the parameter
name (kvm_set_msr() vs. kvm_set_msr_host()) but there are too many
functions for this to work here.
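
A sketch of that pattern, assuming a common __kvm_set_msr() worker
(hypothetical names):

static inline int kvm_set_msr_guest(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	return __kvm_set_msr(vcpu, index, data, true /* guest_initiated */);
}

static inline int kvm_set_msr_host(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	return __kvm_set_msr(vcpu, index, data, false /* guest_initiated */);
}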

Marcelo, any ideas?

> Thanks,
> 
> Will
> 
> [...]


-- 
error compiling committee.c: too many arguments to function


* Re: [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
  2012-10-17 10:35 ` Avi Kivity
@ 2012-10-17 14:09   ` Marcelo Tosatti
  2012-10-17 14:28     ` Avi Kivity
  2012-10-17 22:08     ` Auld, Will
  0 siblings, 2 replies; 8+ messages in thread
From: Marcelo Tosatti @ 2012-10-17 14:09 UTC (permalink / raw)
  To: Avi Kivity; +Cc: will.auld, Will Auld, kvm, xiantao.zhang, jinsong.liu

On Wed, Oct 17, 2012 at 12:35:33PM +0200, Avi Kivity wrote:
> On 10/17/2012 04:10 AM, Will Auld wrote:
> > Signed-off-by: Will Auld <will.auld@intel.com>
> > ---
> > 
> > Resending to full list
> > 
> > Marcelo,
> > 
> > This patch is what I believe you asked for as foundational for later
> > patches to address IA32_TSC_ADJUST.
> > 
> 
> Please write a changelog to reflect the motivation.
> 
> All those bool parameters scattered all over the place aren't very
> pretty.  Usually we solve this with helpers that embed the parameter
> name (kvm_set_msr() vs. kvm_set_msr_host()) but there are too many
> functions for this to work here.
> 
> Marcelo, any ideas?

It's easier to read

kvm_x86_ops->kvm_set_msr()
kvm_x86_ops->kvm_set_msr_host()

than

kvm_x86_ops->kvm_set_msr(,false)
kvm_x86_ops->kvm_set_msr(,true)

So you're right.



* Re: [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
  2012-10-17 14:09   ` Marcelo Tosatti
@ 2012-10-17 14:28     ` Avi Kivity
  2012-10-22 21:58       ` Will Auld
  2012-10-17 22:08     ` Auld, Will
  1 sibling, 1 reply; 8+ messages in thread
From: Avi Kivity @ 2012-10-17 14:28 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: will.auld, Will Auld, kvm, xiantao.zhang, jinsong.liu

On 10/17/2012 04:09 PM, Marcelo Tosatti wrote:
> On Wed, Oct 17, 2012 at 12:35:33PM +0200, Avi Kivity wrote:
>> On 10/17/2012 04:10 AM, Will Auld wrote:
>> > Signed-off-by: Will Auld <will.auld@intel.com>
>> > ---
>> > 
>> > Resending to full list
>> > 
>> > Marcelo,
>> > 
> >> > This patch is what I believe you asked for as foundational for later
> >> > patches to address IA32_TSC_ADJUST.
>> > 
>> 
>> Please write a changelog to reflect the motivation.
>> 
>> All those bool parameters scattered all over the place aren't very
>> pretty.  Usually we solve this with helpers that embed the parameter
>> name (kvm_set_msr() vs. kvm_set_msr_host()) but there are too many
>> functions for this to work here.
>> 
>> Marcelo, any ideas?
> 
> > It's easier to read
> 
> kvm_x86_ops->kvm_set_msr()
> kvm_x86_ops->kvm_set_msr_host()
> 
> > than
> 
> kvm_x86_ops->kvm_set_msr(,false)
> kvm_x86_ops->kvm_set_msr(,true)
> 
> So you're right.

Yes, but we have a million functions for setting MSRs.

Maybe

struct msr {
    bool host_requested;
    u32 index;
    u64 data;
};

and change all the APIs to use that.
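
A call site would then look something like this (sketch; assumes
kvm_set_msr() is converted to take the struct):

	struct msr msr = {
		.host_requested = true,	/* origin travels with the data */
		.index = MSR_IA32_TSC,
		.data = data,
	};

	r = kvm_set_msr(vcpu, &msr);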


-- 
error compiling committee.c: too many arguments to function


* RE: [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
  2012-10-17 14:09   ` Marcelo Tosatti
  2012-10-17 14:28     ` Avi Kivity
@ 2012-10-17 22:08     ` Auld, Will
  1 sibling, 0 replies; 8+ messages in thread
From: Auld, Will @ 2012-10-17 22:08 UTC (permalink / raw)
  To: Marcelo Tosatti, Avi Kivity; +Cc: Will Auld, kvm, Zhang, Xiantao, Liu, Jinsong

OK, agreed it is not pretty. 

Thanks,

Will

> -----Original Message-----
> From: Marcelo Tosatti [mailto:mtosatti@redhat.com]
> Sent: Wednesday, October 17, 2012 7:09 AM
> To: Avi Kivity
> Cc: Auld, Will; Will Auld; kvm@vger.kernel.org; Zhang, Xiantao; Liu,
> Jinsong
> Subject: Re: [PATCH] Added call parameter to track whether invocation
> originated with guest or elsewhere
> 
> On Wed, Oct 17, 2012 at 12:35:33PM +0200, Avi Kivity wrote:
> > On 10/17/2012 04:10 AM, Will Auld wrote:
> > > Signed-off-by: Will Auld <will.auld@intel.com>
> > > ---
> > >
> > > Resending to full list
> > >
> > > Marcelo,
> > >
> > > This patch is what I believe you asked for as foundational for later
> > > patches to address IA32_TSC_ADJUST.
> > >
> >
> > Please write a changelog to reflect the motivation.
> >
> > All those bool parameters scattered all over the place aren't very
> > pretty.  Usually we solve this with helpers that embed the parameter
> > name (kvm_set_msr() vs. kvm_set_msr_host()) but there are too many
> > functions for this to work here.
> >
> > Marcelo, any ideas?
> 
> It's easier to read
> 
> kvm_x86_ops->kvm_set_msr()
> kvm_x86_ops->kvm_set_msr_host()
> 
> than
> 
> kvm_x86_ops->kvm_set_msr(,false)
> kvm_x86_ops->kvm_set_msr(,true)
> 
> So you're right.



* Re: [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
  2012-10-17 14:28     ` Avi Kivity
@ 2012-10-22 21:58       ` Will Auld
  2012-10-23 19:56         ` Auld, Will
  0 siblings, 1 reply; 8+ messages in thread
From: Will Auld @ 2012-10-22 21:58 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, will.auld, kvm, xiantao.zhang, jinsong.liu

On Wed, 2012-10-17 at 16:28 +0200, Avi Kivity wrote:
> On 10/17/2012 04:09 PM, Marcelo Tosatti wrote:
> > On Wed, Oct 17, 2012 at 12:35:33PM +0200, Avi Kivity wrote:
> >> On 10/17/2012 04:10 AM, Will Auld wrote:
> >> > Signed-off-by: Will Auld <will.auld@intel.com>
> >> > ---
> >> > 
> >> > Resending to full list
> >> > 
> >> > Marcelo,
> >> > 
> >> > This patch is what I believe you asked for as foundational for
> >> > later patches to address IA32_TSC_ADJUST.
> >> > 
> >> 
> >> Please write a changelog to reflect the motivation.
> >> 
> >> All those bool parameters scattered all over the place aren't very
> >> pretty.  Usually we solve this with helpers that embed the parameter
> >> name (kvm_set_msr() vs. kvm_set_msr_host()) but there are too many
> >> functions for this to work here.
> >> 
> >> Marcelo, any ideas?
> > 
> > It's easier to read
> > 
> > kvm_x86_ops->kvm_set_msr()
> > kvm_x86_ops->kvm_set_msr_host()
> > 
> > than
> > 
> > kvm_x86_ops->kvm_set_msr(,false)
> > kvm_x86_ops->kvm_set_msr(,true)
> > 
> > So you're right.
> 
> Yes, but we have a million functions for setting MSRs.
> 
> Maybe
> 
> struct msr {
>     bool host_requested;
>     u32 index;
>     u64 data;
> };
> 
> and change all the APIs to use that.
> 
> 

I was working on a different solution and then saw this suggestion just
now. I like this tack and will look at it more closely.

Thanks,

Will



* RE: [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
  2012-10-22 21:58       ` Will Auld
@ 2012-10-23 19:56         ` Auld, Will
  2012-10-26 20:48           ` Marcelo Tosatti
  0 siblings, 1 reply; 8+ messages in thread
From: Auld, Will @ 2012-10-23 19:56 UTC (permalink / raw)
  To: Auld, Will, Avi Kivity; +Cc: Marcelo Tosatti, kvm, Zhang, Xiantao, Liu, Jinsong

Having looked more closely at the tack of swapping the index and data fields in some
function calls for a struct parameter carrying these plus an originator field (host or
guest), it is less attractive than I thought it would be. The only place where we need
to know the initiator is kvm_write_tsc(), which has an implicit index.

I have been trying to determine whether a context switch can occur while a guest-initiated set_msr() is in progress, such that the new thread might invoke the set_msr()/kvm_write_tsc() routines. It looks to me like this is not possible, but I can't be sure. If it is not possible, we can set a per-vcpu variable while a guest call is in progress, and that would be sufficient.
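
A sketch of that alternative (guest_msr_write is a hypothetical per-vcpu
field; this assumes, as above, that a second set_msr() cannot run on the same
vcpu in between):

	/* In the WRMSR exit path, mark the origin around the common call: */
	vcpu->arch.guest_msr_write = true;
	r = kvm_set_msr(vcpu, ecx, data);
	vcpu->arch.guest_msr_write = false;

kvm_write_tsc() would then check vcpu->arch.guest_msr_write instead of taking
a bool parameter.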

What do you think?

Thanks,

Will

> -----Original Message-----
> From: Will Auld [mailto:will.auld.intel@gmail.com]
> Sent: Monday, October 22, 2012 2:58 PM
> To: Avi Kivity
> Cc: Marcelo Tosatti; Auld, Will; kvm@vger.kernel.org; Zhang, Xiantao;
> Liu, Jinsong
> Subject: Re: [PATCH] Added call parameter to track whether invocation
> originated with guest or elsewhere
> 
> On Wed, 2012-10-17 at 16:28 +0200, Avi Kivity wrote:
> > On 10/17/2012 04:09 PM, Marcelo Tosatti wrote:
> > > On Wed, Oct 17, 2012 at 12:35:33PM +0200, Avi Kivity wrote:
> > >> On 10/17/2012 04:10 AM, Will Auld wrote:
> > >> > Signed-off-by: Will Auld <will.auld@intel.com>
> > >> > ---
> > >> >
> > >> > Resending to full list
> > >> >
> > >> > Marcelo,
> > >> >
> > >> > This patch is what I believe you asked for as foundational for
> > >> > later patches to address IA32_TSC_ADJUST.
> > >> >
> > >>
> > >> Please write a changelog to reflect the motivation.
> > >>
> > >> All those bool parameters scattered all over the place aren't very
> > >> pretty.  Usually we solve this with helpers that embed the
> > >> parameter name (kvm_set_msr() vs. kvm_set_msr_host()) but there
> are
> > >> too many functions for this to work here.
> > >>
> > >> Marcelo, any ideas?
> > >
> > > It's easier to read
> > >
> > > kvm_x86_ops->kvm_set_msr()
> > > kvm_x86_ops->kvm_set_msr_host()
> > >
> > > than
> > >
> > > kvm_x86_ops->kvm_set_msr(,false)
> > > kvm_x86_ops->kvm_set_msr(,true)
> > >
> > > So you're right.
> >
> > Yes, but we have a million functions for setting MSRs.
> >
> > Maybe
> >
> > struct msr {
> >     bool host_requested;
> >     u32 index;
> >     u64 data;
> > };
> >
> > and change all the APIs to use that.
> >
> >
> 
> I was working on a different solution and then saw this suggestion just
> now. I like this tack and will look at it more closely.
> 
> Thanks,
> 
> Will



* Re: [PATCH] Added call parameter to track whether invocation originated with guest or elsewhere
  2012-10-23 19:56         ` Auld, Will
@ 2012-10-26 20:48           ` Marcelo Tosatti
  0 siblings, 0 replies; 8+ messages in thread
From: Marcelo Tosatti @ 2012-10-26 20:48 UTC (permalink / raw)
  To: Auld, Will; +Cc: Avi Kivity, kvm, Zhang, Xiantao, Liu, Jinsong

On Tue, Oct 23, 2012 at 07:56:54PM +0000, Auld, Will wrote:
> Having looked more closely at the tack of swapping the index and data fields in some
> function calls for a struct parameter carrying these plus an originator field (host or
> guest), it is less attractive than I thought it would be. The only place where we need
> to know the initiator is kvm_write_tsc(), which has an implicit index.

At the moment yes, but it might have other uses in the future.

> I have been trying to determine whether a context switch can occur while a guest-initiated set_msr() is in progress, such that the new thread might invoke the set_msr()/kvm_write_tsc() routines. It looks to me like this is not possible, but I can't be sure.

It is not possible.

> If it is not possible, we can set a per-vcpu variable while a guest call is in progress, and that would be sufficient.
>
> What do you think?
> Thanks,

The struct parameter seems the preferred choice, as there might be other
uses for this information in the future.

