All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL
@ 2020-01-21 13:48 Paolo Bonzini
  2020-01-24  8:00 ` Xiaoyao Li
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Paolo Bonzini @ 2020-01-21 13:48 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Jim Mattson

If the guest is configured to have SPEC_CTRL but the host does not
(which is a nonsensical configuration but these are not explicitly
forbidden) then a host-initiated MSR write can write vmx->spec_ctrl
(respectively svm->spec_ctrl) and trigger a #GP when KVM tries to
restore the host value of the MSR.  Add a more comprehensive check
for valid bits of SPEC_CTRL, covering host CPUID flags and,
while we are at it and it is more correct that way, guest CPUID
flags too.

For AMD, remove the unnecessary is_guest_mode check around setting
the MSR interception bitmap, so that the code looks the same as
for Intel.

Cc: Jim Mattson <jmattson@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm.c     |  9 +++------
 arch/x86/kvm/vmx/vmx.c |  7 +++----
 arch/x86/kvm/x86.c     | 22 ++++++++++++++++++++++
 arch/x86/kvm/x86.h     |  1 +
 4 files changed, 29 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b7c5369c7998..235a7e51de96 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4324,12 +4324,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
 			return 1;
 
-		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
 			return 1;
 
 		svm->spec_ctrl = data;
-
 		if (!data)
 			break;
 
@@ -4353,13 +4351,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 		if (data & ~PRED_CMD_IBPB)
 			return 1;
-
+		if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
+			return 1;
 		if (!data)
 			break;
 
 		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-		if (is_guest_mode(vcpu))
-			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index bdbf27e92851..112d2314231d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1998,12 +1998,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
 			return 1;
 
-		/* The STIBP bit doesn't fault even if it's not advertised */
-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
 			return 1;
 
 		vmx->spec_ctrl = data;
-
 		if (!data)
 			break;
 
@@ -2037,7 +2035,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		if (data & ~PRED_CMD_IBPB)
 			return 1;
-
+		if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+			return 1;
 		if (!data)
 			break;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f24f5d16854..141fb129c6bf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10389,6 +10389,28 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
+bool kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
+{
+	uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
+
+	/* The STIBP bit doesn't fault even if it's not advertised */
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+	    !boot_cpu_has(X86_FEATURE_AMD_IBRS))
+		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+
+	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
+	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+		bits &= ~SPEC_CTRL_SSBD;
+	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+		bits &= ~SPEC_CTRL_SSBD;
+
+	return bits;
+}
+EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index ab715cee3653..bc38ac695776 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -367,5 +367,6 @@ static inline bool kvm_pat_valid(u64 data)
 
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
+bool kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
 
 #endif
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL
  2020-01-21 13:48 [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL Paolo Bonzini
@ 2020-01-24  8:00 ` Xiaoyao Li
  2020-01-24  8:22   ` Paolo Bonzini
  2020-01-25  1:32 ` kbuild test robot
  2020-02-18  7:11 ` kbuild test robot
  2 siblings, 1 reply; 5+ messages in thread
From: Xiaoyao Li @ 2020-01-24  8:00 UTC (permalink / raw)
  To: Paolo Bonzini, linux-kernel, kvm; +Cc: Jim Mattson

On 1/21/2020 9:48 PM, Paolo Bonzini wrote:
> If the guest is configured to have SPEC_CTRL but the host does not
> (which is a nonsensical configuration but these are not explicitly
> forbidden) then a host-initiated MSR write can write vmx->spec_ctrl
> (respectively svm->spec_ctrl) and trigger a #GP when KVM tries to
> restore the host value of the MSR.  Add a more comprehensive check
> for valid bits of SPEC_CTRL, covering host CPUID flags and,
> since we are at it and it is more correct that way, guest CPUID
> flags too.
> 
> For AMD, remove the unnecessary is_guest_mode check around setting
> the MSR interception bitmap, so that the code looks the same as
> for Intel.
> 
> Cc: Jim Mattson <jmattson@redhat.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>   arch/x86/kvm/svm.c     |  9 +++------
>   arch/x86/kvm/vmx/vmx.c |  7 +++----
>   arch/x86/kvm/x86.c     | 22 ++++++++++++++++++++++
>   arch/x86/kvm/x86.h     |  1 +
>   4 files changed, 29 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index b7c5369c7998..235a7e51de96 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -4324,12 +4324,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>   		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
>   			return 1;
>   
> -		/* The STIBP bit doesn't fault even if it's not advertised */
> -		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
> +		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
>   			return 1;
>   
>   		svm->spec_ctrl = data;
> -
>   		if (!data)
>   			break;
>   
> @@ -4353,13 +4351,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>   
>   		if (data & ~PRED_CMD_IBPB)
>   			return 1;
> -
> +		if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
> +			return 1;
>   		if (!data)
>   			break;
>   
>   		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
> -		if (is_guest_mode(vcpu))
> -			break;
>   		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
>   		break;
>   	case MSR_AMD64_VIRT_SPEC_CTRL:
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index bdbf27e92851..112d2314231d 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -1998,12 +1998,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
>   			return 1;
>   
> -		/* The STIBP bit doesn't fault even if it's not advertised */
> -		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
> +		if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
>   			return 1;
>   
>   		vmx->spec_ctrl = data;
> -
>   		if (!data)
>   			break;
>   
> @@ -2037,7 +2035,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>   
>   		if (data & ~PRED_CMD_IBPB)
>   			return 1;
> -
> +		if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
> +			return 1;
>   		if (!data)
>   			break;
>   
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 9f24f5d16854..141fb129c6bf 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -10389,6 +10389,28 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
>   }
>   EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
>   
> +bool kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)

The return type should be u64.

> +{
> +	uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
> +
> +	/* The STIBP bit doesn't fault even if it's not advertised */
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
> +	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
> +		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
> +	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
> +	    !boot_cpu_has(X86_FEATURE_AMD_IBRS))
> +		bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
> +
> +	if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
> +	    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
> +		bits &= ~SPEC_CTRL_SSBD;
> +	if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
> +	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
> +		bits &= ~SPEC_CTRL_SSBD;
> +
> +	return bits;
> +}
> +EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
>   
>   EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
>   EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index ab715cee3653..bc38ac695776 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -367,5 +367,6 @@ static inline bool kvm_pat_valid(u64 data)
>   
>   void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
>   void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
> +bool kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
>   
>   #endif
> 


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL
  2020-01-24  8:00 ` Xiaoyao Li
@ 2020-01-24  8:22   ` Paolo Bonzini
  0 siblings, 0 replies; 5+ messages in thread
From: Paolo Bonzini @ 2020-01-24  8:22 UTC (permalink / raw)
  To: Xiaoyao Li, linux-kernel, kvm; +Cc: Jim Mattson

On 24/01/20 09:00, Xiaoyao Li wrote:
>>
>>   +bool kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
> 
> The return type should be u64.

Ugh... Thanks.

Paolo


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL
  2020-01-21 13:48 [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL Paolo Bonzini
  2020-01-24  8:00 ` Xiaoyao Li
@ 2020-01-25  1:32 ` kbuild test robot
  2020-02-18  7:11 ` kbuild test robot
  2 siblings, 0 replies; 5+ messages in thread
From: kbuild test robot @ 2020-01-25  1:32 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 9885 bytes --]

Hi Paolo,

I love your patch! Perhaps something to improve:

[auto build test WARNING on tip/auto-latest]
[also build test WARNING on linux/master linus/master v5.5-rc7 next-20200121]
[cannot apply to kvm/linux-next]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Paolo-Bonzini/KVM-x86-avoid-incorrect-writes-to-host-MSR_IA32_SPEC_CTRL/20200124-083109
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 1e53251a964b1875f82a071c0b59d135dd0cc563
config: x86_64-randconfig-g003-20200125 (attached as .config)
compiler: gcc-7 (Debian 7.5.0-3) 7.5.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   In file included from include/linux/export.h:43:0,
                    from include/linux/linkage.h:7,
                    from include/linux/preempt.h:10,
                    from include/linux/hardirq.h:5,
                    from include/linux/kvm_host.h:7,
                    from arch/x86//kvm/svm.c:17:
   arch/x86//kvm/svm.c: In function 'svm_set_msr':
   arch/x86//kvm/svm.c:4284:14: warning: '~' on a boolean expression [-Wbool-operation]
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
   include/linux/compiler.h:58:52: note: in definition of macro '__trace_if_var'
    #define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
                                                       ^~~~
>> arch/x86//kvm/svm.c:4284:3: note: in expansion of macro 'if'
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
      ^~
   arch/x86//kvm/svm.c:4284:14: note: did you mean to use logical not?
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
   include/linux/compiler.h:58:52: note: in definition of macro '__trace_if_var'
    #define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
                                                       ^~~~
>> arch/x86//kvm/svm.c:4284:3: note: in expansion of macro 'if'
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
      ^~
   arch/x86//kvm/svm.c:4284:14: warning: '~' on a boolean expression [-Wbool-operation]
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
   include/linux/compiler.h:58:61: note: in definition of macro '__trace_if_var'
    #define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
                                                                ^~~~
>> arch/x86//kvm/svm.c:4284:3: note: in expansion of macro 'if'
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
      ^~
   arch/x86//kvm/svm.c:4284:14: note: did you mean to use logical not?
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
   include/linux/compiler.h:58:61: note: in definition of macro '__trace_if_var'
    #define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
                                                                ^~~~
>> arch/x86//kvm/svm.c:4284:3: note: in expansion of macro 'if'
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
      ^~
   arch/x86//kvm/svm.c:4284:14: warning: '~' on a boolean expression [-Wbool-operation]
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
   include/linux/compiler.h:69:3: note: in definition of macro '__trace_if_value'
     (cond) ?     \
      ^~~~
   include/linux/compiler.h:56:28: note: in expansion of macro '__trace_if_var'
    #define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
                               ^~~~~~~~~~~~~~
>> arch/x86//kvm/svm.c:4284:3: note: in expansion of macro 'if'
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
      ^~
   arch/x86//kvm/svm.c:4284:14: note: did you mean to use logical not?
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
   include/linux/compiler.h:69:3: note: in definition of macro '__trace_if_value'
     (cond) ?     \
      ^~~~
   include/linux/compiler.h:56:28: note: in expansion of macro '__trace_if_var'
    #define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
                               ^~~~~~~~~~~~~~
>> arch/x86//kvm/svm.c:4284:3: note: in expansion of macro 'if'
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
      ^~

vim +/if +4284 arch/x86//kvm/svm.c

  4263	
  4264	static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
  4265	{
  4266		struct vcpu_svm *svm = to_svm(vcpu);
  4267	
  4268		u32 ecx = msr->index;
  4269		u64 data = msr->data;
  4270		switch (ecx) {
  4271		case MSR_IA32_CR_PAT:
  4272			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
  4273				return 1;
  4274			vcpu->arch.pat = data;
  4275			svm->vmcb->save.g_pat = data;
  4276			mark_dirty(svm->vmcb, VMCB_NPT);
  4277			break;
  4278		case MSR_IA32_SPEC_CTRL:
  4279			if (!msr->host_initiated &&
  4280			    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
  4281			    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
  4282				return 1;
  4283	
> 4284			if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
  4285				return 1;
  4286	
  4287			svm->spec_ctrl = data;
  4288			if (!data)
  4289				break;
  4290	
  4291			/*
  4292			 * For non-nested:
  4293			 * When it's written (to non-zero) for the first time, pass
  4294			 * it through.
  4295			 *
  4296			 * For nested:
  4297			 * The handling of the MSR bitmap for L2 guests is done in
  4298			 * nested_svm_vmrun_msrpm.
  4299			 * We update the L1 MSR bit as well since it will end up
  4300			 * touching the MSR anyway now.
  4301			 */
  4302			set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
  4303			break;
  4304		case MSR_IA32_PRED_CMD:
  4305			if (!msr->host_initiated &&
  4306			    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
  4307				return 1;
  4308	
  4309			if (data & ~PRED_CMD_IBPB)
  4310				return 1;
  4311			if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
  4312				return 1;
  4313			if (!data)
  4314				break;
  4315	
  4316			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
  4317			set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
  4318			break;
  4319		case MSR_AMD64_VIRT_SPEC_CTRL:
  4320			if (!msr->host_initiated &&
  4321			    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
  4322				return 1;
  4323	
  4324			if (data & ~SPEC_CTRL_SSBD)
  4325				return 1;
  4326	
  4327			svm->virt_spec_ctrl = data;
  4328			break;
  4329		case MSR_STAR:
  4330			svm->vmcb->save.star = data;
  4331			break;
  4332	#ifdef CONFIG_X86_64
  4333		case MSR_LSTAR:
  4334			svm->vmcb->save.lstar = data;
  4335			break;
  4336		case MSR_CSTAR:
  4337			svm->vmcb->save.cstar = data;
  4338			break;
  4339		case MSR_KERNEL_GS_BASE:
  4340			svm->vmcb->save.kernel_gs_base = data;
  4341			break;
  4342		case MSR_SYSCALL_MASK:
  4343			svm->vmcb->save.sfmask = data;
  4344			break;
  4345	#endif
  4346		case MSR_IA32_SYSENTER_CS:
  4347			svm->vmcb->save.sysenter_cs = data;
  4348			break;
  4349		case MSR_IA32_SYSENTER_EIP:
  4350			svm->sysenter_eip = data;
  4351			svm->vmcb->save.sysenter_eip = data;
  4352			break;
  4353		case MSR_IA32_SYSENTER_ESP:
  4354			svm->sysenter_esp = data;
  4355			svm->vmcb->save.sysenter_esp = data;
  4356			break;
  4357		case MSR_TSC_AUX:
  4358			if (!boot_cpu_has(X86_FEATURE_RDTSCP))
  4359				return 1;
  4360	
  4361			/*
  4362			 * This is rare, so we update the MSR here instead of using
  4363			 * direct_access_msrs.  Doing that would require a rdmsr in
  4364			 * svm_vcpu_put.
  4365			 */
  4366			svm->tsc_aux = data;
  4367			wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
  4368			break;
  4369		case MSR_IA32_DEBUGCTLMSR:
  4370			if (!boot_cpu_has(X86_FEATURE_LBRV)) {
  4371				vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
  4372					    __func__, data);
  4373				break;
  4374			}
  4375			if (data & DEBUGCTL_RESERVED_BITS)
  4376				return 1;
  4377	
  4378			svm->vmcb->save.dbgctl = data;
  4379			mark_dirty(svm->vmcb, VMCB_LBR);
  4380			if (data & (1ULL<<0))
  4381				svm_enable_lbrv(svm);
  4382			else
  4383				svm_disable_lbrv(svm);
  4384			break;
  4385		case MSR_VM_HSAVE_PA:
  4386			svm->nested.hsave_msr = data;
  4387			break;
  4388		case MSR_VM_CR:
  4389			return svm_set_vm_cr(vcpu, data);
  4390		case MSR_VM_IGNNE:
  4391			vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
  4392			break;
  4393		case MSR_F10H_DECFG: {
  4394			struct kvm_msr_entry msr_entry;
  4395	
  4396			msr_entry.index = msr->index;
  4397			if (svm_get_msr_feature(&msr_entry))
  4398				return 1;
  4399	
  4400			/* Check the supported bits */
  4401			if (data & ~msr_entry.data)
  4402				return 1;
  4403	
  4404			/* Don't allow the guest to change a bit, #GP */
  4405			if (!msr->host_initiated && (data ^ msr_entry.data))
  4406				return 1;
  4407	
  4408			svm->msr_decfg = data;
  4409			break;
  4410		}
  4411		case MSR_IA32_APICBASE:
  4412			if (kvm_vcpu_apicv_active(vcpu))
  4413				avic_update_vapic_bar(to_svm(vcpu), data);
  4414			/* Fall through */
  4415		default:
  4416			return kvm_set_msr_common(vcpu, msr);
  4417		}
  4418		return 0;
  4419	}
  4420	

---
0-DAY kernel test infrastructure                 Open Source Technology Center
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org Intel Corporation

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 32805 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL
  2020-01-21 13:48 [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL Paolo Bonzini
  2020-01-24  8:00 ` Xiaoyao Li
  2020-01-25  1:32 ` kbuild test robot
@ 2020-02-18  7:11 ` kbuild test robot
  2 siblings, 0 replies; 5+ messages in thread
From: kbuild test robot @ 2020-02-18  7:11 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 11027 bytes --]

Hi Paolo,

I love your patch! Perhaps something to improve:

[auto build test WARNING on tip/auto-latest]
[also build test WARNING on linux/master]
[cannot apply to kvm/linux-next linus/master v5.6-rc2 next-20200217]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Paolo-Bonzini/KVM-x86-avoid-incorrect-writes-to-host-MSR_IA32_SPEC_CTRL/20200124-083109
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 1e53251a964b1875f82a071c0b59d135dd0cc563
config: x86_64-lkp (attached as .config)
compiler: gcc-7 (Debian 7.5.0-3) 7.5.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   arch/x86/kvm/vmx/vmx.c: In function 'vmx_set_msr':
>> arch/x86/kvm/vmx/vmx.c:1997:14: warning: '~' on a boolean expression [-Wbool-operation]
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
   arch/x86/kvm/vmx/vmx.c:1997:14: note: did you mean to use logical not?
      if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
                 ^
                 !

vim +1997 arch/x86/kvm/vmx/vmx.c

  1917	
  1918	/*
  1919	 * Writes msr value into the appropriate "register".
  1920	 * Returns 0 on success, non-0 otherwise.
  1921	 * Assumes vcpu_load() was already called.
  1922	 */
  1923	static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  1924	{
  1925		struct vcpu_vmx *vmx = to_vmx(vcpu);
  1926		struct shared_msr_entry *msr;
  1927		int ret = 0;
  1928		u32 msr_index = msr_info->index;
  1929		u64 data = msr_info->data;
  1930		u32 index;
  1931	
  1932		switch (msr_index) {
  1933		case MSR_EFER:
  1934			ret = kvm_set_msr_common(vcpu, msr_info);
  1935			break;
  1936	#ifdef CONFIG_X86_64
  1937		case MSR_FS_BASE:
  1938			vmx_segment_cache_clear(vmx);
  1939			vmcs_writel(GUEST_FS_BASE, data);
  1940			break;
  1941		case MSR_GS_BASE:
  1942			vmx_segment_cache_clear(vmx);
  1943			vmcs_writel(GUEST_GS_BASE, data);
  1944			break;
  1945		case MSR_KERNEL_GS_BASE:
  1946			vmx_write_guest_kernel_gs_base(vmx, data);
  1947			break;
  1948	#endif
  1949		case MSR_IA32_SYSENTER_CS:
  1950			if (is_guest_mode(vcpu))
  1951				get_vmcs12(vcpu)->guest_sysenter_cs = data;
  1952			vmcs_write32(GUEST_SYSENTER_CS, data);
  1953			break;
  1954		case MSR_IA32_SYSENTER_EIP:
  1955			if (is_guest_mode(vcpu))
  1956				get_vmcs12(vcpu)->guest_sysenter_eip = data;
  1957			vmcs_writel(GUEST_SYSENTER_EIP, data);
  1958			break;
  1959		case MSR_IA32_SYSENTER_ESP:
  1960			if (is_guest_mode(vcpu))
  1961				get_vmcs12(vcpu)->guest_sysenter_esp = data;
  1962			vmcs_writel(GUEST_SYSENTER_ESP, data);
  1963			break;
  1964		case MSR_IA32_DEBUGCTLMSR:
  1965			if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
  1966							VM_EXIT_SAVE_DEBUG_CONTROLS)
  1967				get_vmcs12(vcpu)->guest_ia32_debugctl = data;
  1968	
  1969			ret = kvm_set_msr_common(vcpu, msr_info);
  1970			break;
  1971	
  1972		case MSR_IA32_BNDCFGS:
  1973			if (!kvm_mpx_supported() ||
  1974			    (!msr_info->host_initiated &&
  1975			     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
  1976				return 1;
  1977			if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
  1978			    (data & MSR_IA32_BNDCFGS_RSVD))
  1979				return 1;
  1980			vmcs_write64(GUEST_BNDCFGS, data);
  1981			break;
  1982		case MSR_IA32_UMWAIT_CONTROL:
  1983			if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
  1984				return 1;
  1985	
  1986			/* The reserved bit 1 and non-32 bit [63:32] should be zero */
  1987			if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
  1988				return 1;
  1989	
  1990			vmx->msr_ia32_umwait_control = data;
  1991			break;
  1992		case MSR_IA32_SPEC_CTRL:
  1993			if (!msr_info->host_initiated &&
  1994			    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
  1995				return 1;
  1996	
> 1997			if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
  1998				return 1;
  1999	
  2000			vmx->spec_ctrl = data;
  2001			if (!data)
  2002				break;
  2003	
  2004			/*
  2005			 * For non-nested:
  2006			 * When it's written (to non-zero) for the first time, pass
  2007			 * it through.
  2008			 *
  2009			 * For nested:
  2010			 * The handling of the MSR bitmap for L2 guests is done in
  2011			 * nested_vmx_merge_msr_bitmap. We should not touch the
  2012			 * vmcs02.msr_bitmap here since it gets completely overwritten
  2013			 * in the merging. We update the vmcs01 here for L1 as well
  2014			 * since it will end up touching the MSR anyway now.
  2015			 */
  2016			vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
  2017						      MSR_IA32_SPEC_CTRL,
  2018						      MSR_TYPE_RW);
  2019			break;
  2020		case MSR_IA32_TSX_CTRL:
  2021			if (!msr_info->host_initiated &&
  2022			    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
  2023				return 1;
  2024			if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
  2025				return 1;
  2026			goto find_shared_msr;
  2027		case MSR_IA32_PRED_CMD:
  2028			if (!msr_info->host_initiated &&
  2029			    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
  2030				return 1;
  2031	
  2032			if (data & ~PRED_CMD_IBPB)
  2033				return 1;
  2034			if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL))
  2035				return 1;
  2036			if (!data)
  2037				break;
  2038	
  2039			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
  2040	
  2041			/*
  2042			 * For non-nested:
  2043			 * When it's written (to non-zero) for the first time, pass
  2044			 * it through.
  2045			 *
  2046			 * For nested:
  2047			 * The handling of the MSR bitmap for L2 guests is done in
  2048			 * nested_vmx_merge_msr_bitmap. We should not touch the
  2049			 * vmcs02.msr_bitmap here since it gets completely overwritten
  2050			 * in the merging.
  2051			 */
  2052			vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
  2053						      MSR_TYPE_W);
  2054			break;
  2055		case MSR_IA32_CR_PAT:
  2056			if (!kvm_pat_valid(data))
  2057				return 1;
  2058	
  2059			if (is_guest_mode(vcpu) &&
  2060			    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
  2061				get_vmcs12(vcpu)->guest_ia32_pat = data;
  2062	
  2063			if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
  2064				vmcs_write64(GUEST_IA32_PAT, data);
  2065				vcpu->arch.pat = data;
  2066				break;
  2067			}
  2068			ret = kvm_set_msr_common(vcpu, msr_info);
  2069			break;
  2070		case MSR_IA32_TSC_ADJUST:
  2071			ret = kvm_set_msr_common(vcpu, msr_info);
  2072			break;
  2073		case MSR_IA32_MCG_EXT_CTL:
  2074			if ((!msr_info->host_initiated &&
  2075			     !(to_vmx(vcpu)->msr_ia32_feature_control &
  2076			       FEAT_CTL_LMCE_ENABLED)) ||
  2077			    (data & ~MCG_EXT_CTL_LMCE_EN))
  2078				return 1;
  2079			vcpu->arch.mcg_ext_ctl = data;
  2080			break;
  2081		case MSR_IA32_FEAT_CTL:
  2082			if (!vmx_feature_control_msr_valid(vcpu, data) ||
  2083			    (to_vmx(vcpu)->msr_ia32_feature_control &
  2084			     FEAT_CTL_LOCKED && !msr_info->host_initiated))
  2085				return 1;
  2086			vmx->msr_ia32_feature_control = data;
  2087			if (msr_info->host_initiated && data == 0)
  2088				vmx_leave_nested(vcpu);
  2089			break;
  2090		case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
  2091			if (!msr_info->host_initiated)
  2092				return 1; /* they are read-only */
  2093			if (!nested_vmx_allowed(vcpu))
  2094				return 1;
  2095			return vmx_set_vmx_msr(vcpu, msr_index, data);
  2096		case MSR_IA32_RTIT_CTL:
  2097			if ((pt_mode != PT_MODE_HOST_GUEST) ||
  2098				vmx_rtit_ctl_check(vcpu, data) ||
  2099				vmx->nested.vmxon)
  2100				return 1;
  2101			vmcs_write64(GUEST_IA32_RTIT_CTL, data);
  2102			vmx->pt_desc.guest.ctl = data;
  2103			pt_update_intercept_for_msr(vmx);
  2104			break;
  2105		case MSR_IA32_RTIT_STATUS:
  2106			if ((pt_mode != PT_MODE_HOST_GUEST) ||
  2107				(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
  2108				(data & MSR_IA32_RTIT_STATUS_MASK))
  2109				return 1;
  2110			vmx->pt_desc.guest.status = data;
  2111			break;
  2112		case MSR_IA32_RTIT_CR3_MATCH:
  2113			if ((pt_mode != PT_MODE_HOST_GUEST) ||
  2114				(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
  2115				!intel_pt_validate_cap(vmx->pt_desc.caps,
  2116							PT_CAP_cr3_filtering))
  2117				return 1;
  2118			vmx->pt_desc.guest.cr3_match = data;
  2119			break;
  2120		case MSR_IA32_RTIT_OUTPUT_BASE:
  2121			if ((pt_mode != PT_MODE_HOST_GUEST) ||
  2122				(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
  2123				(!intel_pt_validate_cap(vmx->pt_desc.caps,
  2124						PT_CAP_topa_output) &&
  2125				 !intel_pt_validate_cap(vmx->pt_desc.caps,
  2126						PT_CAP_single_range_output)) ||
  2127				(data & MSR_IA32_RTIT_OUTPUT_BASE_MASK))
  2128				return 1;
  2129			vmx->pt_desc.guest.output_base = data;
  2130			break;
  2131		case MSR_IA32_RTIT_OUTPUT_MASK:
  2132			if ((pt_mode != PT_MODE_HOST_GUEST) ||
  2133				(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
  2134				(!intel_pt_validate_cap(vmx->pt_desc.caps,
  2135						PT_CAP_topa_output) &&
  2136				 !intel_pt_validate_cap(vmx->pt_desc.caps,
  2137						PT_CAP_single_range_output)))
  2138				return 1;
  2139			vmx->pt_desc.guest.output_mask = data;
  2140			break;
  2141		case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
  2142			index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
  2143			if ((pt_mode != PT_MODE_HOST_GUEST) ||
  2144				(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
  2145				(index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
  2146						PT_CAP_num_address_ranges)))
  2147				return 1;
  2148			if (index % 2)
  2149				vmx->pt_desc.guest.addr_b[index / 2] = data;
  2150			else
  2151				vmx->pt_desc.guest.addr_a[index / 2] = data;
  2152			break;
  2153		case MSR_TSC_AUX:
  2154			if (!msr_info->host_initiated &&
  2155			    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
  2156				return 1;
  2157			/* Check reserved bit, higher 32 bits should be zero */
  2158			if ((data >> 32) != 0)
  2159				return 1;
  2160			goto find_shared_msr;
  2161	
  2162		default:
  2163		find_shared_msr:
  2164			msr = find_msr_entry(vmx, msr_index);
  2165			if (msr)
  2166				ret = vmx_set_guest_msr(vmx, msr, data);
  2167			else
  2168				ret = kvm_set_msr_common(vcpu, msr_info);
  2169		}
  2170	
  2171		return ret;
  2172	}
  2173	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 28760 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2020-02-18  7:11 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-21 13:48 [PATCH] KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL Paolo Bonzini
2020-01-24  8:00 ` Xiaoyao Li
2020-01-24  8:22   ` Paolo Bonzini
2020-01-25  1:32 ` kbuild test robot
2020-02-18  7:11 ` kbuild test robot

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.