* Re: [PATCH v2] kvm: nvmx: limit atomic switch MSRs
2019-09-14 0:39 [PATCH v2] kvm: nvmx: limit atomic switch MSRs Marc Orr
@ 2019-09-16 21:13 ` Marc Orr
2019-09-16 23:02 ` Sean Christopherson
2019-09-24 13:53 ` Paolo Bonzini
2 siblings, 0 replies; 5+ messages in thread
From: Marc Orr @ 2019-09-16 21:13 UTC (permalink / raw)
To: kvm, Jim Mattson, Peter Shier, Sean Christopherson
> +static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
> +{
> +	return fixed_bits_valid(control, low, high);
> +}
> +
> +static inline u64 vmx_control_msr(u32 low, u32 high)
> +{
> +	return low | ((u64)high << 32);
> +}
> +
>  static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
>  {
>  	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
> @@ -856,6 +866,17 @@ static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>
> +static u64 vmx_control_msr(u32 low, u32 high);
Oops, I forgot to delete this declaration; it is no longer needed in v2
after applying Sean's suggestions. I can send out a v3 with this line
removed once folks have commented on this version.
* Re: [PATCH v2] kvm: nvmx: limit atomic switch MSRs
2019-09-14 0:39 [PATCH v2] kvm: nvmx: limit atomic switch MSRs Marc Orr
2019-09-16 21:13 ` Marc Orr
@ 2019-09-16 23:02 ` Sean Christopherson
2019-09-17 19:00 ` Marc Orr
2019-09-24 13:53 ` Paolo Bonzini
2 siblings, 1 reply; 5+ messages in thread
From: Sean Christopherson @ 2019-09-16 23:02 UTC (permalink / raw)
To: Marc Orr; +Cc: kvm, jmattson, pshier
On Fri, Sep 13, 2019 at 05:39:40PM -0700, Marc Orr wrote:
> Allowing an unlimited number of MSRs to be specified via the VMX
> load/store MSR lists (e.g., vm-entry MSR load list) is bad for two
> reasons. First, a guest can specify an unreasonable number of MSRs,
> forcing KVM to process all of them in software. Second, the SDM bounds
> the number of MSRs allowed to be packed into the atomic switch MSR lists.
> Quoting the "Miscellaneous Data" section in the "VMX Capability
> Reporting Facility" appendix:
>
> "Bits 27:25 is used to compute the recommended maximum number of MSRs
> that should appear in the VM-exit MSR-store list, the VM-exit MSR-load
> list, or the VM-entry MSR-load list. Specifically, if the value bits
> 27:25 of IA32_VMX_MISC is N, then 512 * (N + 1) is the recommended
> maximum number of MSRs to be included in each list. If the limit is
> exceeded, undefined processor behavior may result (including a machine
> check during the VMX transition)."
>
> Thus, force a VM-entry to fail due to MSR loading when the MSR load
> list is too large. Similarly, trigger an abort during a VM exit that
> encounters an MSR load list or MSR store list that is too large.
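For reference, here's a minimal, self-contained sketch of the limit
computation the SDM describes; the function name is illustrative, not an
existing KVM helper:

	#include <stdint.h>

	/*
	 * Recommended maximum length of each atomic switch MSR list:
	 * 512 * (N + 1), where N is the value of bits 27:25 of IA32_VMX_MISC.
	 */
	static uint32_t max_atomic_switch_msrs(uint64_t ia32_vmx_misc)
	{
		uint32_t n = (ia32_vmx_misc >> 25) & 0x7;	/* bits 27:25 */

		return (n + 1) * 512;	/* N == 0 gives the common 512-entry limit */
	}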
It's probably redundant/obvious, but I think it's worth calling out that
this is arbitrary KVM behavior, e.g. replace "Thus," with something like:
Because KVM needs to protect itself and can't model "undefined processor
behavior", arbitrarily
The changelog (and maybe a comment in the code) should also state that
the count is intentionally not pre-checked so as to maintain compatibility
with hardware inasmuch as possible. That's a subtlety that's likely to
lead to "cleanup" in the future :-)
Code itself looks good, with the spurious vmx_control_msr() removed.
> Test these new checks with the kvm-unit-test "x86: nvmx: test max atomic
> switch MSRs".
>
> Suggested-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Peter Shier <pshier@google.com>
> Signed-off-by: Marc Orr <marcorr@google.com>
> ---
> v1 -> v2
> * Updated description to distinguish the relevant appendix.
> * Renamed VMX_MISC_MSR_LIST_INCREMENT to VMX_MISC_MSR_LIST_MULTIPLIER.
> * Moved vmx_control_msr() and vmx_control_verify() up in the source.
> * Modified nested_vmx_store_msr() to fail lazily, like
> nested_vmx_load_msr().
>
> arch/x86/include/asm/vmx.h | 1 +
> arch/x86/kvm/vmx/nested.c | 41 ++++++++++++++++++++++++++++----------
> 2 files changed, 31 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
> index a39136b0d509..a1f6ed187ccd 100644
> --- a/arch/x86/include/asm/vmx.h
> +++ b/arch/x86/include/asm/vmx.h
> @@ -110,6 +110,7 @@
> #define VMX_MISC_SAVE_EFER_LMA 0x00000020
> #define VMX_MISC_ACTIVITY_HLT 0x00000040
> #define VMX_MISC_ZERO_LEN_INS 0x40000000
> +#define VMX_MISC_MSR_LIST_MULTIPLIER 512
>
> /* VMFUNC functions */
> #define VMX_VMFUNC_EPTP_SWITCHING 0x00000001
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index ced9fba32598..bca0167b8bdd 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -190,6 +190,16 @@ static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
>  	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
>  }
>
> +static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
> +{
> +	return fixed_bits_valid(control, low, high);
> +}
> +
> +static inline u64 vmx_control_msr(u32 low, u32 high)
> +{
> +	return low | ((u64)high << 32);
> +}
> +
>  static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
>  {
>  	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
> @@ -856,6 +866,17 @@ static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>
> +static u64 vmx_control_msr(u32 low, u32 high);
> +
> +static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
> +				       vmx->nested.msrs.misc_high);
> +
> +	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
> +}
> +
>  /*
>   * Load guest's/host's msr at nested entry/exit.
>   * return 0 for success, entry index for failure.
> @@ -865,9 +886,13 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
>  	u32 i;
>  	struct vmx_msr_entry e;
>  	struct msr_data msr;
> +	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
>
>  	msr.host_initiated = false;
>  	for (i = 0; i < count; i++) {
> +		if (unlikely(i >= max_msr_list_size))
> +			goto fail;
> +
>  		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
>  					&e, sizeof(e))) {
>  			pr_debug_ratelimited(
> @@ -899,9 +924,14 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
>  {
>  	u32 i;
>  	struct vmx_msr_entry e;
> +	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
>
>  	for (i = 0; i < count; i++) {
>  		struct msr_data msr_info;
> +
> +		if (unlikely(i >= max_msr_list_size))
> +			return -EINVAL;
> +
>  		if (kvm_vcpu_read_guest(vcpu,
>  					gpa + i * sizeof(e),
>  					&e, 2 * sizeof(u32))) {
> @@ -1009,17 +1039,6 @@ static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
>  	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
>  }
>
> -
> -static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
> -{
> -	return fixed_bits_valid(control, low, high);
> -}
> -
> -static inline u64 vmx_control_msr(u32 low, u32 high)
> -{
> -	return low | ((u64)high << 32);
> -}
> -
>  static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
>  {
>  	superset &= mask;
> --
> 2.23.0.237.gc6a4ce50a0-goog
>
* Re: [PATCH v2] kvm: nvmx: limit atomic switch MSRs
2019-09-16 23:02 ` Sean Christopherson
@ 2019-09-17 19:00 ` Marc Orr
0 siblings, 0 replies; 5+ messages in thread
From: Marc Orr @ 2019-09-17 19:00 UTC (permalink / raw)
To: Sean Christopherson; +Cc: kvm, Jim Mattson, Peter Shier
> > Allowing an unlimited number of MSRs to be specified via the VMX
> > load/store MSR lists (e.g., vm-entry MSR load list) is bad for two
> > reasons. First, a guest can specify an unreasonable number of MSRs,
> > forcing KVM to process all of them in software. Second, the SDM bounds
> > the number of MSRs allowed to be packed into the atomic switch MSR lists.
> > Quoting the "Miscellaneous Data" section in the "VMX Capability
> > Reporting Facility" appendix:
> >
> > "Bits 27:25 is used to compute the recommended maximum number of MSRs
> > that should appear in the VM-exit MSR-store list, the VM-exit MSR-load
> > list, or the VM-entry MSR-load list. Specifically, if the value bits
> > 27:25 of IA32_VMX_MISC is N, then 512 * (N + 1) is the recommended
> > maximum number of MSRs to be included in each list. If the limit is
> > exceeded, undefined processor behavior may result (including a machine
> > check during the VMX transition)."
> >
> > Thus, force a VM-entry to fail due to MSR loading when the MSR load
> > list is too large. Similarly, trigger an abort during a VM exit that
> > encounters an MSR load list or MSR store list that is too large.
>
> It's probably redundant/obvious, but I think it's worth calling out that
> this is arbitrary KVM behavior, e.g. replace "Thus," with something like:
>
> Because KVM needs to protect itself and can't model "undefined processor
> behavior", arbitrarily
>
> The changelog (and maybe a comment in the code) should also state that
> > the count is intentionally not pre-checked so as to maintain compatibility
> with hardware inasmuch as possible. That's a subtlety that's likely to
> lead to "cleanup" in the future :-)
Done.
> Code itself looks good, with the spurious vmx_control_msr() removed.
Done.
* Re: [PATCH v2] kvm: nvmx: limit atomic switch MSRs
2019-09-14 0:39 [PATCH v2] kvm: nvmx: limit atomic switch MSRs Marc Orr
2019-09-16 21:13 ` Marc Orr
2019-09-16 23:02 ` Sean Christopherson
@ 2019-09-24 13:53 ` Paolo Bonzini
2 siblings, 0 replies; 5+ messages in thread
From: Paolo Bonzini @ 2019-09-24 13:53 UTC (permalink / raw)
To: Marc Orr, kvm, jmattson, pshier, sean.j.christopherson
On 14/09/19 02:39, Marc Orr wrote:
> Allowing an unlimited number of MSRs to be specified via the VMX
> load/store MSR lists (e.g., vm-entry MSR load list) is bad for two
> reasons. First, a guest can specify an unreasonable number of MSRs,
> forcing KVM to process all of them in software. Second, the SDM bounds
> the number of MSRs allowed to be packed into the atomic switch MSR lists.
> Quoting the "Miscellaneous Data" section in the "VMX Capability
> Reporting Facility" appendix:
>
> "Bits 27:25 is used to compute the recommended maximum number of MSRs
> that should appear in the VM-exit MSR-store list, the VM-exit MSR-load
> list, or the VM-entry MSR-load list. Specifically, if the value bits
> 27:25 of IA32_VMX_MISC is N, then 512 * (N + 1) is the recommended
> maximum number of MSRs to be included in each list. If the limit is
> exceeded, undefined processor behavior may result (including a machine
> check during the VMX transition)."
>
> Thus, force a VM-entry to fail due to MSR loading when the MSR load
> list is too large. Similarly, trigger an abort during a VM exit that
> encounters an MSR load list or MSR store list that is too large.
>
> Test these new checks with the kvm-unit-test "x86: nvmx: test max atomic
> switch MSRs".
>
> Suggested-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Reviewed-by: Peter Shier <pshier@google.com>
> Signed-off-by: Marc Orr <marcorr@google.com>
> ---
> v1 -> v2
> * Updated description to distinguish the relevant appendix.
> * Renamed VMX_MISC_MSR_LIST_INCREMENT to VMX_MISC_MSR_LIST_MULTIPLIER.
> * Moved vmx_control_msr() and vmx_control_verify() up in the source.
> * Modified nested_vmx_store_msr() to fail lazily, like
> nested_vmx_load_msr().
>
> arch/x86/include/asm/vmx.h | 1 +
> arch/x86/kvm/vmx/nested.c | 41 ++++++++++++++++++++++++++++----------
> 2 files changed, 31 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
> index a39136b0d509..a1f6ed187ccd 100644
> --- a/arch/x86/include/asm/vmx.h
> +++ b/arch/x86/include/asm/vmx.h
> @@ -110,6 +110,7 @@
> #define VMX_MISC_SAVE_EFER_LMA 0x00000020
> #define VMX_MISC_ACTIVITY_HLT 0x00000040
> #define VMX_MISC_ZERO_LEN_INS 0x40000000
> +#define VMX_MISC_MSR_LIST_MULTIPLIER 512
>
> /* VMFUNC functions */
> #define VMX_VMFUNC_EPTP_SWITCHING 0x00000001
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index ced9fba32598..bca0167b8bdd 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -190,6 +190,16 @@ static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
>  	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
>  }
>
> +static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
> +{
> +	return fixed_bits_valid(control, low, high);
> +}
> +
> +static inline u64 vmx_control_msr(u32 low, u32 high)
> +{
> +	return low | ((u64)high << 32);
> +}
> +
>  static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
>  {
>  	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
> @@ -856,6 +866,17 @@ static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
>  	return 0;
>  }
>
> +static u64 vmx_control_msr(u32 low, u32 high);
> +
> +static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
> +				       vmx->nested.msrs.misc_high);
> +
> +	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
> +}
> +
>  /*
>   * Load guest's/host's msr at nested entry/exit.
>   * return 0 for success, entry index for failure.
> @@ -865,9 +886,13 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
>  	u32 i;
>  	struct vmx_msr_entry e;
>  	struct msr_data msr;
> +	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
>
>  	msr.host_initiated = false;
>  	for (i = 0; i < count; i++) {
> +		if (unlikely(i >= max_msr_list_size))
> +			goto fail;
> +
>  		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
>  					&e, sizeof(e))) {
>  			pr_debug_ratelimited(
> @@ -899,9 +924,14 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
>  {
>  	u32 i;
>  	struct vmx_msr_entry e;
> +	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
>
>  	for (i = 0; i < count; i++) {
>  		struct msr_data msr_info;
> +
> +		if (unlikely(i >= max_msr_list_size))
> +			return -EINVAL;
> +
>  		if (kvm_vcpu_read_guest(vcpu,
>  					gpa + i * sizeof(e),
>  					&e, 2 * sizeof(u32))) {
> @@ -1009,17 +1039,6 @@ static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
>  	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
>  }
>
> -
> -static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
> -{
> -	return fixed_bits_valid(control, low, high);
> -}
> -
> -static inline u64 vmx_control_msr(u32 low, u32 high)
> -{
> -	return low | ((u64)high << 32);
> -}
> -
>  static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
>  {
>  	superset &= mask;
>
Can you please resubmit?
Thanks,
Paolo