From dc3064e6a73951feae52b47dc1b44a50ae589339 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Fri, 8 Mar 2019 09:12:07 -0800
Subject: [PATCH] KVM: x86: load guest fpu state when accessing MSRs managed
 by XSAVES

A handful of CET MSRs are not context switched through "traditional"
methods, e.g. VMCS or manual switching, but rather are passed through
to the guest and are saved and restored by XSAVES/XRSTORS, i.e. as part
of the guest's FPU state.

Load the guest's FPU state if userspace is accessing MSRs whose values
are managed by XSAVES so that the MSR helper, e.g. vmx_{get,set}_msr(),
can simply do {RD,WR}MSR to access the guest's value.

Note that guest_cpuid_has() is not queried as host userspace is allowed
to access MSRs that have not been exposed to the guest, e.g. it might do
KVM_SET_MSRS prior to KVM_SET_CPUID2.

Signed-off-by: Sean Christopherson
---
 arch/x86/kvm/x86.c | 65 +++++++++++++++++++++++++++++-----------------
 1 file changed, 41 insertions(+), 24 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d90f011f3e32..e0d48f22edd9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2901,6 +2901,38 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
 
+static bool is_xsaves_msr(u32 index)
+{
+	if (!kvm_x86_ops->xsaves_supported())
+		return false;
+
+	return index == MSR_IA32_U_CET ||
+	       (index >= MSR_IA32_PL0_SSP && index <= MSR_IA32_PL3_SSP);
+}
+
+/* Swap (qemu) user FPU context for the guest FPU context. */
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	copy_fpregs_to_fpstate(&current->thread.fpu);
+	/* PKRU is separately restored in kvm_x86_ops->run. */
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
+				~XFEATURE_MASK_PKRU);
+	preempt_enable();
+	trace_kvm_fpu(1);
+}
+
+/* When vcpu_run ends, restore user space FPU context. */
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
+	copy_kernel_to_fpregs(&current->thread.fpu.state);
+	preempt_enable();
+	++vcpu->stat.fpu_reload;
+	trace_kvm_fpu(0);
+}
+
 /*
  * Read or write a bunch of msrs. All parameters are kernel addresses.
  *
@@ -2911,11 +2943,19 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 		    int (*do_msr)(struct kvm_vcpu *vcpu,
 				  unsigned index, u64 *data))
 {
+	bool fpu_loaded = false;
 	int i;
 
-	for (i = 0; i < msrs->nmsrs; ++i)
+	for (i = 0; i < msrs->nmsrs; ++i) {
+		if (!fpu_loaded && is_xsaves_msr(entries[i].index)) {
+			kvm_load_guest_fpu(vcpu);
+			fpu_loaded = true;
+		}
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
+	}
+	if (fpu_loaded)
+		kvm_put_guest_fpu(vcpu);
 
 	return i;
 }
@@ -8116,29 +8156,6 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-/* Swap (qemu) user FPU context for the guest FPU context. */
-static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-	copy_fpregs_to_fpstate(&current->thread.fpu);
-	/* PKRU is separately restored in kvm_x86_ops->run. */
-	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
-				~XFEATURE_MASK_PKRU);
-	preempt_enable();
-	trace_kvm_fpu(1);
-}
-
-/* When vcpu_run ends, restore user space FPU context. */
-static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
-	preempt_disable();
-	copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
-	copy_kernel_to_fpregs(&current->thread.fpu.state);
-	preempt_enable();
-	++vcpu->stat.fpu_reload;
-	trace_kvm_fpu(0);
-}
-
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
-- 
2.21.0
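
For illustration only (not part of the patch): a minimal userspace sketch
of the flow the commit message describes, issuing KVM_SET_MSRS against
XSAVES-managed CET MSRs before any KVM_SET_CPUID2 call. Error handling is
omitted, the MSR indices are hardcoded from the Intel SDM because older
kernel headers may not define them, and whether the writes actually succeed
depends on the rest of the CET series being applied.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* CET MSR indices per the Intel SDM; may be absent from older headers. */
#define MSR_IA32_U_CET		0x6a0
#define MSR_IA32_PL3_SSP	0x6a7

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* KVM_SET_MSRS takes a kvm_msrs header followed by its entries. */
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entries[2];
	} msrs;

	memset(&msrs, 0, sizeof(msrs));
	msrs.hdr.nmsrs = 2;
	msrs.entries[0].index = MSR_IA32_U_CET;
	msrs.entries[1].index = MSR_IA32_PL3_SSP;

	/*
	 * No KVM_SET_CPUID2 has been issued yet; with this patch, __msr_io()
	 * brackets the do_msr() calls with kvm_load_guest_fpu() and
	 * kvm_put_guest_fpu() so the vendor helper can {RD,WR}MSR the
	 * XSAVES-managed values directly.
	 */
	int ret = ioctl(vcpu, KVM_SET_MSRS, &msrs);
	printf("KVM_SET_MSRS wrote %d of %u MSRs\n", ret, msrs.hdr.nmsrs);

	close(vcpu);
	close(vm);
	close(kvm);
	return 0;
}

KVM_SET_MSRS returns the number of entries processed successfully, so a
return value short of nmsrs indicates the first MSR that was rejected.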