On 29/10/19 21:33, speck for mark gross wrote:
> On Fri, Oct 25, 2019 at 12:39:58PM +0200, speck for Paolo Bonzini wrote:
>> On 25/10/19 11:45, speck for Joerg Roedel wrote:
>>> On Fri, Oct 25, 2019 at 11:08:25AM +0200, speck for Paolo Bonzini wrote:
>>>> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>>>> index e7970a2e8eae..8979d5e7b6f5 100644
>>>> --- a/arch/x86/kvm/vmx/vmx.c
>>>> +++ b/arch/x86/kvm/vmx/vmx.c
>>>> @@ -969,17 +969,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
>>>>  	u64 guest_efer = vmx->vcpu.arch.efer;
>>>>  	u64 ignore_bits = 0;
>>>>  
>>>> -	if (!enable_ept) {
>>>> -		/*
>>>> -		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
>>>> -		 * host CPUID is more efficient than testing guest CPUID
>>>> -		 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
>>>> -		 */
>>>> -		if (boot_cpu_has(X86_FEATURE_SMEP))
>>>> -			guest_efer |= EFER_NX;
>>>> -		else if (!(guest_efer & EFER_NX))
>>>> -			ignore_bits |= EFER_NX;
>>>> -	}
>>>> +	/* Shadow paging assumes the NX bit to be available. */
>>>> +	if (!enable_ept)
>>>> +		guest_efer |= EFER_NX;
>>>>  
>>>>  	/*
>>>>  	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
>>>
>>> Works with ept on and off, thanks.
>>
>> Thanks, I'll include also the AMD version in a new patch and send it out
>> as v8:
>>
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index 4153ca8cddb7..29feb3ecc91c 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -739,8 +739,12 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
>>  static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
>>  {
>>  	vcpu->arch.efer = efer;
>> -	if (!npt_enabled && !(efer & EFER_LMA))
>> -		efer &= ~EFER_LME;
>> +	if (!npt_enabled) {
>> +		/* Shadow paging assumes the NX bit to be available. */
>> +		efer |= EFER_NXE;
> ^EFER_NX ?

Yes, see message id 20191027152323.24326-1-pbonzini@redhat.com on
kvm@vger.kernel.org; I am about to send it to Linus.

Paolo

> 
>> +		if (!(efer & EFER_LMA))
>> +			efer &= ~EFER_LME;
>> +	}
>>  
>>  	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
>>  	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
>>
>> Paolo
>>
> 
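
As an aside on the two hunks above: the following is a minimal, standalone C
sketch (not kernel code; the helper names and the test harness are made up for
illustration, and the EFER_NXE typo is already corrected to EFER_NX) of the net
effect of the change. The idea, per the added comment, is that the shadow MMU
may set NX in its shadow page tables (the removed VMX comment mentions handling
CR0.WP=1/CR4.SMEP=1), so when EPT/NPT is disabled the guest-visible EFER handed
to hardware must have EFER.NX forced on regardless of what the guest programmed.
The EFER bit positions used here are the architectural ones.

/* Standalone sketch of the simplified EFER handling; compile with any C compiler. */
#include <stdint.h>
#include <stdio.h>

#define EFER_LME  (1ULL << 8)   /* long mode enable  */
#define EFER_LMA  (1ULL << 10)  /* long mode active  */
#define EFER_NX   (1ULL << 11)  /* no-execute enable */

/* VMX side: mirrors the simplified update_transition_efer() hunk. */
static uint64_t vmx_effective_efer(uint64_t guest_efer, int enable_ept)
{
	/* Shadow paging assumes the NX bit to be available. */
	if (!enable_ept)
		guest_efer |= EFER_NX;
	return guest_efer;
}

/* SVM side: mirrors the svm_set_efer() hunk (with EFER_NXE fixed to EFER_NX). */
static uint64_t svm_effective_efer(uint64_t efer, int npt_enabled)
{
	if (!npt_enabled) {
		/* Shadow paging assumes the NX bit to be available. */
		efer |= EFER_NX;
		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}
	return efer;
}

int main(void)
{
	/* A guest that never enabled NX still gets EFER.NX when shadow paging is used. */
	printf("vmx: %#llx\n", (unsigned long long)vmx_effective_efer(0, 0));
	printf("svm: %#llx\n", (unsigned long long)svm_effective_efer(EFER_LME, 0));
	return 0;
}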