From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: rkrcmar@redhat.com, bsd@redhat.com, guangrong.xiao@linux.intel.com,
	Yang Zhang, wanpeng.li@linux.intel.com
Subject: [PATCH 11/13] KVM: x86: add SMM to the MMU role
Date: Thu, 30 Apr 2015 13:36:10 +0200
Message-Id: <1430393772-27208-12-git-send-email-pbonzini@redhat.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1430393772-27208-1-git-send-email-pbonzini@redhat.com>
References: <1430393772-27208-1-git-send-email-pbonzini@redhat.com>

The SMM state of the vCPU is now part of the MMU page role; whenever it
may change, the MMU context is reset.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 11 +----------
 arch/x86/kvm/mmu.c              |  5 ++++-
 arch/x86/kvm/x86.c              |  5 ++++-
 3 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 68a09a4394d8..b43c3a708328 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -184,16 +184,6 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };
 
-/*
- * kvm_mmu_page_role, below, is defined as:
- *
- * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
- * bits 4:7 - page table level for this shadow (1-4)
- * bits 8:9 - page table quadrant for 2-level guests
- * bit   16 - direct mapping of virtual to physical mapping at gfn
- *            used for real mode and two-dimensional paging
- * bits 17:19 - common access permissions for all ptes in this shadow page
- */
 union kvm_mmu_page_role {
 	unsigned word;
 	struct {
@@ -207,6 +197,7 @@ union kvm_mmu_page_role {
 		unsigned nxe:1;
 		unsigned cr0_wp:1;
 		unsigned smep_andnot_wp:1;
+		unsigned smm:1;
 	};
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4694ad42aa8b..043680d90964 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3879,6 +3879,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 
 	context->base_role.word = 0;
+	context->base_role.smm = is_smm(vcpu);
 	context->page_fault = tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = nonpaging_invlpg;
@@ -3937,6 +3938,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	context->base_role.cr0_wp = is_write_protection(vcpu);
 	context->base_role.smep_andnot_wp
 		= smep && !is_write_protection(vcpu);
+	context->base_role.smm = is_smm(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -4239,7 +4241,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
+	mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp =
+		mask.smm = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 90ab62f54e1c..a4192c352744 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5542,7 +5542,10 @@ restart:
 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-		vcpu->arch.hflags = ctxt->emul_flags;
+		if (ctxt->emul_flags != vcpu->arch.hflags) {
+			vcpu->arch.hflags = ctxt->emul_flags;
+			kvm_mmu_reset_context(vcpu);
+		}
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r == EMULATE_DONE)
 			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
-- 
1.8.3.1
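
Background note (not part of the patch): shadow pages are keyed by the
role's "word" value, so adding a bit to kvm_mmu_page_role is enough to keep
SMM and non-SMM shadow pages separate.  The standalone user-space sketch
below illustrates only that idea; the union name, the reduced set of fields
and the bit positions are simplified stand-ins, not the kernel layout.

/*
 * Standalone illustration, not kernel code: a cut-down stand-in for
 * kvm_mmu_page_role.  Two roles that differ only in the new "smm" bit
 * produce different role words and therefore select different pages.
 */
#include <stdio.h>

union page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smm:1;		/* the bit this patch adds */
	};
};

int main(void)
{
	union page_role normal = { .word = 0 };
	union page_role smm    = { .word = 0 };

	normal.level = 4;
	smm.level = 4;
	smm.smm = 1;	/* vCPU is in System Management Mode */

	/* Different role words => the MMU allocates separate shadow pages. */
	printf("normal=%#x smm=%#x equal=%d\n",
	       normal.word, smm.word, normal.word == smm.word);
	return 0;
}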