From mboxrd@z Thu Jan 1 00:00:00 1970
From: Paolo Bonzini
Subject: Re: [PATCH v3 02/13] nEPT: Move gpte_access() and prefetch_invalid_gpte() to paging_tmpl.h
Date: Mon, 20 May 2013 14:34:31 +0200
Message-ID: <519A1857.9010802@redhat.com>
References: <1368939152-11406-1-git-send-email-jun.nakajima@intel.com>
 <1368939152-11406-2-git-send-email-jun.nakajima@intel.com>
Mime-Version: 1.0
Content-Type: text/plain; charset=ISO-8859-15
Content-Transfer-Encoding: 7bit
Cc: kvm@vger.kernel.org, Gleb Natapov
To: Jun Nakajima
Return-path:
Received: from mx1.redhat.com ([209.132.183.28]:54172 "EHLO mx1.redhat.com"
 rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1754878Ab3ETMej
 (ORCPT ); Mon, 20 May 2013 08:34:39 -0400
In-Reply-To: <1368939152-11406-2-git-send-email-jun.nakajima@intel.com>
Sender: kvm-owner@vger.kernel.org
List-ID:

On 19/05/2013 06:52, Jun Nakajima wrote:
> From: Nadav Har'El
> 
> For preparation, we just move gpte_access() and prefetch_invalid_gpte()
> from mmu.c to paging_tmpl.h.
> 
> Signed-off-by: Nadav Har'El
> Signed-off-by: Jun Nakajima
> Signed-off-by: Xinhao Xu
> ---
>  arch/x86/kvm/mmu.c         | 30 ------------------------------
>  arch/x86/kvm/paging_tmpl.h | 40 +++++++++++++++++++++++++++++++++++-----
>  2 files changed, 35 insertions(+), 35 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 004cc87..117233f 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2488,26 +2488,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
>  	return gfn_to_pfn_memslot_atomic(slot, gfn);
>  }
>  
> -static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
> -				  struct kvm_mmu_page *sp, u64 *spte,
> -				  u64 gpte)
> -{
> -	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> -		goto no_present;
> -
> -	if (!is_present_gpte(gpte))
> -		goto no_present;
> -
> -	if (!(gpte & PT_ACCESSED_MASK))
> -		goto no_present;
> -
> -	return false;
> -
> -no_present:
> -	drop_spte(vcpu->kvm, spte);
> -	return true;
> -}
> -
>  static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
>  				    struct kvm_mmu_page *sp,
>  				    u64 *start, u64 *end)
> @@ -3408,16 +3388,6 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
>  	return false;
>  }
>  
> -static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
> -{
> -	unsigned access;
> -
> -	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> -	access &= ~(gpte >> PT64_NX_SHIFT);
> -
> -	return access;
> -}
> -
>  static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
>  {
>  	unsigned index;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index da20860..df34d4a 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -103,6 +103,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
>  	return (ret != orig_pte);
>  }
>  
> +static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
> +				  struct kvm_mmu_page *sp, u64 *spte,
> +				  u64 gpte)
> +{
> +	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
> +		goto no_present;
> +
> +	if (!is_present_gpte(gpte))
> +		goto no_present;
> +
> +	if (!(gpte & PT_ACCESSED_MASK))
> +		goto no_present;
> +
> +	return false;
> +
> +no_present:
> +	drop_spte(vcpu->kvm, spte);
> +	return true;
> +}
> +
> +static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
> +{
> +	unsigned access;
> +
> +	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
> +	access &= ~(gpte >> PT64_NX_SHIFT);
> +
> +	return access;
> +}
> +
>  static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
>  					     struct kvm_mmu *mmu,
>  					     struct guest_walker *walker,
> @@ -225,7 +255,7 @@ retry_walk:
>  		}
>  
>  		accessed_dirty &= pte;
> -		pte_access = pt_access & gpte_access(vcpu, pte);
> +		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
>  
>  		walker->ptes[walker->level - 1] = pte;
>  	} while (!is_last_gpte(mmu, walker->level, pte));
> @@ -309,13 +339,13 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>  	gfn_t gfn;
>  	pfn_t pfn;
>  
> -	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
> +	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
>  		return false;
>  
>  	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
>  
>  	gfn = gpte_to_gfn(gpte);
> -	pte_access = sp->role.access & gpte_access(vcpu, gpte);
> +	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
>  	protect_clean_gpte(&pte_access, gpte);
>  	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
>  			no_dirty_log && (pte_access & ACC_WRITE_MASK));
> @@ -782,14 +812,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
>  					  sizeof(pt_element_t)))
>  			return -EINVAL;
>  
> -		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
> +		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
>  			vcpu->kvm->tlbs_dirty++;
>  			continue;
>  		}
>  
>  		gfn = gpte_to_gfn(gpte);
>  		pte_access = sp->role.access;
> -		pte_access &= gpte_access(vcpu, gpte);
> +		pte_access &= FNAME(gpte_access)(vcpu, gpte);
>  		protect_clean_gpte(&pte_access, gpte);
>  
>  		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
> 

Reviewed-by: Paolo Bonzini
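
As an aside for anyone reading paging_tmpl.h for the first time: the move
matters because that header is compiled once per guest paging mode, with
FNAME() pasting the mode name into every function it defines, so the two
helpers now come in per-mode flavors (and a later patch in this series can
add an EPT flavor with its own gpte bit layout).  A simplified sketch of
the mechanism, from memory rather than the exact kernel sources:

/* paging_tmpl.h (excerpt, simplified) */
#if PTTYPE == 64
#define FNAME(name) paging##64_##name
#elif PTTYPE == 32
#define FNAME(name) paging##32_##name
#else
#error Invalid PTTYPE value
#endif

static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
{
	/* same body as the helper moved by this patch */
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	access &= ~(gpte >> PT64_NX_SHIFT);

	return access;
}

#undef FNAME

/* mmu.c (excerpt, simplified): one instantiation per paging mode */
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

With that in place, the walker and sync paths simply call
FNAME(gpte_access)() and FNAME(prefetch_invalid_gpte)(), as the hunks above
show, instead of the single shared copies that used to live in mmu.c.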