From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: yoshikawa_takuya_b1@lab.ntt.co.jp, guangrong.xiao@linux.intel.com,
	mtosatti@redhat.com
Subject: [PATCH 08/12] KVM: MMU: move zap/flush to kvm_mmu_get_page
Date: Wed, 24 Feb 2016 14:17:49 +0100
Message-Id: <1456319873-34182-9-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1456319873-34182-1-git-send-email-pbonzini@redhat.com>
References: <1456319873-34182-1-git-send-email-pbonzini@redhat.com>

kvm_mmu_get_page is the only caller of kvm_sync_page_transient
and kvm_sync_pages.  Moving the handling of the invalid_list there
removes the need for the underdocumented kvm_sync_page_transient
function.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
Guangrong, at this point I am confused about why kvm_sync_page_transient
didn't clear sp->unsync.  Do you remember?  Or perhaps kvm_mmu_get_page
could just call kvm_sync_page now?

Also, can you explain the need_sync variable in kvm_mmu_get_page?
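As an aside for reviewers, the batching pattern this patch converges on
can be shown in a minimal standalone sketch.  The toy_* names below are
hypothetical stand-ins, not the kernel API; the point is only that the
helpers queue pages on a caller-provided invalid_list and report whether
a TLB flush is needed, while the single caller zaps and flushes exactly
once at the end:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct kvm_mmu_page: an id plus a list link. */
struct toy_page {
	int id;
	struct toy_page *next;
};

/*
 * Toy counterpart of __kvm_sync_page: it never flushes on its own;
 * it queues the page on the caller's invalid_list and returns
 * whether the caller will need a TLB flush.
 */
static bool toy_sync_page(struct toy_page *p, struct toy_page **invalid_list)
{
	p->next = *invalid_list;
	*invalid_list = p;
	return true;
}

/* Toy counterpart of kvm_mmu_flush_or_zap: one pass, one flush. */
static void toy_flush_or_zap(struct toy_page *invalid_list, bool flush)
{
	struct toy_page *p;

	for (p = invalid_list; p; p = p->next)
		printf("zap page %d\n", p->id);
	if (flush)
		printf("single TLB flush\n");
}

int main(void)
{
	struct toy_page a = { .id = 1 }, b = { .id = 2 };
	struct toy_page *invalid_list = NULL;
	bool flush = false;

	/* Several syncs accumulate work; none of them flushes itself. */
	flush |= toy_sync_page(&a, &invalid_list);
	flush |= toy_sync_page(&b, &invalid_list);

	/* The single caller pays the zap/flush cost exactly once. */
	toy_flush_or_zap(invalid_list, flush);
	return 0;
}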
 arch/x86/kvm/mmu.c | 43 ++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e3215cc89d97..725316df32ec 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1917,18 +1917,6 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
-static bool kvm_sync_page_transient(struct kvm_vcpu *vcpu,
-				    struct kvm_mmu_page *sp)
-{
-	LIST_HEAD(invalid_list);
-	int ret;
-
-	ret = __kvm_sync_page(vcpu, sp, &invalid_list);
-	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, ret);
-
-	return ret;
-}
-
 #ifdef CONFIG_KVM_MMU_AUDIT
 #include "mmu_audit.c"
 #else
@@ -1944,21 +1932,21 @@ static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 }
 
 /* @gfn should be write-protected at the call site */
-static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
+			   struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *s;
-	LIST_HEAD(invalid_list);
-	bool flush = false;
+	bool ret = false;
 
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
-		flush |= kvm_sync_page(vcpu, s, &invalid_list);
+		ret |= kvm_sync_page(vcpu, s, invalid_list);
 	}
 
-	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+	return ret;
 }
 
 struct mmu_page_path {
@@ -2089,6 +2077,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
 	bool need_sync = false;
+	bool flush = false;
+	LIST_HEAD(invalid_list);
 
 	role = vcpu->arch.mmu.base_role;
 	role.level = level;
@@ -2112,8 +2103,16 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->role.word != role.word)
 			continue;
 
-		if (sp->unsync && !kvm_sync_page_transient(vcpu, sp))
-			break;
+		if (sp->unsync) {
+			/* The page is good, but __kvm_sync_page might still end
+			 * up zapping it.  If so, break in order to rebuild it.
+			 */
+			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+				break;
+
+			WARN_ON(!list_empty(&invalid_list));
+			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+		}
 
 		if (sp->unsync_children)
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -2133,13 +2132,15 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (rmap_write_protect(vcpu, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
-			kvm_sync_pages(vcpu, gfn);
+			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
 
 		account_shadowed(vcpu->kvm, sp);
 	}
 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
 	clear_page(sp->spt);
 	trace_kvm_mmu_get_page(sp, true);
+
+	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
 	return sp;
 }
 
-- 
1.8.3.1