From: Paolo Bonzini <pbonzini@redhat.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: yoshikawa_takuya_b1@lab.ntt.co.jp,
	guangrong.xiao@linux.intel.com, mtosatti@redhat.com
Subject: [PATCH 07/12] KVM: MMU: invert return value of FNAME(sync_page) and *kvm_sync_page*
Date: Wed, 24 Feb 2016 14:17:48 +0100
Message-ID: <1456319873-34182-8-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1456319873-34182-1-git-send-email-pbonzini@redhat.com>

Return true if the page was synced (and the TLB must be flushed)
and false if the page was zapped.  This lets callers accumulate the
"flush needed" result directly with |=, as kvm_sync_pages and
mmu_sync_children now do.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
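Note (not part of the commit message): the sketch below is a minimal,
standalone illustration of the calling convention this patch switches to;
sync_one() and sync_all() are hypothetical helpers, not KVM code.  With a
boolean "needs flush" return value, callers can OR the results together
instead of negating an error-style return first.

/*
 * Standalone sketch, compiled outside the kernel.  sync_one() and
 * sync_all() are made-up names used only to show the accumulation
 * pattern that the inverted return value enables.
 */
#include <stdbool.h>
#include <stdio.h>

/* true = page was synced, a TLB flush is needed;
 * false = page was zapped, nothing to flush for it. */
static bool sync_one(int page)
{
	return (page % 2) == 0;	/* arbitrary stand-in condition */
}

static void sync_all(const int *pages, int n)
{
	bool flush = false;
	int i;

	for (i = 0; i < n; i++)
		flush |= sync_one(pages[i]);	/* accumulate directly */

	if (flush)
		printf("TLB flush requested\n");
}

int main(void)
{
	int pages[] = { 1, 2, 3 };

	sync_all(pages, 3);
	return 0;
}
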
 arch/x86/kvm/mmu.c         | 29 +++++++++++++----------------
 arch/x86/kvm/paging_tmpl.h |  4 ++--
 2 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 56fa1636c0cf..e3215cc89d97 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1886,20 +1886,20 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 		if ((_sp)->role.direct || (_sp)->role.invalid) {} else
 
 /* @sp->gfn should be write-protected at the call site */
-static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			   struct list_head *invalid_list)
+static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			    struct list_head *invalid_list)
 {
 	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
-		return 1;
+		return false;
 	}
 
-	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
+	if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
-		return 1;
+		return false;
 	}
 
-	return 0;
+	return true;
 }
 
 static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
@@ -1917,14 +1917,14 @@ static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
-static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
-				   struct kvm_mmu_page *sp)
+static bool kvm_sync_page_transient(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *sp)
 {
 	LIST_HEAD(invalid_list);
 	int ret;
 
 	ret = __kvm_sync_page(vcpu, sp, &invalid_list);
-	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, !ret);
+	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, ret);
 
 	return ret;
 }
@@ -1936,7 +1936,7 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
 static void mmu_audit_disable(void) { }
 #endif
 
-static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
 	kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1955,8 +1955,7 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
-		if (!kvm_sync_page(vcpu, s, &invalid_list))
-			flush = true;
+		flush |= kvm_sync_page(vcpu, s, &invalid_list);
 	}
 
 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
@@ -2054,9 +2053,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 			kvm_flush_remote_tlbs(vcpu->kvm);
 
 		for_each_sp(pages, sp, parents, i) {
-			if (!kvm_sync_page(vcpu, sp, &invalid_list))
-				flush = true;
-
+			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
 			mmu_pages_clear_parents(&parents);
 		}
 		kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
@@ -2115,7 +2112,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->role.word != role.word)
 			continue;
 
-		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
+		if (sp->unsync && !kvm_sync_page_transient(vcpu, sp))
 			break;
 
 		if (sp->unsync_children)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 05827ff7bd2e..2e019b103249 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -938,7 +938,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 					       sizeof(pt_element_t)))
-			return -EINVAL;
+			return 0;
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
 			vcpu->kvm->tlbs_dirty++;
@@ -970,7 +970,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			 host_writable);
 	}
 
-	return !nr_present;
+	return nr_present;
 }
 
 #undef pt_element_t
-- 
1.8.3.1
