From: Tianyu Lan <Tianyu.Lan@microsoft.com>
To: unlisted-recipients:; (no To-header on input)
Cc: Tianyu Lan <Tianyu.Lan@microsoft.com>,
	KY Srinivasan <kys@microsoft.com>,
	Haiyang Zhang <haiyangz@microsoft.com>,
	Stephen Hemminger <sthemmin@microsoft.com>,
	"tglx@linutronix.de" <tglx@linutronix.de>,
	"mingo@redhat.com" <mingo@redhat.com>,
	"hpa@zytor.com" <hpa@zytor.com>,
	"x86@kernel.org" <x86@kernel.org>,
	"pbonzini@redhat.com" <pbonzini@redhat.com>,
	"rkrcmar@redhat.com" <rkrcmar@redhat.com>,
	"devel@linuxdriverproject.org" <devel@linuxdriverproject.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"kvm@vger.kernel.org" <kvm@vger.kernel.org>,
	"Michael Kelley (EOSG)" <Michael.H.Kelley@microsoft.com>,
	vkuznets <vkuznets@redhat.com>,
	Jork Loeser <Jork.Loeser@microsoft.com>
Subject: [PATCH 9/13] KVM/MMU: Replace tlb flush function with range list flush function
Date: Mon, 10 Sep 2018 08:39:21 +0000	[thread overview]
Message-ID: <20180910083806.65177-10-Tianyu.Lan@microsoft.com> (raw)
In-Reply-To: <20180910083806.65177-1-Tianyu.Lan@microsoft.com>

Use the range list flush function in mmu_sync_children(),
kvm_mmu_commit_zap_page() and FNAME(sync_page)(). Each call site
collects the affected shadow pages on a local flush list and issues a
single ranged flush over that list instead of a full remote TLB
flush.

Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
 arch/x86/kvm/mmu.c         | 26 +++++++++++++++++++++++---
 arch/x86/kvm/paging_tmpl.h |  5 ++++-
 2 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 73e19ce589e7..a071da797a15 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1092,6 +1092,13 @@ static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
 	}
 }
 
+static void kvm_mmu_queue_flush_request(struct kvm_mmu_page *sp,
+		struct list_head *flush_list)
+{
+	if (sp->sptep && is_last_spte(*sp->sptep, sp->role.level))
+		list_add(&sp->flush_link, flush_list);
+}
+
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	update_gfn_disallow_lpage_count(slot, gfn, 1);
@@ -2373,12 +2380,16 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 
 	while (mmu_unsync_walk(parent, &pages)) {
 		bool protected = false;
+		LIST_HEAD(flush_list);
 
-		for_each_sp(pages, sp, parents, i)
+		for_each_sp(pages, sp, parents, i) {
 			protected |= rmap_write_protect(vcpu, sp->gfn);
+			kvm_mmu_queue_flush_request(sp, &flush_list);
+		}
 
 		if (protected) {
-			kvm_flush_remote_tlbs(vcpu->kvm);
+			kvm_flush_remote_tlbs_with_list(vcpu->kvm,
+					&flush_list);
 			flush = false;
 		}
 
@@ -2715,6 +2726,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *sp, *nsp;
+	LIST_HEAD(flush_list);
 
 	if (list_empty(invalid_list))
 		return;
@@ -2728,7 +2740,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
 	 * guest mode and/or lockless shadow page table walks.
 	 */
-	kvm_flush_remote_tlbs(kvm);
+	if (kvm_available_flush_tlb_with_range()) {
+		list_for_each_entry(sp, invalid_list, link)
+			kvm_mmu_queue_flush_request(sp, &flush_list);
+
+		if (!list_empty(&flush_list))
+			kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+	} else {
+		kvm_flush_remote_tlbs(kvm);
+	}
 
 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
 		WARN_ON(!sp->role.invalid || sp->root_count);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bb8c2cdf70c3..aa450e0596a4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -976,6 +976,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	bool host_writable;
 	gpa_t first_pte_gpa;
 	int set_spte_ret = 0;
+	LIST_HEAD(flush_list);
 
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
@@ -1036,10 +1037,12 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 					 pte_access, PT_PAGE_TABLE_LEVEL,
 					 gfn, spte_to_pfn(sp->spt[i]),
 					 true, false, host_writable);
+		if (set_spte_ret && kvm_available_flush_tlb_with_range())
+			kvm_mmu_queue_flush_request(sp, &flush_list);
 	}
 
 	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
-		kvm_flush_remote_tlbs(vcpu->kvm);
+		kvm_flush_remote_tlbs_with_list(vcpu->kvm, &flush_list);
 
 	return nr_present;
 }
-- 
2.14.4
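
The core pattern is the same at all three call sites: collect the
affected shadow pages on a local flush list, then hand the whole list
to one ranged flush rather than flushing all remote TLBs. Below is a
minimal userspace sketch of that pattern. All names and types are
simplified stand-ins rather than the real kernel definitions: the
list helpers are reimplemented locally, the is_last_spte() test is
modeled as a plain level check, and the ranged flush prints instead
of issuing the Hyper-V hypercall.

#include <stdio.h>
#include <stddef.h>

/* Local stand-ins for the kernel's doubly linked list helpers. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

/* Simplified shadow page: only the fields the queueing decision needs. */
struct kvm_mmu_page {
	unsigned long gfn;            /* base guest frame number */
	int level;                    /* 1 == last-level (4KiB) mapping */
	struct list_head flush_link;  /* links the page onto a flush list */
};

/*
 * Mirrors kvm_mmu_queue_flush_request(): only pages mapping last-level
 * sptes describe a flushable gfn range (modeled here as level == 1);
 * anything else is simply not queued.
 */
static void queue_flush_request(struct kvm_mmu_page *sp,
				struct list_head *flush_list)
{
	if (sp->level == 1)
		list_add(&sp->flush_link, flush_list);
}

/* Models kvm_flush_remote_tlbs_with_list(): one ranged flush per entry. */
static void flush_remote_tlbs_with_list(struct list_head *flush_list)
{
	struct list_head *pos;

	for (pos = flush_list->next; pos != flush_list; pos = pos->next) {
		struct kvm_mmu_page *sp = (struct kvm_mmu_page *)
			((char *)pos - offsetof(struct kvm_mmu_page, flush_link));
		printf("ranged flush: gfn 0x%lx, level %d\n", sp->gfn, sp->level);
	}
}

int main(void)
{
	struct kvm_mmu_page a = { .gfn = 0x100, .level = 1 };
	struct kvm_mmu_page b = { .gfn = 0x200, .level = 2 };	/* not queued */
	struct list_head flush_list;

	INIT_LIST_HEAD(&flush_list);
	queue_flush_request(&a, &flush_list);
	queue_flush_request(&b, &flush_list);

	if (!list_empty(&flush_list))
		flush_remote_tlbs_with_list(&flush_list);
	return 0;
}

Note that kvm_mmu_commit_zap_page() additionally gates this pattern
on kvm_available_flush_tlb_with_range(), falling back to the
unconditional kvm_flush_remote_tlbs() when ranged flush hypercalls
are not supported by the hypervisor.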

Thread overview: 24+ messages
2018-09-10  8:38 [PATCH 00/13] x86/KVM/Hyper-v: Add HV ept tlb range flush hypercall support in KVM Tianyu Lan
2018-09-10  8:38 ` [PATCH 1/13] KVM: Add tlb_remote_flush_with_range callback in kvm_x86_ops Tianyu Lan
2018-09-10 14:21   ` Sean Christopherson
2018-09-12 13:40     ` Tianyu Lan
2018-09-10  8:38 ` [PATCH 2/13] KVM/MMU: Add tlb flush with range helper function Tianyu Lan
2018-09-10  8:38 ` [PATCH 3/13] KVM: Replace old tlb flush function with new one to flush a specified range Tianyu Lan
2018-09-10  8:38 ` [PATCH 4/13] KVM/MMU: Flush tlb directly in the kvm_handle_hva_range() Tianyu Lan
2018-09-10  8:38 ` [PATCH 5/13] KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range() Tianyu Lan
2018-09-10  8:39 ` [PATCH 6/13] KVM/MMU: Flush tlb directly in kvm_mmu_zap_collapsible_spte() Tianyu Lan
2018-09-10  8:39 ` [PATCH 7/13] KVM: Add flush_link and parent_pte in the struct kvm_mmu_page Tianyu Lan
2018-09-10  8:39 ` [PATCH 8/13] KVM: Add spte's point in the struct kvm_mmu_page Tianyu Lan
2018-09-10  8:39 ` [PATCH 9/13] KVM/MMU: Replace tlb flush function with range list flush function Tianyu Lan [this message]
2018-09-10  8:39 ` [PATCH 10/13] x86/hyper-v: Add HvFlushGuestAddressList hypercall support Tianyu Lan
2018-09-12  0:22   ` Michael Kelley (EOSG)
2018-09-12 13:31     ` Tianyu Lan
2018-09-10  8:39 ` [PATCH 11/13] x86/Hyper-v: Add trace in the hyperv_nested_flush_guest_mapping_range() Tianyu Lan
2018-09-10  8:39 ` [PATCH 12/13] KVM/VMX: Change hv flush logic when ept tables are mismatched Tianyu Lan
2018-09-10  8:39 ` [PATCH 13/13] KVM/VMX: Add hv tlb range flush support Tianyu Lan
