From: lantianyu1986@gmail.com
To: unlisted-recipients:; (no To-header on input)
Cc: Lan Tianyu <Tianyu.Lan@microsoft.com>,
	christoffer.dall@arm.com, marc.zyngier@arm.com,
	linux@armlinux.org.uk, catalin.marinas@arm.com,
	will.deacon@arm.com, jhogan@kernel.org, ralf@linux-mips.org,
	paul.burton@mips.com, paulus@ozlabs.org,
	benh@kernel.crashing.org, mpe@ellerman.id.au,
	pbonzini@redhat.com, rkrcmar@redhat.com, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, hpa@zytor.com, x86@kernel.org,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org,
	linux-mips@vger.kernel.org, kvm-ppc@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org,
	michael.h.kelley@microsoft.com, kys@microsoft.com,
	vkuznets@redhat.com
Subject: [PATCH V2 4/10] KVM/MMU: Introduce tlb flush with range list
Date: Sat,  2 Feb 2019 09:38:20 +0800	[thread overview]
Message-ID: <20190202013825.51261-5-Tianyu.Lan@microsoft.com> (raw)
In-Reply-To: <20190202013825.51261-1-Tianyu.Lan@microsoft.com>

From: Lan Tianyu <Tianyu.Lan@microsoft.com>

This patch introduces a TLB flush interface that takes a range list, using
struct kvm_mmu_page as the list entry, and converts
kvm_mmu_commit_zap_page() to use the flush-list function.

Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
---
 arch/x86/kvm/mmu.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70cafd3f95ab..d57574b49823 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -289,6 +289,20 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 
 	range.start_gfn = start_gfn;
 	range.pages = pages;
+	range.flush_list = NULL;
+
+	kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
+static void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
+		struct hlist_head *flush_list)
+{
+	struct kvm_tlb_range range;
+
+	if (hlist_empty(flush_list))
+		return;
+
+	range.flush_list = flush_list;
 
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
@@ -2708,6 +2722,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list)
 {
 	struct kvm_mmu_page *sp, *nsp;
+	HLIST_HEAD(flush_list);
 
 	if (list_empty(invalid_list))
 		return;
@@ -2721,7 +2736,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
 	 * guest mode and/or lockless shadow page table walks.
 	 */
-	kvm_flush_remote_tlbs(kvm);
+	if (kvm_available_flush_tlb_with_range()) {
+		list_for_each_entry(sp, invalid_list, link)
+			if (sp->last_level)
+				hlist_add_head(&sp->flush_link, &flush_list);
+
+		kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+	} else {
+		kvm_flush_remote_tlbs(kvm);
+	}
 
 	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
 		WARN_ON(!sp->role.invalid || sp->root_count);
-- 
2.14.4
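
[Editor's note] For reference, the fields this hunk relies on are introduced
elsewhere in the series: flush_list is added to struct kvm_tlb_range in an
earlier patch, and last_level/flush_link are added to struct kvm_mmu_page in
patch 3. A minimal sketch of those definitions follows, assuming this layout;
the exact types, field order, and surrounding members are assumptions, not
taken from this mail.

/*
 * Sketch only -- assumed definitions from earlier patches in this series,
 * not the authoritative kernel layout.
 */
struct kvm_tlb_range {
	u64 start_gfn;			/* first guest frame of a contiguous range */
	u64 pages;			/* number of pages in that range */
	struct hlist_head *flush_list;	/* optional list of kvm_mmu_page, or NULL */
};

struct kvm_mmu_page {
	struct list_head link;		/* e.g. entry on invalid_list */
	struct hlist_node flush_link;	/* entry on a flush_list built in commit_zap */
	bool last_level;		/* page maps last-level SPTEs */
	/* ... remaining fields unchanged ... */
};

As the diff above shows, when kvm_available_flush_tlb_with_range() reports
support, only pages with last_level set are queued onto flush_list and passed
to kvm_flush_remote_tlbs_with_list(); otherwise the code falls back to the
full kvm_flush_remote_tlbs().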

