From: Chao Peng <chao.p.peng@linux.intel.com>
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Jonathan Corbet <corbet@lwn.net>,
	Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	x86@kernel.org, "H . Peter Anvin" <hpa@zytor.com>,
	Hugh Dickins <hughd@google.com>, Jeff Layton <jlayton@kernel.org>,
	"J . Bruce Fields" <bfields@fieldses.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Yu Zhang <yu.c.zhang@linux.intel.com>,
	Chao Peng <chao.p.peng@linux.intel.com>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	luto@kernel.org, john.ji@intel.com, susie.li@intel.com,
	jun.nakajima@intel.com, dave.hansen@intel.com,
	ak@linux.intel.com, david@redhat.com
Subject: [RFC v2 PATCH 08/13] KVM: Rename hva memory invalidation code to cover fd-based offset
Date: Fri, 19 Nov 2021 21:47:34 +0800
Message-ID: <20211119134739.20218-9-chao.p.peng@linux.intel.com>
In-Reply-To: <20211119134739.20218-1-chao.p.peng@linux.intel.com>

The purpose is to allow fd-based memslots to reuse the same code for
memory invalidation. The code can be reused as-is, except that 'hva' is
renamed to the more neutral 'useraddr'.
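
For illustration only, a minimal sketch of the intended reuse. The
function name, its pgoff_t parameters, and the choice of
kvm_unmap_gfn_range as handler are assumptions of this sketch, not part
of this patch; it also assumes a later patch in the series makes the
per-slot range comparison operate on fd offsets for fd-based slots:

/*
 * Hypothetical fd-based caller (illustrative only): reuse
 * __kvm_handle_useraddr_range() by passing byte offsets into the
 * backing file instead of host virtual addresses.
 */
static void kvm_memfd_invalidate_sketch(struct kvm *kvm,
					pgoff_t start, pgoff_t end)
{
	const struct kvm_useraddr_range range = {
		.start		= start << PAGE_SHIFT,
		.end		= end << PAGE_SHIFT,
		.pte		= __pte(0),
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	__kvm_handle_useraddr_range(kvm, &range);
}

The point is that the range handler itself does not care whether
'start' and 'end' are hvas or file offsets, as long as they live in the
same address space as the slot field they are compared against.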

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
---
 include/linux/kvm_host.h |  4 ++--
 virt/kvm/kvm_main.c      | 44 ++++++++++++++++++++--------------------
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e8646103356b..925c4d9f0a31 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1340,9 +1340,9 @@ static inline bool memslot_has_private(const struct kvm_memory_slot *slot)
 }
 
 static inline gfn_t
-hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
+useraddr_to_gfn_memslot(unsigned long useraddr, struct kvm_memory_slot *slot)
 {
-	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+	gfn_t gfn_offset = (useraddr - slot->userspace_addr) >> PAGE_SHIFT;
 
 	return slot->base_gfn + gfn_offset;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b8673490d301..d9a6890dd18a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -471,16 +471,16 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
-typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
+typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
 			     unsigned long end);
 
-struct kvm_hva_range {
+struct kvm_useraddr_range {
 	unsigned long start;
 	unsigned long end;
 	pte_t pte;
-	hva_handler_t handler;
+	gfn_handler_t handler;
 	on_lock_fn_t on_lock;
 	bool flush_on_ret;
 	bool may_block;
@@ -499,8 +499,8 @@ static void kvm_null_fn(void)
 }
 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
 
-static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
-						  const struct kvm_hva_range *range)
+static __always_inline int __kvm_handle_useraddr_range(struct kvm *kvm,
+					const struct kvm_useraddr_range *range)
 {
 	bool ret = false, locked = false;
 	struct kvm_gfn_range gfn_range;
@@ -518,12 +518,12 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 		slots = __kvm_memslots(kvm, i);
 		kvm_for_each_memslot(slot, slots) {
-			unsigned long hva_start, hva_end;
+			unsigned long useraddr_start, useraddr_end;
 
-			hva_start = max(range->start, slot->userspace_addr);
-			hva_end = min(range->end, slot->userspace_addr +
+			useraddr_start = max(range->start, slot->userspace_addr);
+			useraddr_end = min(range->end, slot->userspace_addr +
 						  (slot->npages << PAGE_SHIFT));
-			if (hva_start >= hva_end)
+			if (useraddr_start >= useraddr_end)
 				continue;
 
 			/*
@@ -536,11 +536,11 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 			gfn_range.may_block = range->may_block;
 
 			/*
-			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+			 * {gfn(page) | page intersects with [useraddr_start, useraddr_end)} =
 			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
 			 */
-			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
-			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
+			gfn_range.start = useraddr_to_gfn_memslot(useraddr_start, slot);
+			gfn_range.end = useraddr_to_gfn_memslot(useraddr_end + PAGE_SIZE - 1, slot);
 			gfn_range.slot = slot;
 
 			if (!locked) {
@@ -571,10 +571,10 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 						unsigned long start,
 						unsigned long end,
 						pte_t pte,
-						hva_handler_t handler)
+						gfn_handler_t handler)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	const struct kvm_hva_range range = {
+	const struct kvm_useraddr_range range = {
 		.start		= start,
 		.end		= end,
 		.pte		= pte,
@@ -584,16 +584,16 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 		.may_block	= false,
 	};
 
-	return __kvm_handle_hva_range(kvm, &range);
+	return __kvm_handle_useraddr_range(kvm, &range);
 }
 
 static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
 							 unsigned long start,
 							 unsigned long end,
-							 hva_handler_t handler)
+							 gfn_handler_t handler)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	const struct kvm_hva_range range = {
+	const struct kvm_useraddr_range range = {
 		.start		= start,
 		.end		= end,
 		.pte		= __pte(0),
@@ -603,7 +603,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
 		.may_block	= false,
 	};
 
-	return __kvm_handle_hva_range(kvm, &range);
+	return __kvm_handle_useraddr_range(kvm, &range);
 }
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
@@ -661,7 +661,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	const struct kvm_hva_range hva_range = {
+	const struct kvm_useraddr_range useraddr_range = {
 		.start		= range->start,
 		.end		= range->end,
 		.pte		= __pte(0),
@@ -685,7 +685,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	kvm->mn_active_invalidate_count++;
 	spin_unlock(&kvm->mn_invalidate_lock);
 
-	__kvm_handle_hva_range(kvm, &hva_range);
+	__kvm_handle_useraddr_range(kvm, &useraddr_range);
 
 	return 0;
 }
@@ -712,7 +712,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	const struct kvm_hva_range hva_range = {
+	const struct kvm_useraddr_range useraddr_range = {
 		.start		= range->start,
 		.end		= range->end,
 		.pte		= __pte(0),
@@ -723,7 +723,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 	};
 	bool wake;
 
-	__kvm_handle_hva_range(kvm, &hva_range);
+	__kvm_handle_useraddr_range(kvm, &useraddr_range);
 
 	/* Pairs with the increment in range_start(). */
 	spin_lock(&kvm->mn_invalidate_lock);
-- 
2.17.1


Thread overview: 99+ messages

2021-11-19 13:47 [RFC v2 PATCH 00/13] KVM: mm: fd-based approach for supporting KVM guest private memory Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 01/13] mm/shmem: Introduce F_SEAL_GUEST Chao Peng
2021-11-19 13:51   ` David Hildenbrand
2021-11-22 13:59     ` Kirill A. Shutemov
2021-11-19 15:19   ` Jason Gunthorpe
2021-11-19 15:39     ` David Hildenbrand
2021-11-19 16:00       ` Jason Gunthorpe
2021-11-22  9:26         ` David Hildenbrand
2021-11-22 13:31           ` Jason Gunthorpe
2021-11-22 13:35             ` David Hildenbrand
2021-11-22 14:01               ` Jason Gunthorpe
2021-11-22 14:57                 ` David Hildenbrand
2021-11-22 15:09                   ` Jason Gunthorpe
2021-11-22 15:15                     ` David Hildenbrand
2021-11-19 19:18       ` Sean Christopherson
2021-11-19 19:47         ` Jason Gunthorpe
2021-11-19 22:21           ` Sean Christopherson
2021-11-19 23:33             ` Jason Gunthorpe
2021-11-20  1:23               ` Sean Christopherson
2021-11-21  0:05                 ` Jason Gunthorpe
2021-11-23  9:06       ` Paolo Bonzini
2021-11-23 14:33         ` Chao Peng
2021-11-23 15:20         ` David Hildenbrand
2021-11-23 17:17         ` Jason Gunthorpe
2021-11-23  8:54   ` Paolo Bonzini
2021-12-03  1:11   ` Andy Lutomirski
2021-11-19 13:47 ` [RFC v2 PATCH 02/13] KVM: Add KVM_EXIT_MEMORY_ERROR exit Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 03/13] KVM: Extend kvm_userspace_memory_region to support fd based memslot Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 04/13] KVM: Add fd-based memslot data structure and utils Chao Peng
2021-11-23  8:41   ` Paolo Bonzini
2021-11-23 14:30     ` Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 05/13] KVM: Implement fd-based memory using new memfd interfaces Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 06/13] KVM: Register/unregister memfd backed memslot Chao Peng
2021-11-25 16:55   ` Steven Price
2021-11-19 13:47 ` [RFC v2 PATCH 07/13] KVM: Handle page fault for fd based memslot Chao Peng
2021-11-20  1:55   ` Yao Yuan
2021-11-22  9:18     ` Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 08/13] KVM: Rename hva memory invalidation code to cover fd-based offset Chao Peng [this message]
2021-11-19 13:47 ` [RFC v2 PATCH 09/13] KVM: Introduce kvm_memfd_invalidate_range Chao Peng
2021-11-23  8:46   ` Paolo Bonzini
2021-11-23 14:24     ` Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 10/13] KVM: Match inode for invalidation of fd-based slot Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 11/13] KVM: Add kvm_map_gfn_range Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 12/13] KVM: Introduce kvm_memfd_fallocate_range Chao Peng
2021-11-19 13:47 ` [RFC v2 PATCH 13/13] KVM: Enable memfd based page invalidation/fallocate Chao Peng
2021-11-22 14:16   ` Kirill A. Shutemov
2021-11-23  1:06     ` Chao Peng
2021-11-23  9:09       ` Paolo Bonzini
2021-11-23 15:00         ` Chao Peng
2021-11-23  8:51   ` Paolo Bonzini
2021-12-03  1:08 ` [RFC v2 PATCH 00/13] KVM: mm: fd-based approach for supporting KVM guest private memory Andy Lutomirski