From: Chao Peng <chao.p.peng@linux.intel.com>
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Jonathan Corbet <corbet@lwn.net>,
	Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	x86@kernel.org, "H . Peter Anvin" <hpa@zytor.com>,
	Hugh Dickins <hughd@google.com>, Jeff Layton <jlayton@kernel.org>,
	"J . Bruce Fields" <bfields@fieldses.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Yu Zhang <yu.c.zhang@linux.intel.com>,
	Chao Peng <chao.p.peng@linux.intel.com>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	luto@kernel.org, john.ji@intel.com, susie.li@intel.com,
	jun.nakajima@intel.com, dave.hansen@intel.com,
	ak@linux.intel.com, david@redhat.com
Subject: [PATCH v3 09/15] KVM: Implement fd-based memory invalidation
Date: Tue, 21 Dec 2021 23:11:19 +0800
Message-ID: <20211221151125.19446-10-chao.p.peng@linux.intel.com>
In-Reply-To: <20211221151125.19446-1-chao.p.peng@linux.intel.com>

KVM gets notified when userspace punches a hole in an fd that is being
used for guest memory, and it must then invalidate the corresponding
mappings in the secondary MMU page tables. The logic is the same as for
MMU notifier invalidation, except that fd-related information is
carried around to describe the memory range. KVM can therefore reuse
most of the existing MMU notifier invalidation code, including looping
through the memslots and calling into kvm_unmap_gfn_range(), which
should do whatever is needed for fd-based memory unmapping (e.g. for
private memory managed by TDX it may need to call into the SEAM
module).
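
For orientation, the end-to-end flow being wired up here is roughly the
following. This is a simplified sketch pieced together from this patch
and the earlier MEMFD_OPS patches in the series, not literal kernel
code:

	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len)
	  -> memfd_invalidate_page_range(inode, owner, start, end)  /* MEMFD_OPS callback */
	     -> kvm_memfd_invalidate_range(kvm, inode, start, end)
	        -> __kvm_handle_useraddr_range(kvm, &useraddr_range) /* walks the memslots */
	           -> kvm_unmap_gfn_range(kvm, &gfn_range)           /* zaps secondary MMU PTEs */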

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
---
 include/linux/kvm_host.h |  8 ++++-
 virt/kvm/kvm_main.c      | 69 +++++++++++++++++++++++++++++++---------
 virt/kvm/memfd.c         |  2 ++
 3 files changed, 63 insertions(+), 16 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7279f46f35d3..d9573305e273 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -229,7 +229,7 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif
 
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#if defined(KVM_ARCH_WANT_MMU_NOTIFIER) || defined(CONFIG_MEMFD_OPS)
 struct kvm_gfn_range {
 	struct kvm_memory_slot *slot;
 	gfn_t start;
@@ -1874,4 +1874,10 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 /* Max number of entries allowed for each kvm dirty ring */
 #define  KVM_DIRTY_RING_MAX_ENTRIES  65536
 
+#ifdef CONFIG_MEMFD_OPS
+int kvm_memfd_invalidate_range(struct kvm *kvm, struct inode *inode,
+			       unsigned long start, unsigned long end);
+#endif /* CONFIG_MEMFD_OPS */
+
+
 #endif
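
Note: widening the #ifdef above is needed because kvm_unmap_gfn_range(),
reused below as the invalidation handler, operates on struct
kvm_gfn_range and must see its definition even on configurations built
without MMU notifier support. For reference, the handler's declaration
elsewhere in kvm_host.h is:

	bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
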
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 59f01e68337b..d84cb867b686 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -453,7 +453,8 @@ void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
 
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#if defined(CONFIG_MEMFD_OPS) ||\
+	(defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
 
 typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 
@@ -564,6 +565,30 @@ static __always_inline int __kvm_handle_useraddr_range(struct kvm *kvm,
 	/* The notifiers are averse to booleans. :-( */
 	return (int)ret;
 }
+
+static void mn_active_invalidate_count_inc(struct kvm *kvm)
+{
+	spin_lock(&kvm->mn_invalidate_lock);
+	kvm->mn_active_invalidate_count++;
+	spin_unlock(&kvm->mn_invalidate_lock);
+
+}
+
+static void mn_active_invalidate_count_dec(struct kvm *kvm)
+{
+	bool wake;
+
+	spin_lock(&kvm->mn_invalidate_lock);
+	wake = (--kvm->mn_active_invalidate_count == 0);
+	spin_unlock(&kvm->mn_invalidate_lock);
+
+	/*
+	 * There can only be one waiter, since the wait happens under
+	 * slots_lock.
+	 */
+	if (wake)
+		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
+}
 #endif
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
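
Note: the "one waiter" referenced in the comment above is the
memslot-update path, which waits under slots_lock for the active
invalidation count to drain before installing new memslots. A close
paraphrase of that wait loop, as it appears in kvm_swap_active_memslots()
in kvm_main.c of this era (indicative, not quoted verbatim):

	spin_lock(&kvm->mn_invalidate_lock);
	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
	while (kvm->mn_active_invalidate_count) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&kvm->mn_invalidate_lock);
		schedule();
		spin_lock(&kvm->mn_invalidate_lock);
	}
	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
	spin_unlock(&kvm->mn_invalidate_lock);
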
@@ -701,9 +726,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 *
 	 * Pairs with the decrement in range_end().
 	 */
-	spin_lock(&kvm->mn_invalidate_lock);
-	kvm->mn_active_invalidate_count++;
-	spin_unlock(&kvm->mn_invalidate_lock);
+	mn_active_invalidate_count_inc(kvm);
 
 	__kvm_handle_useraddr_range(kvm, &useraddr_range);
 
@@ -742,21 +765,11 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 		.may_block	= mmu_notifier_range_blockable(range),
 		.inode		= NULL,
 	};
-	bool wake;
 
 	__kvm_handle_useraddr_range(kvm, &useraddr_range);
 
 	/* Pairs with the increment in range_start(). */
-	spin_lock(&kvm->mn_invalidate_lock);
-	wake = (--kvm->mn_active_invalidate_count == 0);
-	spin_unlock(&kvm->mn_invalidate_lock);
-
-	/*
-	 * There can only be one waiter, since the wait happens under
-	 * slots_lock.
-	 */
-	if (wake)
-		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
+	mn_active_invalidate_count_dec(kvm);
 
 	BUG_ON(kvm->mmu_notifier_count < 0);
 }
@@ -841,6 +854,32 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
+#ifdef CONFIG_MEMFD_OPS
+int kvm_memfd_invalidate_range(struct kvm *kvm, struct inode *inode,
+			       unsigned long start, unsigned long end)
+{
+	int ret;
+	const struct kvm_useraddr_range useraddr_range = {
+		.start		= start,
+		.end		= end,
+		.pte		= __pte(0),
+		.handler	= kvm_unmap_gfn_range,
+		.on_lock	= (void *)kvm_null_fn,
+		.flush_on_ret	= true,
+		.may_block	= false,
+		.inode		= inode,
+	};
+
+
+	/* Prevent memslot modification */
+	mn_active_invalidate_count_inc(kvm);
+	ret = __kvm_handle_useraddr_range(kvm, &useraddr_range);
+	mn_active_invalidate_count_dec(kvm);
+
+	return ret;
+}
+#endif /* CONFIG_MEMFD_OPS */
+
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
 static int kvm_pm_notifier_call(struct notifier_block *bl,
 				unsigned long state,
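
Note: the .on_lock = (void *)kvm_null_fn initializer in
kvm_memfd_invalidate_range() reuses KVM's existing no-op marker, since
no mmu_lock bracketing callback is needed on this path. For reference,
the marker is defined earlier in kvm_main.c as:

	static void kvm_null_fn(void)
	{
	}
	#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
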
diff --git a/virt/kvm/memfd.c b/virt/kvm/memfd.c
index 96a1a5bee0f7..d092a9b6f496 100644
--- a/virt/kvm/memfd.c
+++ b/virt/kvm/memfd.c
@@ -16,6 +16,8 @@ static const struct memfd_pfn_ops *memfd_ops;
 static void memfd_invalidate_page_range(struct inode *inode, void *owner,
 					pgoff_t start, pgoff_t end)
 {
+	kvm_memfd_invalidate_range(owner, inode, start >> PAGE_SHIFT,
+						 end >> PAGE_SHIFT);
 }
 
 static void memfd_fallocate(struct inode *inode, void *owner,
-- 
2.17.1
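
For completeness, the userspace side that triggers this invalidation
path is an ordinary hole punch on the backing memfd. A minimal sketch
(the fd setup from the rest of the series is assumed, and
discard_guest_range() is an illustrative name, not part of any API):

	#define _GNU_SOURCE
	#include <fcntl.h>          /* fallocate() */
	#include <linux/falloc.h>   /* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */

	/*
	 * Punch a hole over [offset, offset + len) in the guest-memory fd.
	 * With MEMFD_OPS registered, this reaches memfd_invalidate_page_range(),
	 * which calls kvm_memfd_invalidate_range() to unmap the affected range
	 * from the secondary MMU.
	 */
	static int discard_guest_range(int memfd, off_t offset, off_t len)
	{
		return fallocate(memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				 offset, len);
	}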


Thread overview: 35+ messages

2021-12-21 15:11 [PATCH v3 00/15] KVM: mm: fd-based approach for supporting KVM guest private memory Chao Peng
2021-12-21 15:11 ` [PATCH v3 01/15] mm/shmem: Introduce F_SEAL_INACCESSIBLE Chao Peng
2021-12-21 15:11 ` [PATCH v3 02/15] mm/memfd: Introduce MFD_INACCESSIBLE flag Chao Peng
2021-12-21 15:11 ` [PATCH v3 03/15] mm/memfd: Introduce MEMFD_OPS Chao Peng
2021-12-21 15:11 ` [PATCH v3 04/15] KVM: Extend the memslot to support fd-based private memory Chao Peng
2021-12-21 15:11 ` [PATCH v3 05/15] KVM: Implement fd-based memory using MEMFD_OPS interfaces Chao Peng
2021-12-21 15:11 ` [PATCH v3 06/15] KVM: Refactor hva based memory invalidation code Chao Peng
2021-12-21 15:11 ` [PATCH v3 07/15] KVM: Special handling for fd-based memory invalidation Chao Peng
2021-12-21 15:11 ` [PATCH v3 08/15] KVM: Split out common memory invalidation code Chao Peng
2021-12-21 15:11 ` [PATCH v3 09/15] KVM: Implement fd-based memory invalidation Chao Peng [this message]
2021-12-21 15:11 ` [PATCH v3 10/15] KVM: Add kvm_map_gfn_range Chao Peng
2021-12-21 15:11 ` [PATCH v3 11/15] KVM: Implement fd-based memory fallocation Chao Peng
2021-12-21 15:11 ` [PATCH v3 12/15] KVM: Add KVM_EXIT_MEMORY_ERROR exit Chao Peng
2021-12-21 15:11 ` [PATCH v3 13/15] KVM: Handle page fault for private memory Chao Peng
2021-12-21 15:11 ` [PATCH v3 14/15] KVM: Use kvm_userspace_memory_region_ext Chao Peng
2021-12-21 15:11 ` [PATCH v3 15/15] KVM: Register/unregister private memory slot to memfd Chao Peng
2021-12-21 15:44 ` [PATCH v3 00/15] KVM: mm: fd-based approach for supporting KVM guest private memory Sean Christopherson
2021-12-22  1:22   ` Chao Peng
