From: Emanuele Giuseppe Esposito <eesposit@redhat.com>
To: kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	David Hildenbrand <david@redhat.com>,
	Maxim Levitsky <mlevitsk@redhat.com>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	linux-kernel@vger.kernel.org,
	Emanuele Giuseppe Esposito <eesposit@redhat.com>
Subject: [RFC PATCH 3/9] kvm_main.c: introduce kvm_internal_memory_region_list
Date: Fri,  9 Sep 2022 06:45:00 -0400
Message-ID: <20220909104506.738478-4-eesposit@redhat.com>
In-Reply-To: <20220909104506.738478-1-eesposit@redhat.com>

For now this struct is only used to pass the new, old and change
variables in a single parameter instead of three.

In the future, it will also carry additional information and be used
to handle atomic memslot updates.
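
For illustration only (not part of the patch), after this change a
caller such as __x86_set_memory_region() ends up doing roughly the
following; this is a simplified sketch of the x86 hunk below:

	struct kvm_userspace_memory_region m = {
		.slot = id,
		.flags = 0,
		.guest_phys_addr = gpa,
		.userspace_addr = hva,
		.memory_size = size,
	};
	/* Must start zeroed; __kvm_set_memory_region() fills it in. */
	struct kvm_internal_memory_region_list batch = { 0 };
	int r;

	r = __kvm_set_memory_region(kvm, &m, &batch);
	/*
	 * On success, batch.old/new/change describe the update that was
	 * applied, e.g. KVM_MR_CREATE for a new slot or KVM_MR_DELETE
	 * when memory_size is zero.
	 */
	if (r < 0)
		return ERR_PTR_USR(r);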

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 arch/x86/kvm/x86.c       |  3 ++-
 include/linux/kvm_host.h | 15 +++++++++++-
 virt/kvm/kvm_main.c      | 52 +++++++++++++++++++++++++++-------------
 3 files changed, 51 insertions(+), 19 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 567d13405445..da5a5dd3d4bf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12155,13 +12155,14 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 		struct kvm_userspace_memory_region m;
+		struct kvm_internal_memory_region_list b = { 0 };
 
 		m.slot = id | (i << 16);
 		m.flags = 0;
 		m.guest_phys_addr = gpa;
 		m.userspace_addr = hva;
 		m.memory_size = size;
-		r = __kvm_set_memory_region(kvm, &m);
+		r = __kvm_set_memory_region(kvm, &m, &b);
 		if (r < 0)
 			return ERR_PTR_USR(r);
 	}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c5b7b2e35dd..69af94472b39 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1108,8 +1108,21 @@ enum kvm_mr_change {
 	KVM_MR_FLAGS_ONLY,
 };
 
+/*
+ * Internally used to atomically update multiple memslots.
+ * Must always be zeroed by the caller.
+ */
+struct kvm_internal_memory_region_list {
+	/* Fields initialized in __kvm_set_memory_region() */
+	struct kvm_memory_slot *old;
+	struct kvm_memory_slot *new;
+	struct kvm_memory_slot *invalid;
+	enum kvm_mr_change change;
+};
+
 int __kvm_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
+			    const struct kvm_userspace_memory_region *mem,
+			    struct kvm_internal_memory_region_list *batch);
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 339de0ed4557..e4fab15d0d4b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1583,10 +1583,11 @@ static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
 }
 
 static int kvm_prepare_memory_region(struct kvm *kvm,
-				     const struct kvm_memory_slot *old,
-				     struct kvm_memory_slot *new,
-				     enum kvm_mr_change change)
+				     struct kvm_internal_memory_region_list *batch)
 {
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
 	int r;
 
 	/*
@@ -1621,10 +1622,12 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
 }
 
 static void kvm_commit_memory_region(struct kvm *kvm,
-				     struct kvm_memory_slot *old,
-				     const struct kvm_memory_slot *new,
-				     enum kvm_mr_change change)
+				     struct kvm_internal_memory_region_list *batch)
 {
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
+
 	/*
 	 * Update the total number of memslot pages before calling the arch
 	 * hook so that architectures can consume the result directly.
@@ -1788,11 +1791,12 @@ static void kvm_update_flags_memslot(struct kvm *kvm,
 }
 
 static int kvm_set_memslot(struct kvm *kvm,
-			   struct kvm_memory_slot *old,
-			   struct kvm_memory_slot *new,
-			   enum kvm_mr_change change)
+			   struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *invalid_slot;
+	struct kvm_memory_slot *old = batch->old;
+	struct kvm_memory_slot *new = batch->new;
+	enum kvm_mr_change change = batch->change;
 	int r;
 
 	/*
@@ -1830,10 +1834,11 @@ static int kvm_set_memslot(struct kvm *kvm,
 			mutex_unlock(&kvm->slots_arch_lock);
 			return -ENOMEM;
 		}
+		batch->invalid = invalid_slot;
 		kvm_invalidate_memslot(kvm, old, invalid_slot);
 	}
 
-	r = kvm_prepare_memory_region(kvm, old, new, change);
+	r = kvm_prepare_memory_region(kvm, batch);
 	if (r) {
 		/*
 		 * For DELETE/MOVE, revert the above INVALID change.  No
@@ -1877,7 +1882,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 	 * will directly hit the final, active memslot.  Architectures are
 	 * responsible for knowing that new->arch may be stale.
 	 */
-	kvm_commit_memory_region(kvm, old, new, change);
+	kvm_commit_memory_region(kvm, batch);
 
 	return 0;
 }
@@ -1900,11 +1905,14 @@ static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
  * space.
  *
  * Discontiguous memory is allowed, mostly for framebuffers.
+ * This function also takes care of initializing the
+ * batch->new/old/invalid/change fields.
  *
  * Must be called holding kvm->slots_lock for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+			    const struct kvm_userspace_memory_region *mem,
+			    struct kvm_internal_memory_region_list *batch)
 {
 	struct kvm_memory_slot *old, *new;
 	struct kvm_memslots *slots;
@@ -1947,6 +1955,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	 * and/or destroyed by kvm_set_memslot().
 	 */
 	old = id_to_memslot(slots, id);
+	batch->old = old;
 
 	if (!mem->memory_size) {
 		if (!old || !old->npages)
@@ -1955,7 +1964,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
 			return -EIO;
 
-		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
+		batch->change = KVM_MR_DELETE;
+		batch->new = NULL;
+		return kvm_set_memslot(kvm, batch);
 	}
 
 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
@@ -1963,6 +1974,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	if (!old || !old->npages) {
 		change = KVM_MR_CREATE;
+		batch->old = NULL;
 
 		/*
 		 * To simplify KVM internals, the total number of pages across
@@ -2000,7 +2012,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new->flags = mem->flags;
 	new->userspace_addr = mem->userspace_addr;
 
-	r = kvm_set_memslot(kvm, old, new, change);
+	batch->new = new;
+	batch->change = change;
+
+	r = kvm_set_memslot(kvm, batch);
 	if (r)
 		kfree(new);
 	return r;
@@ -2008,7 +2023,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 static int kvm_set_memory_region(struct kvm *kvm,
-				 const struct kvm_userspace_memory_region *mem)
+				 const struct kvm_userspace_memory_region *mem,
+				 struct kvm_internal_memory_region_list *batch)
 {
 	int r;
 
@@ -2016,7 +2032,7 @@ static int kvm_set_memory_region(struct kvm *kvm,
 		return -EINVAL;
 
 	mutex_lock(&kvm->slots_lock);
-	r = __kvm_set_memory_region(kvm, mem);
+	r = __kvm_set_memory_region(kvm, mem, batch);
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
@@ -2024,7 +2040,9 @@ static int kvm_set_memory_region(struct kvm *kvm,
 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 					  struct kvm_userspace_memory_region *mem)
 {
-	return kvm_set_memory_region(kvm, mem);
+	struct kvm_internal_memory_region_list batch = { 0 };
+
+	return kvm_set_memory_region(kvm, mem, &batch);
 }
 
 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-- 
2.31.1


Thread overview: 58+ messages
2022-09-09 10:44 [RFC PATCH 0/9] kvm: implement atomic memslot updates Emanuele Giuseppe Esposito
2022-09-09 10:44 ` [RFC PATCH 1/9] kvm_main.c: move slot check in kvm_set_memory_region Emanuele Giuseppe Esposito
2022-09-28 16:41   ` Paolo Bonzini
2022-09-09 10:44 ` [RFC PATCH 2/9] kvm.h: introduce KVM_SET_USER_MEMORY_REGION_LIST ioctl Emanuele Giuseppe Esposito
2022-09-28 16:42   ` Paolo Bonzini
2022-09-09 10:45 ` Emanuele Giuseppe Esposito [this message]
2022-09-28 16:48   ` [RFC PATCH 3/9] kvm_main.c: introduce kvm_internal_memory_region_list Paolo Bonzini
2022-09-09 10:45 ` [RFC PATCH 4/9] kvm_main.c: split logic in kvm_set_memslots Emanuele Giuseppe Esposito
2022-09-28 17:04   ` Paolo Bonzini
2022-09-09 10:45 ` [RFC PATCH 5/9] kvm_main.c: split __kvm_set_memory_region logic in kvm_check_mem and kvm_prepare_batch Emanuele Giuseppe Esposito
2022-09-13  2:56   ` Yang, Weijiang
2022-09-18 16:22     ` Emanuele Giuseppe Esposito
2022-09-28 17:11   ` Paolo Bonzini
2022-09-09 10:45 ` [RFC PATCH 6/9] kvm_main.c: simplify change-specific callbacks Emanuele Giuseppe Esposito
2022-09-09 10:45 ` [RFC PATCH 7/9] kvm_main.c: duplicate invalid memslot also in inactive list Emanuele Giuseppe Esposito
2022-09-28 17:18   ` Paolo Bonzini
2022-09-09 10:45 ` [RFC PATCH 8/9] kvm_main.c: find memslots from the inactive memslot list Emanuele Giuseppe Esposito
2022-09-09 10:45 ` [RFC PATCH 9/9] kvm_main.c: handle atomic memslot update Emanuele Giuseppe Esposito
2022-09-13  2:30   ` Yang, Weijiang
2022-09-18 16:18     ` Emanuele Giuseppe Esposito
2022-09-27  7:46   ` David Hildenbrand
2022-09-27  8:35     ` Emanuele Giuseppe Esposito
2022-09-27  9:22       ` David Hildenbrand
2022-09-27  9:32         ` Emanuele Giuseppe Esposito
2022-09-27 14:52           ` David Hildenbrand
2022-09-28 17:29   ` Paolo Bonzini
2022-09-09 14:30 ` [RFC PATCH 0/9] kvm: implement atomic memslot updates Sean Christopherson
2022-09-18 16:13   ` Emanuele Giuseppe Esposito
2022-09-19  7:38     ` Like Xu
2022-09-19  7:53     ` David Hildenbrand
2022-09-19 17:30       ` David Hildenbrand
2022-09-23 13:10         ` Emanuele Giuseppe Esposito
2022-09-23 13:21           ` David Hildenbrand
2022-09-23 13:38             ` Emanuele Giuseppe Esposito
2022-09-26  9:03               ` David Hildenbrand
2022-09-26 21:28                 ` Sean Christopherson
2022-09-27  7:38                   ` Emanuele Giuseppe Esposito
2022-09-27 15:58                     ` Sean Christopherson
2022-09-28  9:11                       ` Emanuele Giuseppe Esposito
2022-09-28 11:14                         ` Maxim Levitsky
2022-09-28 12:52                           ` David Hildenbrand
2022-09-28 15:07                       ` Paolo Bonzini
2022-09-28 15:33                         ` David Hildenbrand
2022-09-28 15:58                         ` Sean Christopherson
2022-09-28 16:38                           ` Paolo Bonzini
2022-09-28 20:41                             ` Sean Christopherson
2022-09-29  8:05                               ` Emanuele Giuseppe Esposito
2022-09-29  8:24                                 ` David Hildenbrand
2022-09-29 15:18                                 ` Sean Christopherson
2022-09-29 15:41                                   ` Paolo Bonzini
2022-09-29 15:28                               ` Paolo Bonzini
2022-09-29 15:40                                 ` Maxim Levitsky
2022-09-29 16:00                                 ` David Hildenbrand
2022-09-29 21:39                                 ` Sean Christopherson
2022-10-13  7:43                                   ` Emanuele Giuseppe Esposito
2022-10-13  8:44                                     ` David Hildenbrand
2022-10-13 11:12                                       ` Emanuele Giuseppe Esposito
2022-10-13 14:45                                         ` David Hildenbrand
