From: Ben Gardon <bgardon@google.com>
To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Peter Xu <peterx@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Peter Shier <pshier@google.com>,
	Yulei Zhang <yulei.kernel@gmail.com>,
	Wanpeng Li <kernellwp@gmail.com>,
	Xiao Guangrong <xiaoguangrong.eric@gmail.com>,
	Kai Huang <kai.huang@intel.com>,
	Keqian Zhu <zhukeqian1@huawei.com>,
	Ben Gardon <bgardon@google.com>
Subject: [PATCH v3 7/8] KVM: x86/mmu: Protect rmaps independently with SRCU
Date: Thu,  6 May 2021 11:42:40 -0700
Message-ID: <20210506184241.618958-8-bgardon@google.com>
In-Reply-To: <20210506184241.618958-1-bgardon@google.com>

In preparation for lazily allocating the rmaps when the TDP MMU is in
use, protect the rmaps with SRCU. Unfortunately, this requires
propagating a struct kvm pointer to several functions.
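
A minimal sketch of the pattern this applies (identifiers are taken
from this series; the helper name rmap_for() below is made up for
illustration): readers fetch the per-slot rmap array under kvm->srcu,
while the writer publishes a newly allocated array with
rcu_assign_pointer() under slots_arch_lock, which is why the read side
also accepts lockdep_is_held(&kvm->slots_arch_lock):

	/* Read side: runs under kvm->srcu or slots_arch_lock. */
	static struct kvm_rmap_head *rmap_for(struct kvm *kvm,
					      struct kvm_memory_slot *slot,
					      int level, unsigned long idx)
	{
		struct kvm_rmap_head *head;

		head = srcu_dereference_check(
				slot->arch.rmap[level - PG_LEVEL_4K],
				&kvm->srcu,
				lockdep_is_held(&kvm->slots_arch_lock));
		return &head[idx];
	}

	/*
	 * Write side (in alloc_memslot_rmap()): publish the new array
	 * with a release barrier so readers that observe the pointer
	 * also observe the zero-initialized contents.
	 */
	rcu_assign_pointer(slot->arch.rmap[i],
			   kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
				    GFP_KERNEL_ACCOUNT));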

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 57 +++++++++++++++++++++++++-----------------
 arch/x86/kvm/x86.c     |  6 ++---
 2 files changed, 37 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 730ea84bf7e7..48067c572c02 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -927,13 +927,18 @@ static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
 	__pte_list_remove(sptep, rmap_head);
 }
 
-static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
+static struct kvm_rmap_head *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
+					   int level,
 					   struct kvm_memory_slot *slot)
 {
+	struct kvm_rmap_head *head;
 	unsigned long idx;
 
 	idx = gfn_to_index(gfn, slot->base_gfn, level);
-	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
+	head = srcu_dereference_check(slot->arch.rmap[level - PG_LEVEL_4K],
+				      &kvm->srcu,
+				      lockdep_is_held(&kvm->slots_arch_lock));
+	return &head[idx];
 }
 
 static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
@@ -944,7 +949,7 @@ static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
 
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	return __gfn_to_rmap(gfn, sp->role.level, slot);
+	return __gfn_to_rmap(kvm, gfn, sp->role.level, slot);
 }
 
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
@@ -1194,7 +1199,8 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 		return;
 
 	while (mask) {
-		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+		rmap_head = __gfn_to_rmap(kvm,
+					  slot->base_gfn + gfn_offset + __ffs(mask),
 					  PG_LEVEL_4K, slot);
 		__rmap_write_protect(kvm, rmap_head, false);
 
@@ -1227,7 +1233,8 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 		return;
 
 	while (mask) {
-		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+		rmap_head = __gfn_to_rmap(kvm,
+					  slot->base_gfn + gfn_offset + __ffs(mask),
 					  PG_LEVEL_4K, slot);
 		__rmap_clear_dirty(kvm, rmap_head, slot);
 
@@ -1270,7 +1277,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 
 	if (kvm_memslots_have_rmaps(kvm)) {
 		for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
-			rmap_head = __gfn_to_rmap(gfn, i, slot);
+			rmap_head = __gfn_to_rmap(kvm, gfn, i, slot);
 			write_protected |= __rmap_write_protect(kvm, rmap_head,
 								true);
 		}
@@ -1373,17 +1380,19 @@ struct slot_rmap_walk_iterator {
 };
 
 static void
-rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
+rmap_walk_init_level(struct kvm *kvm, struct slot_rmap_walk_iterator *iterator,
+		     int level)
 {
 	iterator->level = level;
 	iterator->gfn = iterator->start_gfn;
-	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
-	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
+	iterator->rmap = __gfn_to_rmap(kvm, iterator->gfn, level,
+				       iterator->slot);
+	iterator->end_rmap = __gfn_to_rmap(kvm, iterator->end_gfn, level,
 					   iterator->slot);
 }
 
 static void
-slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
+slot_rmap_walk_init(struct kvm *kvm, struct slot_rmap_walk_iterator *iterator,
 		    struct kvm_memory_slot *slot, int start_level,
 		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
 {
@@ -1393,7 +1402,7 @@ slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
 	iterator->start_gfn = start_gfn;
 	iterator->end_gfn = end_gfn;
 
-	rmap_walk_init_level(iterator, iterator->start_level);
+	rmap_walk_init_level(kvm, iterator, iterator->start_level);
 }
 
 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
@@ -1401,7 +1410,8 @@ static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
 	return !!iterator->rmap;
 }
 
-static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
+static void slot_rmap_walk_next(struct kvm *kvm,
+				struct slot_rmap_walk_iterator *iterator)
 {
 	if (++iterator->rmap <= iterator->end_rmap) {
 		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
@@ -1413,15 +1423,15 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
 		return;
 	}
 
-	rmap_walk_init_level(iterator, iterator->level);
+	rmap_walk_init_level(kvm, iterator, iterator->level);
 }
 
-#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
-	   _start_gfn, _end_gfn, _iter_)				\
-	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
-				 _end_level_, _start_gfn, _end_gfn);	\
-	     slot_rmap_walk_okay(_iter_);				\
-	     slot_rmap_walk_next(_iter_))
+#define for_each_slot_rmap_range(_kvm_, _slot_, _start_level_, _end_level_,	\
+				 _start_gfn, _end_gfn, _iter_)			\
+	for (slot_rmap_walk_init(_kvm_, _iter_, _slot_, _start_level_,		\
+				 _end_level_, _start_gfn, _end_gfn);		\
+	     slot_rmap_walk_okay(_iter_);					\
+	     slot_rmap_walk_next(_kvm_, _iter_))
 
 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 			       struct kvm_memory_slot *slot, gfn_t gfn,
@@ -1434,8 +1444,9 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
 	struct slot_rmap_walk_iterator iterator;
 	bool ret = false;
 
-	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
-				 range->start, range->end - 1, &iterator)
+	for_each_slot_rmap_range(kvm, range->slot, PG_LEVEL_4K,
+				 KVM_MAX_HUGEPAGE_LEVEL, range->start,
+				 range->end - 1, &iterator)
 		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
 			       iterator.level, range->pte);
 
@@ -5233,8 +5244,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 {
 	struct slot_rmap_walk_iterator iterator;
 
-	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
-			end_gfn, &iterator) {
+	for_each_slot_rmap_range(kvm, memslot, start_level, end_level,
+				 start_gfn, end_gfn, &iterator) {
 		if (iterator.rmap)
 			flush |= fn(kvm, iterator.rmap, memslot);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d7a40ce342cc..1098ab73a704 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10854,9 +10854,9 @@ static int alloc_memslot_rmap(struct kvm_memory_slot *slot,
 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
 				      slot->base_gfn, level) + 1;
 
-		slot->arch.rmap[i] =
-			kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
-				 GFP_KERNEL_ACCOUNT);
+		rcu_assign_pointer(slot->arch.rmap[i],
+				   kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
+					    GFP_KERNEL_ACCOUNT));
 		if (!slot->arch.rmap[i])
 			goto out_free;
 	}
-- 
2.31.1.607.g51e8a6a459-goog


Thread overview: 35+ messages
2021-05-06 18:42 [PATCH v3 0/8] Lazily allocate memslot rmaps Ben Gardon
2021-05-06 18:42 ` [PATCH v3 1/8] KVM: x86/mmu: Deduplicate rmap freeing Ben Gardon
2021-05-07  7:42   ` David Hildenbrand
2021-05-06 18:42 ` [PATCH v3 2/8] KVM: x86/mmu: Factor out allocating memslot rmap Ben Gardon
2021-05-07  7:46   ` David Hildenbrand
2021-05-10 16:29     ` Ben Gardon
2021-05-06 18:42 ` [PATCH v3 3/8] KVM: mmu: Refactor memslot copy Ben Gardon
2021-05-07  7:48   ` David Hildenbrand
2021-05-06 18:42 ` [PATCH v3 4/8] KVM: mmu: Add slots_arch_lock for memslot arch fields Ben Gardon
2021-05-06 18:42 ` [PATCH v3 5/8] KVM: x86/mmu: Add a field to control memslot rmap allocation Ben Gardon
2021-05-06 23:44   ` Ben Gardon
2021-05-07  7:50     ` David Hildenbrand
2021-05-07  8:28     ` Paolo Bonzini
2021-05-10 16:14       ` Ben Gardon
2021-05-10 16:33         ` Paolo Bonzini
2021-05-10 16:37           ` Ben Gardon
2021-05-06 18:42 ` [PATCH v3 6/8] KVM: x86/mmu: Skip rmap operations if rmaps not allocated Ben Gardon
2021-05-06 23:07   ` kernel test robot
2021-05-06 18:42 ` [PATCH v3 7/8] KVM: x86/mmu: Protect rmaps independently with SRCU Ben Gardon [this message]
2021-05-06 23:58   ` [PATCH v3 7/8] KVM: x86/mmu: Protect rmaps independently with SRCU kernel test robot
2021-05-07  0:56   ` kernel test robot
2021-05-07  8:42   ` Paolo Bonzini
2021-05-10 17:45     ` Sean Christopherson
2021-05-10 17:53       ` Paolo Bonzini
2021-05-10 18:28         ` Sean Christopherson
2021-05-11 16:22           ` Ben Gardon
2021-05-11 16:45             ` Paolo Bonzini
2021-05-06 18:42 ` [PATCH v3 8/8] KVM: x86/mmu: Lazily allocate memslot rmaps Ben Gardon
2021-05-07  1:10   ` kernel test robot
2021-05-07  8:28   ` Paolo Bonzini
2021-05-07  7:40 ` [PATCH v3 0/8] " David Hildenbrand
