linux-kernel.vger.kernel.org archive mirror
From: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
To: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	Avi Kivity <avi.kivity@gmail.com>, Gleb Natapov <gleb@redhat.com>,
	LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: [PATCH v2 10/12] KVM: MMU: unify the code of walking pte list
Date: Wed, 23 Jan 2013 18:09:34 +0800	[thread overview]
Message-ID: <50FFB6DE.8060709@linux.vnet.ibm.com> (raw)
In-Reply-To: <50FFB5A1.5090708@linux.vnet.ibm.com>

Walking the parent sptes and walking the rmap share the same logic; this patch
introduces for_each_spte_in_pte_list to unify their code.
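
For reference, the conversion pattern looks roughly like this (handle_spte()
is only a stand-in for whatever the caller does per spte; it is not something
added by this patch):

	u64 *sptep;

	/* before: pte_list_walk() for the parent ptes ... */
	pte_list_walk(&sp->parent_ptes, handle_spte);

	/* ... and rmap_get_first()/rmap_get_next() for the rmaps */
	struct rmap_iterator old_iter;
	for (sptep = rmap_get_first(*rmapp, &old_iter); sptep;
	     sptep = rmap_get_next(&old_iter))
		handle_spte(sptep);

	/* after: a single iterator covers both cases */
	struct pte_list_iterator iter;
	for_each_spte_in_pte_list(sp->parent_ptes, iter, sptep)
		handle_spte(sptep);

	for_each_spte_in_rmap(*rmapp, iter, sptep)
		handle_spte(sptep);

Callers that drop sptes inside the loop still have to restart the walk, since
the iterator is no longer valid once the list has been modified:

	restart:
	for_each_spte_in_rmap(*rmapp, iter, sptep) {
		drop_spte(kvm, sptep);
		goto restart;
	}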

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c       |  199 ++++++++++++++++++++++------------------------
 arch/x86/kvm/mmu_audit.c |    5 +-
 2 files changed, 97 insertions(+), 107 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 55198a1..b7da3fb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -968,26 +968,75 @@ static void pte_list_remove(u64 *spte, unsigned long *pte_list)
 	}
 }

-typedef void (*pte_list_walk_fn) (u64 *spte);
-static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
+/*
+ * Used by the following functions to iterate through the sptes linked by a
+ * pte_list.  All fields are private and not assumed to be used outside.
+ */
+struct pte_list_iterator {
+	/* private fields */
+	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
+	int pos;			/* index of the sptep */
+};
+
+/*
+ * Iteration must be started by this function.  This should also be used after
+ * removing/dropping sptes from the pte_list link because in such cases the
+ * information in the iterator may not be valid.
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *pte_list_get_first(unsigned long pte_list,
+			       struct pte_list_iterator *iter)
 {
-	struct pte_list_desc *desc;
-	int i;
+	if (!pte_list)
+		return NULL;

-	if (!*pte_list)
-		return;
+	if (!(pte_list & 1)) {
+		iter->desc = NULL;
+		return (u64 *)pte_list;
+	}

-	if (!(*pte_list & 1))
-		return fn((u64 *)*pte_list);
+	iter->desc = (struct pte_list_desc *)(pte_list & ~1ul);
+	iter->pos = 0;
+	return iter->desc->sptes[iter->pos];
+}

-	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
-	while (desc) {
-		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
-			fn(desc->sptes[i]);
-		desc = desc->more;
+/*
+ * Must be used with a valid iterator: e.g. after pte_list_get_first().
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *pte_list_get_next(struct pte_list_iterator *iter)
+{
+	if (iter->desc) {
+		if (iter->pos < PTE_LIST_EXT - 1) {
+			u64 *sptep;
+
+			++iter->pos;
+			sptep = iter->desc->sptes[iter->pos];
+			if (sptep)
+				return sptep;
+		}
+
+		iter->desc = iter->desc->more;
+
+		if (iter->desc) {
+			iter->pos = 0;
+			/* desc->sptes[0] cannot be NULL */
+			return iter->desc->sptes[iter->pos];
+		}
 	}
+
+	return NULL;
 }

+#define for_each_spte_in_pte_list(pte_list, iter, spte)		\
+	   for (spte = pte_list_get_first(pte_list, &(iter));	\
+	      spte != NULL; spte = pte_list_get_next(&(iter)))
+
+#define for_each_spte_in_rmap(rmap, iter, spte)			\
+	   for_each_spte_in_pte_list(rmap, iter, spte)
+
 static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 				    struct kvm_memory_slot *slot)
 {
@@ -1039,67 +1088,6 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	pte_list_remove(spte, rmapp);
 }

-/*
- * Used by the following functions to iterate through the sptes linked by a
- * rmap.  All fields are private and not assumed to be used outside.
- */
-struct rmap_iterator {
-	/* private fields */
-	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
-	int pos;			/* index of the sptep */
-};
-
-/*
- * Iteration must be started by this function.  This should also be used after
- * removing/dropping sptes from the rmap link because in such cases the
- * information in the itererator may not be valid.
- *
- * Returns sptep if found, NULL otherwise.
- */
-static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
-{
-	if (!rmap)
-		return NULL;
-
-	if (!(rmap & 1)) {
-		iter->desc = NULL;
-		return (u64 *)rmap;
-	}
-
-	iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
-	iter->pos = 0;
-	return iter->desc->sptes[iter->pos];
-}
-
-/*
- * Must be used with a valid iterator: e.g. after rmap_get_first().
- *
- * Returns sptep if found, NULL otherwise.
- */
-static u64 *rmap_get_next(struct rmap_iterator *iter)
-{
-	if (iter->desc) {
-		if (iter->pos < PTE_LIST_EXT - 1) {
-			u64 *sptep;
-
-			++iter->pos;
-			sptep = iter->desc->sptes[iter->pos];
-			if (sptep)
-				return sptep;
-		}
-
-		iter->desc = iter->desc->more;
-
-		if (iter->desc) {
-			iter->pos = 0;
-			/* desc->sptes[0] cannot be NULL */
-			return iter->desc->sptes[iter->pos];
-		}
-	}
-
-	return NULL;
-}
-
 static void drop_spte(struct kvm *kvm, u64 *sptep)
 {
 	if (mmu_spte_clear_track_bits(sptep))
@@ -1160,14 +1148,13 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
 				 bool pt_protect)
 {
 	u64 *sptep;
-	struct rmap_iterator iter;
+	struct pte_list_iterator iter;
 	bool flush = false;

-	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+	for_each_spte_in_rmap(*rmapp, iter, sptep) {
 		BUG_ON(!(*sptep & PT_PRESENT_MASK));

 		spte_write_protect(kvm, sptep, &flush, pt_protect);
-		sptep = rmap_get_next(&iter);
 	}

 	return flush;
@@ -1221,15 +1208,14 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			   struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
-	struct rmap_iterator iter;
+	struct pte_list_iterator iter;
 	int need_tlb_flush = 0;

-	while ((sptep = rmap_get_first(*rmapp, &iter))) {
-		BUG_ON(!(*sptep & PT_PRESENT_MASK));
-		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
-
+restart:
+	for_each_spte_in_rmap(*rmapp, iter, sptep) {
 		drop_spte(kvm, sptep);
 		need_tlb_flush = 1;
+		goto restart;
 	}

 	return need_tlb_flush;
@@ -1239,7 +1225,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			     struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
-	struct rmap_iterator iter;
+	struct pte_list_iterator iter;
 	int need_flush = 0;
 	u64 new_spte;
 	pte_t *ptep = (pte_t *)data;
@@ -1248,7 +1234,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	WARN_ON(pte_huge(*ptep));
 	new_pfn = pte_pfn(*ptep);

-	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+restart:
+	for_each_spte_in_rmap(*rmapp, iter, sptep) {
 		BUG_ON(!is_shadow_present_pte(*sptep));
 		rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);

@@ -1256,19 +1243,18 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,

 		if (pte_write(*ptep)) {
 			drop_spte(kvm, sptep);
-			sptep = rmap_get_first(*rmapp, &iter);
-		} else {
-			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
-			new_spte |= (u64)new_pfn << PAGE_SHIFT;
+			goto restart;
+		}

-			new_spte &= ~PT_WRITABLE_MASK;
-			new_spte &= ~SPTE_HOST_WRITEABLE;
-			new_spte &= ~shadow_accessed_mask;
+		new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
+		new_spte |= (u64)new_pfn << PAGE_SHIFT;

-			mmu_spte_clear_track_bits(sptep);
-			mmu_spte_set(sptep, new_spte);
-			sptep = rmap_get_next(&iter);
-		}
+		new_spte &= ~PT_WRITABLE_MASK;
+		new_spte &= ~SPTE_HOST_WRITEABLE;
+		new_spte &= ~shadow_accessed_mask;
+
+		mmu_spte_clear_track_bits(sptep);
+		mmu_spte_set(sptep, new_spte);
 	}

 	if (need_flush)
@@ -1359,7 +1345,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			 struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
-	struct rmap_iterator uninitialized_var(iter);
+	struct pte_list_iterator uninitialized_var(iter);
 	int young = 0;

 	/*
@@ -1375,8 +1361,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		goto out;
 	}

-	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
-	     sptep = rmap_get_next(&iter)) {
+	for_each_spte_in_rmap(*rmapp, iter, sptep) {
 		BUG_ON(!is_shadow_present_pte(*sptep));

 		if (*sptep & shadow_accessed_mask) {
@@ -1395,7 +1380,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			      struct kvm_memory_slot *slot, unsigned long data)
 {
 	u64 *sptep;
-	struct rmap_iterator iter;
+	struct pte_list_iterator iter;
 	int young = 0;

 	/*
@@ -1406,8 +1391,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 	if (!shadow_accessed_mask)
 		goto out;

-	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
-	     sptep = rmap_get_next(&iter)) {
+	for_each_spte_in_rmap(*rmapp, iter, sptep) {
 		BUG_ON(!is_shadow_present_pte(*sptep));

 		if (*sptep & shadow_accessed_mask) {
@@ -1539,7 +1523,11 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 static void mark_unsync(u64 *spte);
 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
-	pte_list_walk(&sp->parent_ptes, mark_unsync);
+	struct pte_list_iterator iter;
+	u64 *spte;
+
+	for_each_spte_in_pte_list(sp->parent_ptes, iter, spte)
+		mark_unsync(spte);
 }

 static void mark_unsync(u64 *spte)
@@ -2059,10 +2047,13 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *sptep;
-	struct rmap_iterator iter;
+	struct pte_list_iterator iter;

-	while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
+restart:
+	for_each_spte_in_rmap(sp->parent_ptes, iter, sptep) {
 		drop_parent_pte(sp, sptep);
+		goto restart;
+	}
 }

 static int mmu_zap_unsync_children(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index daff69e..a08d384 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -190,15 +190,14 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	unsigned long *rmapp;
 	u64 *sptep;
-	struct rmap_iterator iter;
+	struct pte_list_iterator iter;

 	if (sp->role.direct || sp->unsync || sp->role.invalid)
 		return;

 	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);

-	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
-	     sptep = rmap_get_next(&iter)) {
+	for_each_spte_in_rmap(*rmapp, iter, sptep) {
 		if (is_writable_pte(*sptep))
 			audit_printk(kvm, "shadow page has writable "
 				     "mappings: gfn %llx role %x\n",
-- 
1.7.7.6



Thread overview: 28+ messages
2013-01-23 10:04 [PATCH v2 01/12] KVM: MMU: lazily drop large spte Xiao Guangrong
2013-01-23 10:04 ` [PATCH v2 02/12] KVM: MMU: cleanup mapping-level Xiao Guangrong
2013-01-23 10:05 ` [PATCH v2 03/12] KVM: MMU: simplify mmu_set_spte Xiao Guangrong
2013-01-29  0:21   ` Marcelo Tosatti
2013-01-29  2:55     ` Xiao Guangrong
2013-01-29 21:53       ` Marcelo Tosatti
2013-01-30  3:22         ` Xiao Guangrong
2013-01-23 10:06 ` [PATCH v2 04/12] KVM: MMU: simplify set_spte Xiao Guangrong
2013-01-23 10:06 ` [PATCH v2 05/12] KVM: MMU: introduce vcpu_adjust_access Xiao Guangrong
2013-01-24 10:36   ` Gleb Natapov
2013-01-24 11:33     ` Xiao Guangrong
2013-01-23 10:07 ` [PATCH v2 06/12] KVM: MMU: introduce a static table to map guest access to spte access Xiao Guangrong
2013-01-25  0:15   ` Marcelo Tosatti
2013-01-25  2:46     ` Xiao Guangrong
2013-01-29  0:07       ` Marcelo Tosatti
2013-01-29  1:07         ` Marcelo Tosatti
2013-01-29 13:16           ` Gleb Natapov
2013-01-30  3:53           ` Xiao Guangrong
2013-01-23 10:07 ` [PATCH v2 07/12] KVM: MMU: remove pt_access in mmu_set_spte Xiao Guangrong
2013-01-23 10:08 ` [PATCH v2 08/12] KVM: MMU: cleanup __direct_map Xiao Guangrong
2013-01-23 10:09 ` [PATCH v2 09/12] KVM: MMU: introduce mmu_spte_establish Xiao Guangrong
2013-01-23 10:09 ` Xiao Guangrong [this message]
2013-01-27 13:28   ` [PATCH v2 10/12] KVM: MMU: unify the code of walking pte list Gleb Natapov
2013-01-29  3:01     ` Xiao Guangrong
2013-01-23 10:10 ` [PATCH v2 11/12] KVM: MMU: fix spte assertion Xiao Guangrong
2013-01-23 10:10 ` [PATCH v2 12/12] KVM: MMU: fast drop all spte on the pte_list Xiao Guangrong
2013-01-27 12:06 ` [PATCH v2 01/12] KVM: MMU: lazily drop large spte Gleb Natapov
2013-01-29  2:57   ` Xiao Guangrong
