From: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
To: gleb@redhat.com
Cc: avi.kivity@gmail.com, mtosatti@redhat.com, pbonzini@redhat.com,
	linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Subject: [PATCH v3 06/15] KVM: MMU: redesign the algorithm of pte_list
Date: Wed, 23 Oct 2013 21:29:24 +0800	[thread overview]
Message-ID: <1382534973-13197-7-git-send-email-xiaoguangrong@linux.vnet.ibm.com> (raw)
In-Reply-To: <1382534973-13197-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com>

Change the algorithm to:
1) always add a new desc at the head of the list (pointed to by
   parent_ptes/rmap), which makes it straightforward to implement
   rcu-nulls-list-like lockless rmap walking

2) always move an entry from the first desc into the position we want
   to remove when deleting a spte from the parent_ptes/rmap
   (forward-move).
   This helps the lockless rmap walk: in the current code, when a spte
   is deleted from a "desc", another spte in the last "desc" is moved
   into that position to replace the deleted one. If the deleted one
   has already been visited and the replacement has not, the
   replacement is missed by the lockless walk.
   To fix this case, we do not backward-move the spte; instead, we
   forward-move an entry: when a spte is deleted, we move an entry
   from the first desc into that position.

Both of these changes also reduce cache misses. A simplified sketch of
the new scheme follows.
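
Below is a minimal, standalone sketch (not the kernel code itself) of
the head-insert and forward-move scheme. It uses simplified types and a
plain pointer as the list head, and omits the kernel's trick of storing
a single spte directly in *pte_list; PTE_LIST_EXT's value here and the
names struct desc, list_add() and list_remove() are illustrative only.

#include <assert.h>
#include <stdlib.h>

#define PTE_LIST_EXT 3	/* arbitrary small value for illustration */

struct desc {
	void *sptes[PTE_LIST_EXT];
	struct desc *more;
};

/* Add an spte: only the head desc may have free slots. */
static void list_add(struct desc **head, void *spte)
{
	struct desc *d = *head;
	int i;

	if (!d || d->sptes[PTE_LIST_EXT - 1]) {
		/* Head missing or full: a new desc becomes the head. */
		struct desc *n = calloc(1, sizeof(*n));

		assert(n);
		n->more = d;
		*head = n;
		d = n;
	}

	/* Descs are always packed from slot 0, so find the first hole. */
	for (i = 0; d->sptes[i]; i++)
		;
	d->sptes[i] = spte;
}

/* Remove an spte: refill the hole from the head (first) desc. */
static void list_remove(struct desc **head, void *spte)
{
	struct desc *first = *head, *d;
	int i, last;

	assert(first);

	/* Find the last used slot of the head desc. */
	for (last = PTE_LIST_EXT - 1; last > 0 && !first->sptes[last]; last--)
		;

	for (d = first; d; d = d->more)
		for (i = 0; i < PTE_LIST_EXT && d->sptes[i]; i++)
			if (d->sptes[i] == spte) {
				/* Forward-move: take the filler from the head. */
				d->sptes[i] = first->sptes[last];
				first->sptes[last] = NULL;

				if (!first->sptes[0]) {
					/* Head desc is now empty, drop it. */
					*head = first->more;
					free(first);
				}
				return;
			}

	assert(0);	/* the spte was not in the list */
}

Because a new desc is always inserted at the head, the head desc is the
only one that can be partially filled, so neither add nor remove needs
to walk the list to find a free slot or a filler entry (remove walks
only to locate the spte being deleted).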

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c | 179 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 123 insertions(+), 56 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e85eed6..5cce039 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -913,6 +913,50 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	return level - 1;
 }
 
+static int __find_first_free(struct pte_list_desc *desc)
+{
+	int i;
+
+	for (i = 0; i < PTE_LIST_EXT; i++)
+		if (!desc->sptes[i])
+			break;
+	return i;
+}
+
+static int find_first_free(struct pte_list_desc *desc)
+{
+	int free = __find_first_free(desc);
+
+	WARN_ON(free >= PTE_LIST_EXT);
+	return free;
+}
+
+static int find_last_used(struct pte_list_desc *desc)
+{
+	int used = __find_first_free(desc) - 1;
+
+	WARN_ON(used < 0 || used >= PTE_LIST_EXT);
+	return used;
+}
+
+/*
+ * TODO: we can encode the desc number into the rmap/parent_ptes
+ * since at least 10 physical/virtual address bits are reserved
+ * on x86. It is worthwhile if it shows that the desc walking is
+ * a performance issue.
+ */
+static int count_spte_number(struct pte_list_desc *desc)
+{
+	int first_free, desc_num;
+
+	first_free = __find_first_free(desc);
+
+	for (desc_num = 0; desc->more; desc = desc->more)
+		desc_num++;
+
+	return first_free + desc_num * PTE_LIST_EXT;
+}
+
 /*
  * Pte mapping structures:
  *
@@ -923,98 +967,121 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
  *
  * Returns the number of pte entries before the spte was added or zero if
  * the spte was not added.
- *
  */
 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
 			unsigned long *pte_list)
 {
 	struct pte_list_desc *desc;
-	int i, count = 0;
+	int free_pos;
 
 	if (!*pte_list) {
 		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
 		*pte_list = (unsigned long)spte;
-	} else if (!(*pte_list & 1)) {
+		return 0;
+	}
+
+	if (!(*pte_list & 1)) {
 		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_pte_list_desc(vcpu);
 		desc->sptes[0] = (u64 *)*pte_list;
 		desc->sptes[1] = spte;
 		*pte_list = (unsigned long)desc | 1;
-		++count;
-	} else {
-		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
-		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
-		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
-			desc = desc->more;
-			count += PTE_LIST_EXT;
-		}
-		if (desc->sptes[PTE_LIST_EXT-1]) {
-			desc->more = mmu_alloc_pte_list_desc(vcpu);
-			desc = desc->more;
-		}
-		for (i = 0; desc->sptes[i]; ++i)
-			++count;
-		desc->sptes[i] = spte;
+		return 1;
 	}
-	return count;
+
+	rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
+	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+
+	/* No empty entry in the desc. */
+	if (desc->sptes[PTE_LIST_EXT - 1]) {
+		struct pte_list_desc *new_desc;
+		new_desc = mmu_alloc_pte_list_desc(vcpu);
+		new_desc->more = desc;
+		desc = new_desc;
+		*pte_list = (unsigned long)desc | 1;
+	}
+
+	free_pos = find_first_free(desc);
+	desc->sptes[free_pos] = spte;
+	return count_spte_number(desc) - 1;
 }
 
 static void
-pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
-			   int i, struct pte_list_desc *prev_desc)
+pte_list_desc_remove_entry(unsigned long *pte_list,
+			   struct pte_list_desc *desc, int i)
 {
-	int j;
+	struct pte_list_desc *first_desc;
+	int last_used;
 
-	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
-		;
-	desc->sptes[i] = desc->sptes[j];
-	desc->sptes[j] = NULL;
-	if (j != 0)
+	first_desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+	last_used = find_last_used(first_desc);
+
+	/*
+	 * Move an entry from the first desc into the position we
+	 * want to remove.
+	 */
+	desc->sptes[i] = first_desc->sptes[last_used];
+	first_desc->sptes[last_used] = NULL;
+
+	/* No valid entry in this desc, we can free this desc now. */
+	if (!first_desc->sptes[0]) {
+		struct pte_list_desc *next_desc = first_desc->more;
+
+		/*
+		 * Only one entry existing but still using a desc to store it?
+		 */
+		WARN_ON(!next_desc);
+
+		mmu_free_pte_list_desc(first_desc);
+		*pte_list = (unsigned long)next_desc | 1ul;
 		return;
-	if (!prev_desc && !desc->more)
-		*pte_list = (unsigned long)desc->sptes[0];
-	else
-		if (prev_desc)
-			prev_desc->more = desc->more;
-		else
-			*pte_list = (unsigned long)desc->more | 1;
-	mmu_free_pte_list_desc(desc);
+	}
+
+	/*
+	 * Only one entry left in this desc: store it directly in
+	 * *pte_list so that the desc can be freed.
+	 */
+	if (!first_desc->sptes[1] && !first_desc->more) {
+		*pte_list = (unsigned long)first_desc->sptes[0];
+		mmu_free_pte_list_desc(first_desc);
+	}
 }
 
 static void pte_list_remove(u64 *spte, unsigned long *pte_list)
 {
 	struct pte_list_desc *desc;
-	struct pte_list_desc *prev_desc;
 	int i;
 
 	if (!*pte_list) {
-		printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
+		pr_err("pte_list_remove: %p 0->BUG\n", spte);
 		BUG();
-	} else if (!(*pte_list & 1)) {
+		return;
+	}
+
+	if (!(*pte_list & 1)) {
 		rmap_printk("pte_list_remove:  %p 1->0\n", spte);
 		if ((u64 *)*pte_list != spte) {
-			printk(KERN_ERR "pte_list_remove:  %p 1->BUG\n", spte);
+			pr_err("pte_list_remove:  %p 1->BUG\n", spte);
 			BUG();
 		}
 		*pte_list = 0;
-	} else {
-		rmap_printk("pte_list_remove:  %p many->many\n", spte);
-		desc = (struct pte_list_desc *)(*pte_list & ~1ul);
-		prev_desc = NULL;
-		while (desc) {
-			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
-				if (desc->sptes[i] == spte) {
-					pte_list_desc_remove_entry(pte_list,
-							       desc, i,
-							       prev_desc);
-					return;
-				}
-			prev_desc = desc;
-			desc = desc->more;
-		}
-		pr_err("pte_list_remove: %p many->many\n", spte);
-		BUG();
+		return;
 	}
+
+	rmap_printk("pte_list_remove:  %p many->many\n", spte);
+	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+	while (desc) {
+		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
+			if (desc->sptes[i] == spte) {
+				pte_list_desc_remove_entry(pte_list,
+							       desc, i);
+				return;
+			}
+		desc = desc->more;
+	}
+
+	pr_err("pte_list_remove: %p many->many\n", spte);
+	BUG();
 }
 
 typedef void (*pte_list_walk_fn) (u64 *spte);
-- 
1.8.1.4


Thread overview: 69+ messages
2013-10-23 13:29 [PATCH v3 00/15] KVM: MMU: locklessly write-protect Xiao Guangrong
2013-10-23 13:29 ` [PATCH v3 01/15] KVM: MMU: properly check last spte in fast_page_fault() Xiao Guangrong
2013-11-12  0:25   ` Marcelo Tosatti
2013-10-23 13:29 ` [PATCH v3 02/15] KVM: MMU: lazily drop large spte Xiao Guangrong
2013-11-12 22:44   ` Marcelo Tosatti
2013-10-23 13:29 ` [PATCH v3 03/15] KVM: MMU: flush tlb if the spte can be locklessly modified Xiao Guangrong
2013-11-13  0:10   ` Marcelo Tosatti
2013-10-23 13:29 ` [PATCH v3 04/15] KVM: MMU: flush tlb out of mmu lock when write-protect the sptes Xiao Guangrong
2013-11-14  0:36   ` Marcelo Tosatti
2013-11-14  5:15     ` Xiao Guangrong
2013-11-14 18:39       ` Marcelo Tosatti
2013-11-15  7:09         ` Xiao Guangrong
2013-11-19  0:19           ` Marcelo Tosatti
2013-10-23 13:29 ` [PATCH v3 05/15] KVM: MMU: update spte and add it into rmap before dirty log Xiao Guangrong
2013-11-15  0:08   ` Marcelo Tosatti
2013-10-23 13:29 ` Xiao Guangrong [this message]
2013-11-19  0:48   ` [PATCH v3 06/15] KVM: MMU: redesign the algorithm of pte_list Marcelo Tosatti
2013-10-23 13:29 ` [PATCH v3 07/15] KVM: MMU: introduce nulls desc Xiao Guangrong
2013-11-22 19:14   ` Marcelo Tosatti
2013-11-25  6:11     ` Xiao Guangrong
2013-11-25  6:29       ` Xiao Guangrong
2013-11-25 18:12         ` Marcelo Tosatti
2013-11-26  3:21           ` Xiao Guangrong
2013-11-26 10:12             ` Gleb Natapov
2013-11-26 19:31             ` Marcelo Tosatti
2013-11-28  8:53               ` Xiao Guangrong
2013-12-03  7:10                 ` Xiao Guangrong
2013-12-05 13:50                   ` Marcelo Tosatti
2013-12-05 15:30                     ` Xiao Guangrong
2013-12-06  0:15                       ` Marcelo Tosatti
2013-12-06  0:22                       ` Marcelo Tosatti
2013-12-10  6:58                         ` Xiao Guangrong
2013-11-25 10:19       ` Gleb Natapov
2013-11-25 10:25         ` Xiao Guangrong
2013-11-25 12:48       ` Avi Kivity
2013-11-25 14:23         ` Marcelo Tosatti
2013-11-25 14:29           ` Gleb Natapov
2013-11-25 18:06             ` Marcelo Tosatti
2013-11-26  3:10           ` Xiao Guangrong
2013-11-26 10:15             ` Gleb Natapov
2013-11-26 19:58             ` Marcelo Tosatti
2013-11-28  8:32               ` Xiao Guangrong
2013-11-25 14:08       ` Marcelo Tosatti
2013-11-26  3:02         ` Xiao Guangrong
2013-11-25  9:31     ` Peter Zijlstra
2013-11-25 10:59       ` Xiao Guangrong
2013-11-25 11:05         ` Peter Zijlstra
2013-11-25 11:29           ` Peter Zijlstra
2013-10-23 13:29 ` [PATCH v3 08/15] KVM: MMU: introduce pte-list lockless walker Xiao Guangrong
2013-10-23 13:29 ` [PATCH v3 09/15] KVM: MMU: initialize the pointers in pte_list_desc properly Xiao Guangrong
2013-10-23 13:29 ` [PATCH v3 10/15] KVM: MMU: allocate shadow pages from slab Xiao Guangrong
2013-10-24  9:19   ` Gleb Natapov
2013-10-24  9:29     ` Xiao Guangrong
2013-10-24  9:52       ` Gleb Natapov
2013-10-24 10:10         ` Xiao Guangrong
2013-10-24 10:39           ` Gleb Natapov
2013-10-24 11:01             ` Xiao Guangrong
2013-10-24 12:32               ` Gleb Natapov
2013-10-28  3:16                 ` Xiao Guangrong
2013-10-23 13:29 ` [PATCH v3 11/15] KVM: MMU: locklessly access shadow page under rcu protection Xiao Guangrong
2013-10-23 13:29 ` [PATCH v3 12/15] KVM: MMU: check last spte with unawareness of mapping level Xiao Guangrong
2013-10-23 13:29 ` [PATCH v3 13/15] KVM: MMU: locklessly write-protect the page Xiao Guangrong
2013-10-24  9:17   ` Gleb Natapov
2013-10-24  9:24     ` Xiao Guangrong
2013-10-24  9:32       ` Gleb Natapov
2013-10-23 13:29 ` [PATCH v3 14/15] KVM: MMU: clean up spte_write_protect Xiao Guangrong
2013-10-23 13:29 ` [PATCH v3 15/15] KVM: MMU: use rcu functions to access the pointer Xiao Guangrong
2013-11-03 12:29 ` [PATCH v3 00/15] KVM: MMU: locklessly write-protect Gleb Natapov
2013-11-11  5:33   ` Xiao Guangrong
