linux-kernel.vger.kernel.org archive mirror
* Re: [PATCH v2 2/5] KVM MMU: fix kvm_mmu_zap_page() and its calling path
  2010-04-16 13:25 ` [PATCH v2 2/5] KVM MMU: fix kvm_mmu_zap_page() and its calling path Xiao Guangrong
@ 2010-04-15 17:54   ` Marcelo Tosatti
  2010-04-16 13:26   ` [PATCH v2 3/5] KVM MMU: cleanup for restart hlist walking Xiao Guangrong
  1 sibling, 0 replies; 6+ messages in thread
From: Marcelo Tosatti @ 2010-04-15 17:54 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, KVM list, LKML

On Fri, Apr 16, 2010 at 09:25:03PM +0800, Xiao Guangrong wrote:
> - calculate the number of zapped pages properly in mmu_zap_unsync_children()
> - calculate the number of freed pages properly in kvm_mmu_change_mmu_pages()
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
> ---
>  arch/x86/kvm/mmu.c |   12 ++++++++----
>  1 files changed, 8 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index a23ca75..41cccd4 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1481,13 +1481,16 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
>  		struct kvm_mmu_page *sp;
>  
>  		for_each_sp(pages, sp, parents, i) {
> +			if (list_empty(&kvm->arch.active_mmu_pages))
> +				goto exit;

I meant to check for list_empty in kvm_mmu_change_mmu_pages instead of
relying on the count returned by kvm_mmu_zap_page, similarly to what
__kvm_mmu_free_some_pages does.

Checking here is not needed because the pages returned in the array 
will not be zapped (mmu_lock is held).
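
For reference, a minimal sketch of the shape being suggested, modeled
on what __kvm_mmu_free_some_pages() does in this era of the code. This
is illustrative only, not the patch that was applied; it assumes the
used_pages/kvm_nr_mmu_pages locals visible in patch 2's second hunk:

	while (used_pages > kvm_nr_mmu_pages &&
	       !list_empty(&kvm->arch.active_mmu_pages)) {
		struct kvm_mmu_page *page;

		/* keep zapping from the tail of the active list */
		page = container_of(kvm->arch.active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(kvm, page);
		used_pages--;
	}

The list_empty() guard makes the loop terminate even if the zap count
is off, which is the point of the suggestion.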

Applied 1, 4 and 5 (so please regenerate against kvm.git -next branch),
thanks.



* [PATCH v2 1/5] KVM MMU: remove unused struct
@ 2010-04-16 13:23 Xiao Guangrong
  2010-04-16 13:25 ` [PATCH v2 2/5] KVM MMU: fix kvm_mmu_zap_page() and its calling path Xiao Guangrong
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Guangrong @ 2010-04-16 13:23 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, KVM list, LKML

Remove 'struct kvm_unsync_walk' since it is no longer used.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |    5 -----
 1 files changed, 0 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b44380b..a23ca75 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -172,11 +172,6 @@ struct kvm_shadow_walk_iterator {
 	     shadow_walk_okay(&(_walker));			\
 	     shadow_walk_next(&(_walker)))
 
-
-struct kvm_unsync_walk {
-	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
-};
-
 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
 
 static struct kmem_cache *pte_chain_cache;
-- 
1.6.1.2


* [PATCH v2 2/5] KVM MMU: fix kvm_mmu_zap_page() and its calling path
  2010-04-16 13:23 [PATCH v2 1/5] KVM MMU: remove unused struct Xiao Guangrong
@ 2010-04-16 13:25 ` Xiao Guangrong
  2010-04-15 17:54   ` Marcelo Tosatti
  2010-04-16 13:26   ` [PATCH v2 3/5] KVM MMU: cleanup for restart hlist walking Xiao Guangrong
  0 siblings, 2 replies; 6+ messages in thread
From: Xiao Guangrong @ 2010-04-16 13:25 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, KVM list, LKML

- calculate the number of zapped pages properly in mmu_zap_unsync_children()
- calculate the number of freed pages properly in kvm_mmu_change_mmu_pages()

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |   12 ++++++++----
 1 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a23ca75..41cccd4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1481,13 +1481,16 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 		struct kvm_mmu_page *sp;
 
 		for_each_sp(pages, sp, parents, i) {
+			if (list_empty(&kvm->arch.active_mmu_pages))
+				goto exit;
+
 			kvm_mmu_zap_page(kvm, sp);
 			mmu_pages_clear_parents(&parents);
+			zapped++;
 		}
-		zapped += pages.nr;
 		kvm_mmu_pages_init(parent, &parents, &pages);
 	}
-
+exit:
 	return zapped;
 }
 
@@ -1540,7 +1543,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			kvm_mmu_zap_page(kvm, page);
+			used_pages -= kvm_mmu_zap_page(kvm, page);
 			used_pages--;
 		}
 		kvm->arch.n_free_mmu_pages = 0;
@@ -1589,7 +1592,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 		    && !sp->role.invalid) {
 			pgprintk("%s: zap %lx %x\n",
 				 __func__, gfn, sp->role.word);
-			kvm_mmu_zap_page(kvm, sp);
+			if (kvm_mmu_zap_page(kvm, sp))
+				nn = bucket->first;
 		}
 	}
 }
-- 
1.6.1.2
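
A note on the accounting fix in the second hunk: kvm_mmu_zap_page()
can destroy more shadow pages than the one it was handed (it zaps
unsync children first), and its return value reports that count. A
hedged reading of the fixed loop, assuming the return value does not
include 'page' itself, which would explain why the explicit decrement
stays:

	/* illustrative annotation, not kernel source */
	used_pages -= kvm_mmu_zap_page(kvm, page);	/* children zapped */
	used_pages--;					/* 'page' itself */

Before the fix, each iteration decremented used_pages by exactly one
no matter how many pages actually went away, so used_pages stayed too
high, the while loop kept zapping, and an emptied active_mmu_pages
list would hand container_of() the list head itself.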



* [PATCH v2 3/5] KVM MMU: cleanup for restart hlist walking
  2010-04-16 13:25 ` [PATCH v2 2/5] KVM MMU: fix kvm_mmu_zap_page() and its calling path Xiao Guangrong
  2010-04-15 17:54   ` Marcelo Tosatti
@ 2010-04-16 13:26   ` Xiao Guangrong
  2010-04-16 13:27     ` [PATCH v2 4/5] KVM MMU: reduce 'struct kvm_mmu_page' size Xiao Guangrong
  1 sibling, 1 reply; 6+ messages in thread
From: Xiao Guangrong @ 2010-04-16 13:26 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, KVM list, LKML

Quote from Avi:

|Just change the assignment to a 'goto restart;' please,
|I don't like playing with list_for_each internals. 

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |   15 ++++++++++-----
 1 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 41cccd4..a32c60c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1567,13 +1567,14 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	r = 0;
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
+restart:
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.direct) {
 			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
 			r = 1;
 			if (kvm_mmu_zap_page(kvm, sp))
-				n = bucket->first;
+				goto restart;
 		}
 	return r;
 }
@@ -1587,13 +1588,14 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
+restart:
 	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
 		if (sp->gfn == gfn && !sp->role.direct
 		    && !sp->role.invalid) {
 			pgprintk("%s: zap %lx %x\n",
 				 __func__, gfn, sp->role.word);
 			if (kvm_mmu_zap_page(kvm, sp))
-				nn = bucket->first;
+				goto restart;
 		}
 	}
 }
@@ -2673,6 +2675,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	}
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
+
+restart:
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
 		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
 			continue;
@@ -2693,7 +2697,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
 				 gpa, bytes, sp->role.word);
 			if (kvm_mmu_zap_page(vcpu->kvm, sp))
-				n = bucket->first;
+				goto restart;
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
@@ -2902,10 +2906,11 @@ void kvm_mmu_zap_all(struct kvm *kvm)
 	struct kvm_mmu_page *sp, *node;
 
 	spin_lock(&kvm->mmu_lock);
+restart:
 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
 		if (kvm_mmu_zap_page(kvm, sp))
-			node = container_of(kvm->arch.active_mmu_pages.next,
-					    struct kvm_mmu_page, link);
+			goto restart;
+
 	spin_unlock(&kvm->mmu_lock);
 
 	kvm_flush_remote_tlbs(kvm);
-- 
1.6.1.2
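
The idiom above generalizes: hlist_for_each_entry_safe() only protects
the walk against removal of the current entry, so when the loop body
can free *other* entries in the same bucket (kvm_mmu_zap_page()
returning nonzero signals exactly that), the saved cursor may be
stale. A schematic sketch of the pattern, with match() standing in as
a hypothetical predicate:

	restart:
		hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
			if (!match(sp))
				continue;
			if (kvm_mmu_zap_page(kvm, sp))
				/* bucket may have changed under us */
				goto restart;
		}

Re-scanning surviving entries from the bucket head costs a little
extra work but, per Avi's comment, avoids reaching into the
list_for_each internals.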



* [PATCH v2 4/5] KVM MMU: reduce 'struct kvm_mmu_page' size
  2010-04-16 13:26   ` [PATCH v2 3/5] KVM MMU: cleanup for restart hlist walking Xiao Guangrong
@ 2010-04-16 13:27     ` Xiao Guangrong
  2010-04-16 13:29       ` [PATCH v2 5/5] KVM MMU: remove unused parameter in mmu_parent_walk() Xiao Guangrong
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Guangrong @ 2010-04-16 13:27 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, KVM list, LKML

Define 'multimapped' as 'bool' and pack it next to the existing
'bool unsync', slightly reducing the size of 'struct kvm_mmu_page'.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0c49c88..cace232 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -202,9 +202,9 @@ struct kvm_mmu_page {
 	 * in this shadow page.
 	 */
 	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
-	int multimapped;         /* More than one parent_pte? */
-	int root_count;          /* Currently serving as active root */
+	bool multimapped;         /* More than one parent_pte? */
 	bool unsync;
+	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	union {
 		u64 *parent_pte;               /* !multimapped */
-- 
1.6.1.2
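
The saving here comes purely from field layout: an 'int' flag next to
a lone 'bool' wastes padding, while two adjacent one-byte 'bool's can
share it. A self-contained toy illustrating the effect (not the kernel
struct; sizes assume a typical LP64 ABI such as x86-64):

	#include <stdbool.h>
	#include <stdio.h>

	struct before {			/* int flag, bools apart */
		int  multimapped;	/* 4 bytes */
		int  root_count;	/* 4 bytes */
		bool unsync;		/* 1 byte + 3 bytes padding */
		unsigned int unsync_children;
	};				/* typically 16 bytes */

	struct after {			/* bools grouped */
		bool multimapped;	/* 1 byte */
		bool unsync;		/* 1 byte + 2 bytes padding */
		int  root_count;
		unsigned int unsync_children;
	};				/* typically 12 bytes */

	int main(void)
	{
		printf("before: %zu, after: %zu\n",
		       sizeof(struct before), sizeof(struct after));
		return 0;
	}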



* [PATCH v2 5/5] KVM MMU: remove unused parameter in mmu_parent_walk()
  2010-04-16 13:27     ` [PATCH v2 4/5] KVM MMU: reduce 'struct kvm_mmu_page' size Xiao Guangrong
@ 2010-04-16 13:29       ` Xiao Guangrong
  0 siblings, 0 replies; 6+ messages in thread
From: Xiao Guangrong @ 2010-04-16 13:29 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, KVM list, LKML

The 'vcpu' parameter is unused; remove it from mmu_parent_walk() and
the mmu_parent_walk_fn callbacks.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |   24 +++++++++++-------------
 1 files changed, 11 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a32c60c..2f8ae9e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -172,7 +172,7 @@ struct kvm_shadow_walk_iterator {
 	     shadow_walk_okay(&(_walker));			\
 	     shadow_walk_next(&(_walker)))
 
-typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
+typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp);
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
@@ -1000,8 +1000,7 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 }
 
 
-static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			    mmu_parent_walk_fn fn)
+static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
 {
 	struct kvm_pte_chain *pte_chain;
 	struct hlist_node *node;
@@ -1010,8 +1009,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 
 	if (!sp->multimapped && sp->parent_pte) {
 		parent_sp = page_header(__pa(sp->parent_pte));
-		fn(vcpu, parent_sp);
-		mmu_parent_walk(vcpu, parent_sp, fn);
+		fn(parent_sp);
+		mmu_parent_walk(parent_sp, fn);
 		return;
 	}
 	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
@@ -1019,8 +1018,8 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			if (!pte_chain->parent_ptes[i])
 				break;
 			parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
-			fn(vcpu, parent_sp);
-			mmu_parent_walk(vcpu, parent_sp, fn);
+			fn(parent_sp);
+			mmu_parent_walk(parent_sp, fn);
 		}
 }
 
@@ -1057,16 +1056,15 @@ static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
 		}
 }
 
-static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int unsync_walk_fn(struct kvm_mmu_page *sp)
 {
 	kvm_mmu_update_parents_unsync(sp);
 	return 1;
 }
 
-static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
-					struct kvm_mmu_page *sp)
+static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
-	mmu_parent_walk(vcpu, sp, unsync_walk_fn);
+	mmu_parent_walk(sp, unsync_walk_fn);
 	kvm_mmu_update_parents_unsync(sp);
 }
 
@@ -1344,7 +1342,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			if (sp->unsync_children) {
 				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
-				kvm_mmu_mark_parents_unsync(vcpu, sp);
+				kvm_mmu_mark_parents_unsync(sp);
 			}
 			trace_kvm_mmu_get_page(sp, false);
 			return sp;
@@ -1761,7 +1759,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
 
-	kvm_mmu_mark_parents_unsync(vcpu, sp);
+	kvm_mmu_mark_parents_unsync(sp);
 
 	mmu_convert_notrap(sp);
 	return 0;
-- 
1.6.1.2

