* [PATCH v2 0/5] KVM: MMU: fix release pfn in mmu code
From: Xiao Guangrong @ 2012-09-14  9:56 UTC
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

Releasing a pfn in the MMU code is a little special, because we allow a
no-slot pfn to go through the spte walk on the page fault path; that
means we cannot directly call kvm_release_pfn_clean on the page fault
failure path.

This patchset fixes the bug that releases a no-slot pfn on the failure
path, and cleans up all the paths where kvm_release_pfn_clean is called.
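
As an illustration, a minimal sketch of the release pattern the series
converges on (the helper name here is hypothetical; the patches open-code
this check at each call site):

	/*
	 * A no-slot/error pfn is an encoded error value, not a refcounted
	 * page, so it must never be passed to kvm_release_pfn_clean().
	 */
	static void release_pfn_if_valid(pfn_t pfn)
	{
		if (!is_error_pfn(pfn))
			kvm_release_pfn_clean(pfn);
	}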



* [PATCH v2 1/5] KVM: MMU: release noslot pfn on the fail path properly
From: Xiao Guangrong @ 2012-09-14  9:57 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, Marcelo Tosatti, LKML, KVM

We cannot directly call kvm_release_pfn_clean to release the pfn,
since we can meet a noslot pfn, which is used to cache mmio info in
the spte.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c         |    6 ++++--
 arch/x86/kvm/paging_tmpl.h |    6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aa0b469..f74c63a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2877,7 +2877,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return 0;
 }

@@ -3345,7 +3346,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return 0;
 }

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bf8c42b..c004ab6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -544,7 +544,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 out_gpte_changed:
 	if (sp)
 		kvm_mmu_put_page(sp, it.sptep);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return NULL;
 }

@@ -645,7 +646,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 	return 0;
 }

-- 
1.7.7.6



* [PATCH v2 2/5] KVM: MMU: do not release pfn in mmu_set_spte
From: Xiao Guangrong @ 2012-09-14  9:57 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, Marcelo Tosatti, LKML, KVM

Move the pfn release out of mmu_set_spte and into its callers; this
helps us clean up the pfn release paths in the later patches.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c         |   29 ++++++++++++++---------------
 arch/x86/kvm/paging_tmpl.h |   18 +++++++++++-------
 2 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f74c63a..29ce28b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2496,9 +2496,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				rmap_recycle(vcpu, sptep, gfn);
 		}
 	}
-
-	if (!is_error_pfn(pfn))
-		kvm_release_pfn_clean(pfn);
 }

 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2535,12 +2532,15 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 	if (ret <= 0)
 		return -1;

-	for (i = 0; i < ret; i++, gfn++, start++)
+	for (i = 0; i < ret; i++, gfn++, start++) {
 		mmu_set_spte(vcpu, start, ACC_ALL,
 			     access, 0, 0, NULL,
 			     sp->role.level, gfn,
 			     page_to_pfn(pages[i]), true, true);

+		kvm_release_page_clean(pages[i]);
+	}
+
 	return 0;
 }

@@ -2863,23 +2863,22 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 		return r;

 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+		r = 0;
 		goto out_unlock;
+	}
+
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
 			 prefault);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-
-	return r;

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	if (!is_error_pfn(pfn))
 		kvm_release_pfn_clean(pfn);
-	return 0;
+	return r;
 }


@@ -3333,22 +3332,22 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		return r;

 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+		r = 0;
 		goto out_unlock;
+	}
+
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable,
 			 level, gfn, pfn, prefault);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	return r;

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	if (!is_error_pfn(pfn))
 		kvm_release_pfn_clean(pfn);
-	return 0;
+	return r;
 }

 static void nonpaging_free(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c004ab6..92f466c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -380,6 +380,9 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
 		     NULL, PT_PAGE_TABLE_LEVEL,
 		     gpte_to_gfn(gpte), pfn, true, true);
+
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 }

 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
@@ -452,6 +455,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
 			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
 			     pfn, true, true);
+		if (!is_error_pfn(pfn))
+			kvm_release_pfn_clean(pfn);
 	}
 }

@@ -544,8 +549,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 out_gpte_changed:
 	if (sp)
 		kvm_mmu_put_page(sp, it.sptep);
-	if (!is_error_pfn(pfn))
-		kvm_release_pfn_clean(pfn);
+
 	return NULL;
 }

@@ -625,8 +629,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		return r;

 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq))
+	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+		r = 0;
 		goto out_unlock;
+	}

 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	kvm_mmu_free_some_pages(vcpu);
@@ -640,15 +646,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,

 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	return emulate;
+	r = emulate;

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	if (!is_error_pfn(pfn))
 		kvm_release_pfn_clean(pfn);
-	return 0;
+	return r;
 }

 static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
-- 
1.7.7.6



* [PATCH v2 3/5] KVM: MMU: cleanup FNAME(page_fault)
From: Xiao Guangrong @ 2012-09-14  9:58 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, Marcelo Tosatti, LKML, KVM

Let it return the emulate state instead of the spte, like __direct_map does.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/paging_tmpl.h |   28 ++++++++++------------------
 1 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 92f466c..0adf376 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -463,20 +463,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int user_fault, int write_fault, int hlevel,
-			 int *emulate, pfn_t pfn, bool map_writable,
-			 bool prefault)
+			 pfn_t pfn, bool map_writable, bool prefault)
 {
-	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp = NULL;
-	int top_level;
-	unsigned direct_access;
 	struct kvm_shadow_walk_iterator it;
+	unsigned direct_access, access = gw->pt_access;
+	int top_level, emulate = 0;

 	if (!is_present_gpte(gw->ptes[gw->level - 1]))
-		return NULL;
+		return 0;

 	direct_access = gw->pte_access;

@@ -540,17 +538,17 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,

 	clear_sp_write_flooding_count(it.sptep);
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
-		     user_fault, write_fault, emulate, it.level,
+		     user_fault, write_fault, &emulate, it.level,
 		     gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

-	return it.sptep;
+	return emulate;

 out_gpte_changed:
 	if (sp)
 		kvm_mmu_put_page(sp, it.sptep);

-	return NULL;
+	return 0;
 }

 /*
@@ -573,8 +571,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
 	struct guest_walker walker;
-	u64 *sptep;
-	int emulate = 0;
 	int r;
 	pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
@@ -638,15 +634,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	kvm_mmu_free_some_pages(vcpu);
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
-	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-			     level, &emulate, pfn, map_writable, prefault);
-	(void)sptep;
-	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
-		 sptep, *sptep, emulate);
+	r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+			 level, pfn, map_writable, prefault);

 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
-	r = emulate;

 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
-- 
1.7.7.6



* [PATCH v2 4/5] KVM: MMU: introduce page_fault_start and page_fault_end
From: Xiao Guangrong @ 2012-09-14  9:59 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, Marcelo Tosatti, LKML, KVM

Wrap the common operations into these two functions

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/mmu.c         |   53 +++++++++++++++++++++++++++----------------
 arch/x86/kvm/paging_tmpl.h |   16 +++++--------
 2 files changed, 39 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 29ce28b..7e7b8cd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2825,6 +2825,29 @@ exit:
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);

+static bool
+page_fault_start(struct kvm_vcpu *vcpu, gfn_t *gfnp, pfn_t *pfnp, int *levelp,
+		 bool force_pt_level, unsigned long mmu_seq)
+{
+	spin_lock(&vcpu->kvm->mmu_lock);
+	if (mmu_notifier_retry(vcpu, mmu_seq))
+		return false;
+
+	kvm_mmu_free_some_pages(vcpu);
+	if (likely(!force_pt_level))
+		transparent_hugepage_adjust(vcpu, gfnp, pfnp, levelp);
+
+	return true;
+}
+
+static void page_fault_end(struct kvm_vcpu *vcpu, pfn_t pfn)
+{
+	spin_unlock(&vcpu->kvm->mmu_lock);
+
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
+}
+
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 			 gfn_t gfn, bool prefault)
 {
@@ -2862,22 +2885,17 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
 		return r;

-	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+	if (!page_fault_start(vcpu, &gfn, &pfn, &level, force_pt_level,
+	      mmu_seq)) {
 		r = 0;
-		goto out_unlock;
+		goto exit;
 	}

-	kvm_mmu_free_some_pages(vcpu);
-	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
 			 prefault);

-out_unlock:
-	spin_unlock(&vcpu->kvm->mmu_lock);
-	if (!is_error_pfn(pfn))
-		kvm_release_pfn_clean(pfn);
+exit:
+	page_fault_end(vcpu, pfn);
 	return r;
 }

@@ -3331,22 +3349,17 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
 		return r;

-	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+	if (!page_fault_start(vcpu, &gfn, &pfn, &level, force_pt_level,
+	      mmu_seq)) {
 		r = 0;
-		goto out_unlock;
+		goto exit;
 	}

-	kvm_mmu_free_some_pages(vcpu);
-	if (likely(!force_pt_level))
-		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable,
 			 level, gfn, pfn, prefault);

-out_unlock:
-	spin_unlock(&vcpu->kvm->mmu_lock);
-	if (!is_error_pfn(pfn))
-		kvm_release_pfn_clean(pfn);
+exit:
+	page_fault_end(vcpu, pfn);
 	return r;
 }

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0adf376..1a738c5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -624,26 +624,22 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 				walker.gfn, pfn, walker.pte_access, &r))
 		return r;

-	spin_lock(&vcpu->kvm->mmu_lock);
-	if (mmu_notifier_retry(vcpu, mmu_seq)) {
+	if (!page_fault_start(vcpu, &walker.gfn, &pfn, &level,
+	      force_pt_level, mmu_seq)) {
 		r = 0;
-		goto out_unlock;
+		goto exit;
 	}

 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
-	kvm_mmu_free_some_pages(vcpu);
-	if (!force_pt_level)
-		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
+
 	r = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 			 level, pfn, map_writable, prefault);

 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

-out_unlock:
-	spin_unlock(&vcpu->kvm->mmu_lock);
-	if (!is_error_pfn(pfn))
-		kvm_release_pfn_clean(pfn);
+exit:
+	page_fault_end(vcpu, pfn);
 	return r;
 }

-- 
1.7.7.6



* [PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)
From: Xiao Guangrong @ 2012-09-14  9:59 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, Marcelo Tosatti, LKML, KVM

The only difference between FNAME(update_pte) and FNAME(pte_prefetch)
is that the former is allowed to prefetch a gfn from a dirty logged slot,
so introduce a common function to prefetch the spte.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/paging_tmpl.h |   50 ++++++++++++++++---------------------------
 1 files changed, 19 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1a738c5..9742eab 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -356,35 +356,45 @@ no_present:
 	return true;
 }

-static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			      u64 *spte, const void *pte)
+static void
+FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
 {
-	pt_element_t gpte;
 	unsigned pte_access;
+	gfn_t gfn;
 	pfn_t pfn;

-	gpte = *(const pt_element_t *)pte;
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return;

 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
+
+	gfn = gpte_to_gfn(gpte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
-	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
+	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
+			no_dirty_log && (pte_access & ACC_WRITE_MASK));
 	if (mmu_invalid_pfn(pfn))
 		return;

 	/*
 	 * we call mmu_set_spte() with host_writable = true because that
-	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
-		     NULL, PT_PAGE_TABLE_LEVEL,
-		     gpte_to_gfn(gpte), pfn, true, true);
+		     NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);

 	if (!is_error_pfn(pfn))
 		kvm_release_pfn_clean(pfn);
 }

+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			      u64 *spte, const void *pte)
+{
+	pt_element_t gpte = *(const pt_element_t *)pte;
+
+	return FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
+}
+
 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
 				struct guest_walker *gw, int level)
 {
@@ -428,35 +438,13 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 	spte = sp->spt + i;

 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
-		pt_element_t gpte;
-		unsigned pte_access;
-		gfn_t gfn;
-		pfn_t pfn;
-
 		if (spte == sptep)
 			continue;

 		if (is_shadow_present_pte(*spte))
 			continue;

-		gpte = gptep[i];
-
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
-			continue;
-
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
-								  true);
-		gfn = gpte_to_gfn(gpte);
-		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
-				      pte_access & ACC_WRITE_MASK);
-		if (mmu_invalid_pfn(pfn))
-			break;
-
-		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
-			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
-			     pfn, true, true);
-		if (!is_error_pfn(pfn))
-			kvm_release_pfn_clean(pfn);
+		return FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);
 	}
 }

-- 
1.7.7.6



* Re: [PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)
From: Xiao Guangrong @ 2012-09-14 10:13 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, Marcelo Tosatti, LKML, KVM

On 09/14/2012 05:59 PM, Xiao Guangrong wrote:

> +		return FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);

Sorry, this was wrong. Here is the updated patch.
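
The only change from the earlier posting is dropping the "return", so
the prefetch loop goes on to the remaining entries instead of returning
after the first candidate gpte:

-		return FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);
+		FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);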

[PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)

The only difference between FNAME(update_pte) and FNAME(pte_prefetch)
is that the former is allowed to prefetch a gfn from a dirty logged slot,
so introduce a common function to prefetch the spte.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
 arch/x86/kvm/paging_tmpl.h |   50 ++++++++++++++++---------------------------
 1 files changed, 19 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 1a738c5..32facf7 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -356,35 +356,45 @@ no_present:
 	return true;
 }

-static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			      u64 *spte, const void *pte)
+static void
+FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
 {
-	pt_element_t gpte;
 	unsigned pte_access;
+	gfn_t gfn;
 	pfn_t pfn;

-	gpte = *(const pt_element_t *)pte;
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return;

 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
+
+	gfn = gpte_to_gfn(gpte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
-	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
+	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
+			no_dirty_log && (pte_access & ACC_WRITE_MASK));
 	if (mmu_invalid_pfn(pfn))
 		return;

 	/*
 	 * we call mmu_set_spte() with host_writable = true because that
-	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
+	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
-		     NULL, PT_PAGE_TABLE_LEVEL,
-		     gpte_to_gfn(gpte), pfn, true, true);
+		     NULL, PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);

 	if (!is_error_pfn(pfn))
 		kvm_release_pfn_clean(pfn);
 }

+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+			      u64 *spte, const void *pte)
+{
+	pt_element_t gpte = *(const pt_element_t *)pte;
+
+	return FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
+}
+
 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
 				struct guest_walker *gw, int level)
 {
@@ -428,35 +438,13 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 	spte = sp->spt + i;

 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
-		pt_element_t gpte;
-		unsigned pte_access;
-		gfn_t gfn;
-		pfn_t pfn;
-
 		if (spte == sptep)
 			continue;

 		if (is_shadow_present_pte(*spte))
 			continue;

-		gpte = gptep[i];
-
-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
-			continue;
-
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
-								  true);
-		gfn = gpte_to_gfn(gpte);
-		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
-				      pte_access & ACC_WRITE_MASK);
-		if (mmu_invalid_pfn(pfn))
-			break;
-
-		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
-			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
-			     pfn, true, true);
-		if (!is_error_pfn(pfn))
-			kvm_release_pfn_clean(pfn);
+		FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);
 	}
 }

-- 
1.7.7.6



* Re: [PATCH v2 1/5] KVM: MMU: release noslot pfn on the fail path properly
From: Marcelo Tosatti @ 2012-09-15 15:13 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Fri, Sep 14, 2012 at 05:57:22PM +0800, Xiao Guangrong wrote:
> We cannot directly call kvm_release_pfn_clean to release the pfn,
> since we can meet a noslot pfn, which is used to cache mmio info in
> the spte.
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/kvm/mmu.c         |    6 ++++--
>  arch/x86/kvm/paging_tmpl.h |    6 ++++--
>  2 files changed, 8 insertions(+), 4 deletions(-)

It's clearer to the reader if is_invalid_pfn() is used instead of
is_error_pfn().

BTW, how about killing this unused helper:

static bool mmu_invalid_pfn(pfn_t pfn)
{
        return unlikely(is_invalid_pfn(pfn));
}

This check can be done inline.
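
That is, each caller would then do something like:

	if (unlikely(is_invalid_pfn(pfn)))
		return;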

> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index aa0b469..f74c63a 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2877,7 +2877,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
> 
>  out_unlock:
>  	spin_unlock(&vcpu->kvm->mmu_lock);
> -	kvm_release_pfn_clean(pfn);
> +	if (!is_error_pfn(pfn))
> +		kvm_release_pfn_clean(pfn);
>  	return 0;
>  }



* Re: [PATCH v2 4/5] KVM: MMU: introduce page_fault_start and page_fault_end
From: Marcelo Tosatti @ 2012-09-15 15:25 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Fri, Sep 14, 2012 at 05:59:06PM +0800, Xiao Guangrong wrote:
> Wrap the common operations into these two functions
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>

Why? I think people are used to 

spin_lock(lock)
sequence
spin_unlock(lock)

So it's easy to verify whether accesses to data structures are protected.

Unrelated to this patch, one opportunity I see to simplify this
code is:

- error pfn / mmio pfn / invalid pfn relation

Have the meaning of these bits unified in a single function/helper; see
comment to patch 1 (perhaps you can further improve).



* Re: [PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)
From: Marcelo Tosatti @ 2012-09-15 15:31 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Fri, Sep 14, 2012 at 06:13:11PM +0800, Xiao Guangrong wrote:
> On 09/14/2012 05:59 PM, Xiao Guangrong wrote:
> 
> > +		return FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);
> 
> Sorry, this was wrong. Here is the updated patch.
> 
> [PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)
> 
> The only difference between FNAME(update_pte) and FNAME(pte_prefetch)
> is that the former is allowed to prefetch a gfn from a dirty logged slot,
> so introduce a common function to prefetch the spte.
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>

IMHO, for the human reader, the meaning of the two functions is
different and therefore separation is justified. Moreover, they already
share common code via FNAME(prefetch_invalid_gpte) (which, BTW, is a
confusing name because the function does not prefetch a gpte; it
validates it).



* Re: [PATCH v2 1/5] KVM: MMU: release noslot pfn on the fail path properly
From: Xiao Guangrong @ 2012-09-18  7:46 UTC
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 09/15/2012 11:13 PM, Marcelo Tosatti wrote:
> On Fri, Sep 14, 2012 at 05:57:22PM +0800, Xiao Guangrong wrote:
>> We cannot directly call kvm_release_pfn_clean to release the pfn,
>> since we can meet a noslot pfn, which is used to cache mmio info in
>> the spte.
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
>> ---
>>  arch/x86/kvm/mmu.c         |    6 ++++--
>>  arch/x86/kvm/paging_tmpl.h |    6 ++++--
>>  2 files changed, 8 insertions(+), 4 deletions(-)
> 
> It's clearer to the reader if is_invalid_pfn() is used instead of
> is_error_pfn().
> 
> BTW, how about killing this unused helper:
> 
> static bool mmu_invalid_pfn(pfn_t pfn)
> {
>         return unlikely(is_invalid_pfn(pfn));
> }
> 
> This check can be done inline.

Okay, will do. Thanks, Marcelo!




* Re: [PATCH v2 4/5] KVM: MMU: introduce page_fault_start and page_fault_end
From: Xiao Guangrong @ 2012-09-18  8:15 UTC
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 09/15/2012 11:25 PM, Marcelo Tosatti wrote:
> On Fri, Sep 14, 2012 at 05:59:06PM +0800, Xiao Guangrong wrote:
>> Wrap the common operations into these two functions
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> 
> Why? I think people are used to 
> 
> spin_lock(lock)
> sequence
> spin_unlock(lock)

Marcelo,

There are many functions in the kernel that use this style, wrapping the
lock into _start and _end functions (e.g. cgroup_pidlist_start and
cgroup_pidlist_stop in kernel/cgroup.c).

Actually, I just wanted to remove the duplicated ugly code below:

	if (!is_error_pfn(pfn))
		kvm_release_pfn_clean(pfn);

> 
> So it's easy to verify whether accesses to data structures are protected.
> 
> Unrelated to this patch, one opportunity I see to simplify this
> code is:
> 
> - error pfn / mmio pfn / invalid pfn relation
> 
> Have the meaning of these bits unified in a single function/helper; see
> comment to patch 1 (perhaps you can further improve).

Sorry, could you give more detail?



* Re: [PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)
From: Xiao Guangrong @ 2012-09-18  8:26 UTC
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 09/15/2012 11:31 PM, Marcelo Tosatti wrote:
> On Fri, Sep 14, 2012 at 06:13:11PM +0800, Xiao Guangrong wrote:
>> On 09/14/2012 05:59 PM, Xiao Guangrong wrote:
>>
>>> +		return FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);
>>
>> Sorry, this was wrong. Here is the updated patch.
>>
>> [PATCH v2 5/5] KVM: MMU: introduce FNAME(prefetch_gpte)
>>
>> The only difference between FNAME(update_pte) and FNAME(pte_prefetch)
>> is that the former is allowed to prefetch a gfn from a dirty logged slot,
>> so introduce a common function to prefetch the spte.
>>
>> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> 
> IMHO, for the human reader, the meaning of the two functions is
> different and therefore separation is justified. Moreover, they already

Actually, these two functions do the same thing: both of them prefetch
a spte based on a gpte. The only difference is that, in the case of
update_pte, there is a high chance that the spte will be accessed soon
(that is why it is allowed to prefetch a dirty logged gfn).
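
With the common helper, that difference boils down to the no_dirty_log
flag at the two call sites (as in the patch):

	/* FNAME(update_pte): the spte is likely to be accessed soon, so
	 * prefetching a dirty logged gfn is acceptable. */
	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);

	/* FNAME(pte_prefetch): purely speculative, so skip gfns from
	 * dirty logged slots. */
	FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true);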

> share common code via FNAME(prefetch_invalid_gpte) (which, BTW, is a
> confusing name because the function does not prefetch a gpte; it
> validates it).

I think the name is not too bad, since the function does not just
validate the gpte: it can also drop the spte if the gpte is invalid,
which is likewise behaviour that modifies the spte based on the gpte. ;)



* Re: [PATCH v2 4/5] KVM: MMU: introduce page_fault_start and page_fault_end
From: Marcelo Tosatti @ 2012-09-18 23:43 UTC
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Tue, Sep 18, 2012 at 04:15:32PM +0800, Xiao Guangrong wrote:
> On 09/15/2012 11:25 PM, Marcelo Tosatti wrote:
> > On Fri, Sep 14, 2012 at 05:59:06PM +0800, Xiao Guangrong wrote:
> >> Wrap the common operations into these two functions
> >>
> >> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> > 
> > Why? I think people are used to 
> > 
> > spin_lock(lock)
> > sequence
> > spin_unlock(lock)
> 
> Marcelo,
> 
> There are many functions in the kernel that use this style, wrapping the
> lock into _start and _end functions (e.g. cgroup_pidlist_start and
> cgroup_pidlist_stop in kernel/cgroup.c).
> 
> Actually, I just wanted to remove the duplicated ugly code below:
> 
> 	if (!is_error_pfn(pfn))
> 		kvm_release_pfn_clean(pfn);
> 
> > 
> > So it's easy to verify whether accesses to data structures are protected.
> > 
> > Unrelated to this patch, one opportunity I see to simplify this
> > code is:
> > 
> > - error pfn / mmio pfn / invalid pfn relation
> > 
> > Have the meaning of these bits unified in a single function/helper; see
> > comment to patch 1 (perhaps you can further improve).
> 
> Sorry, could you give more detail?

We should force the reader of the code to understand error pfn / mmio pfn /
invalid pfn through a single helper. That is, avoid using error pfn at all.
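
Say, something like this (purely illustrative; the name is hypothetical):

	/*
	 * One place that spells out the pfn classes: a real pfn is backed
	 * by a memslot and refcounted; everything else (noslot/mmio pfns,
	 * hard-error pfns) is an encoded value that must not be released.
	 */
	static inline bool is_refcounted_pfn(pfn_t pfn)
	{
		return !is_error_pfn(pfn);
	}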



* Re: [PATCH v2 4/5] KVM: MMU: introduce page_fault_start and page_fault_end
From: Xiao Guangrong @ 2012-09-20  2:59 UTC
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 09/19/2012 07:43 AM, Marcelo Tosatti wrote:

>>> - error pfn / mmio pfn / invalid pfn relation
>>>
>>> Have the meaning of these bits unified in a single function/helper; see
>>> comment to patch 1 (perhaps you can further improve).
>>
>> Sorry, more detail?
> 
> We should force the reader of the code to understand error pfn / mmio pfn /
> invalid pfn through a single helper. That is, avoid using error pfn at all.

Reasonable suggestion; I will think about it more. Thank you, Marcelo!



* Re: [PATCH v2 3/5] KVM: MMU: cleanup FNAME(page_fault)
From: Avi Kivity @ 2012-09-20 10:54 UTC
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM

On 09/14/2012 12:58 PM, Xiao Guangrong wrote:
> Let it return the emulate state instead of the spte, like __direct_map does.
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/kvm/paging_tmpl.h |   28 ++++++++++------------------
>  1 files changed, 10 insertions(+), 18 deletions(-)
> 
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 92f466c..0adf376 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -463,20 +463,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
>  /*
>   * Fetch a shadow pte for a specific level in the paging hierarchy.
>   */
> -static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
> +static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
>  			 struct guest_walker *gw,
>  			 int user_fault, int write_fault, int hlevel,
> -			 int *emulate, pfn_t pfn, bool map_writable,
> -			 bool prefault)
> +			 pfn_t pfn, bool map_writable, bool prefault)
>  {

Please document the return value in the comment.
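
Something along these lines, say:

	/*
	 * Fetch a shadow pte for a specific level in the paging hierarchy.
	 *
	 * Returns the emulate state: nonzero if the guest access needs to
	 * be emulated, zero if the fault was handled without emulation (or
	 * nothing was done because the gpte is no longer valid).
	 */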


-- 
error compiling committee.c: too many arguments to function


* Re: [PATCH v2 4/5] KVM: MMU: introduce page_fault_start and page_fault_end
From: Avi Kivity @ 2012-09-20 10:57 UTC
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM

On 09/14/2012 12:59 PM, Xiao Guangrong wrote:
> Wrap the common operations into these two functions
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
> ---
>  arch/x86/kvm/mmu.c         |   53 +++++++++++++++++++++++++++----------------
>  arch/x86/kvm/paging_tmpl.h |   16 +++++--------
>  2 files changed, 39 insertions(+), 30 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 29ce28b..7e7b8cd 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -2825,6 +2825,29 @@ exit:
>  static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
>  			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
> 
> +static bool
> +page_fault_start(struct kvm_vcpu *vcpu, gfn_t *gfnp, pfn_t *pfnp, int *levelp,
> +		 bool force_pt_level, unsigned long mmu_seq)
> +{
> +	spin_lock(&vcpu->kvm->mmu_lock);
> +	if (mmu_notifier_retry(vcpu, mmu_seq))
> +		return false;
> +
> +	kvm_mmu_free_some_pages(vcpu);
> +	if (likely(!force_pt_level))
> +		transparent_hugepage_adjust(vcpu, gfnp, pfnp, levelp);
> +
> +	return true;
> +}
> +
> +static void page_fault_end(struct kvm_vcpu *vcpu, pfn_t pfn)
> +{
> +	spin_unlock(&vcpu->kvm->mmu_lock);
> +
> +	if (!is_error_pfn(pfn))
> +		kvm_release_pfn_clean(pfn);
> +}

Needs sparse annotations (__acquires, __releases).
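
For instance (a sketch on top of this patch; bodies unchanged):

	static bool
	page_fault_start(struct kvm_vcpu *vcpu, gfn_t *gfnp, pfn_t *pfnp,
			 int *levelp, bool force_pt_level,
			 unsigned long mmu_seq)
		__acquires(vcpu->kvm->mmu_lock)
	{
		...
	}

	static void page_fault_end(struct kvm_vcpu *vcpu, pfn_t pfn)
		__releases(vcpu->kvm->mmu_lock)
	{
		...
	}

Note that page_fault_start() returns with mmu_lock held even when it
returns false (the mmu_notifier retry case), so the annotation is
accurate for both outcomes and every caller must reach page_fault_end().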

These code blocks have nothing in common except for being shared.  Often
that's not good for maintainability because it means that further
changes can affect one path but not the other.  But we can try it out
and see.



-- 
error compiling committee.c: too many arguments to function
