* [PATCH] Revert "KVM: MMU: Move kvm_mmu_free_some_pages() into kvm_mmu_alloc_page()"
@ 2013-03-29  5:05 Takuya Yoshikawa
  2013-04-04  7:27 ` Gleb Natapov
  0 siblings, 1 reply; 2+ messages in thread
From: Takuya Yoshikawa @ 2013-03-29  5:05 UTC (permalink / raw)
  To: mtosatti, gleb; +Cc: kvm

With the following commit, shadow pages can be zapped at random during
a shadow page table walk:
  KVM: MMU: Move kvm_mmu_free_some_pages() into kvm_mmu_alloc_page()
  7ddca7e43c8f28f9419da81a0e7730b66aa60fe9

This patch reverts it and fixes __direct_map() and FNAME(fetch)().

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
---
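A rough sketch of the ordering this revert restores; it is not part of the
diff below and assumes the usual __direct_map()/FNAME(fetch)() ->
kvm_mmu_get_page() -> kvm_mmu_alloc_page() call chain:

	/*
	 * With the reverted commit, zapping could happen in the middle of
	 * an ongoing walk:
	 *
	 *   __direct_map() or FNAME(fetch)()
	 *     -> kvm_mmu_get_page()
	 *          -> kvm_mmu_alloc_page()
	 *               -> make_mmu_pages_available()   zaps pages the
	 *                                               walker may still
	 *                                               reference
	 *
	 * After this patch, each fault path frees pages once, up front,
	 * under mmu_lock and before the walk starts, e.g. in
	 * nonpaging_map():
	 */
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	make_mmu_pages_available(vcpu);
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
			 prefault);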
 arch/x86/kvm/mmu.c         |   11 +++++++----
 arch/x86/kvm/paging_tmpl.h |    1 +
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 633e30c..004cc87 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1501,15 +1501,11 @@ static void drop_parent_pte(struct kvm_mmu_page *sp,
 	mmu_spte_clear_no_track(parent_pte);
 }
 
-static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
-
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 					       u64 *parent_pte, int direct)
 {
 	struct kvm_mmu_page *sp;
 
-	make_mmu_pages_available(vcpu);
-
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 	if (!direct)
@@ -2806,6 +2802,7 @@ exit:
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
+static void make_mmu_pages_available(struct kvm_vcpu *vcpu);
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 			 gfn_t gfn, bool prefault)
@@ -2847,6 +2844,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
+	make_mmu_pages_available(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
@@ -2924,6 +2922,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		spin_lock(&vcpu->kvm->mmu_lock);
+		make_mmu_pages_available(vcpu);
 		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
 				      1, ACC_ALL, NULL);
 		++sp->root_count;
@@ -2935,6 +2934,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 			ASSERT(!VALID_PAGE(root));
 			spin_lock(&vcpu->kvm->mmu_lock);
+			make_mmu_pages_available(vcpu);
 			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
 					      i << 30,
 					      PT32_ROOT_LEVEL, 1, ACC_ALL,
@@ -2973,6 +2973,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 
 		spin_lock(&vcpu->kvm->mmu_lock);
+		make_mmu_pages_available(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
 				      0, ACC_ALL, NULL);
 		root = __pa(sp->spt);
@@ -3006,6 +3007,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 				return 1;
 		}
 		spin_lock(&vcpu->kvm->mmu_lock);
+		make_mmu_pages_available(vcpu);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, 0,
 				      ACC_ALL, NULL);
@@ -3311,6 +3313,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
+	make_mmu_pages_available(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
 	r = __direct_map(vcpu, gpa, write, map_writable,
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index af143f0..da20860 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -627,6 +627,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		goto out_unlock;
 
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
+	make_mmu_pages_available(vcpu);
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
-- 
1.7.5.4



* Re: [PATCH] Revert "KVM: MMU: Move kvm_mmu_free_some_pages() into kvm_mmu_alloc_page()"
  2013-03-29  5:05 [PATCH] Revert "KVM: MMU: Move kvm_mmu_free_some_pages() into kvm_mmu_alloc_page()" Takuya Yoshikawa
@ 2013-04-04  7:27 ` Gleb Natapov
  0 siblings, 0 replies; 2+ messages in thread
From: Gleb Natapov @ 2013-04-04  7:27 UTC (permalink / raw)
  To: Takuya Yoshikawa; +Cc: mtosatti, kvm

On Fri, Mar 29, 2013 at 02:05:26PM +0900, Takuya Yoshikawa wrote:
> With the following commit, shadow pages can be zapped at random during
> a shadow page table walk:
>   KVM: MMU: Move kvm_mmu_free_some_pages() into kvm_mmu_alloc_page()
>   7ddca7e43c8f28f9419da81a0e7730b66aa60fe9
> 
> This patch reverts it and fixes __direct_map() and FNAME(fetch)().
> 
> Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Applied, thanks.

--
			Gleb.

