* [PATCH v3 0/2] KVM: x86: Enable dirty logging lazily for huge pages
@ 2021-04-29  3:41 Keqian Zhu
  2021-04-29  3:41 ` [PATCH v3 1/2] KVM: x86: Support write protect gfn with min_level Keqian Zhu
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Keqian Zhu @ 2021-04-29  3:41 UTC (permalink / raw)
  To: linux-kernel, kvm, Paolo Bonzini, Sean Christopherson, Ben Gardon
  Cc: wanghaibin.wang

Hi,

Currently, when dirty logging is started with init-all-set, we write
protect huge pages but leave normal pages untouched, so that dirty
logging for the normal pages can be enabled lazily.

Enabling dirty logging lazily for huge pages is feasible too; it not
only reduces the time needed to start dirty logging, but also greatly
reduces the side effects on the guest when the dirty rate is high.
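
The gist of the change to kvm_mmu_slot_apply_flags() when dirty
logging is enabled, simplified from patch 2 (this sketch ignores the
cpu_dirty_log_size case; the literal diff is in that patch):

	/* Start of dirty logging: write protect nothing with init-all-set. */
	if (kvm_dirty_log_manual_protect_and_init_set(kvm))
		return;
	kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);

Huge pages are then write protected lazily, from
kvm_arch_mmu_enable_log_dirty_pt_masked(), and only for the ranges
that userspace actually clears.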

Thanks,
Keqian

Changelog:

v3:
 - Discussed with Ben and dropped the RFC comments. Thanks.

Keqian Zhu (2):
  KVM: x86: Support write protect gfn with min_level
  KVM: x86: Not wr-protect huge page with init_all_set dirty log

 arch/x86/kvm/mmu/mmu.c          | 38 ++++++++++++++++++++++++++-------
 arch/x86/kvm/mmu/mmu_internal.h |  3 ++-
 arch/x86/kvm/mmu/page_track.c   |  2 +-
 arch/x86/kvm/mmu/tdp_mmu.c      | 16 ++++++++++----
 arch/x86/kvm/mmu/tdp_mmu.h      |  3 ++-
 arch/x86/kvm/x86.c              | 37 +++++++++-----------------------
 6 files changed, 57 insertions(+), 42 deletions(-)

-- 
2.23.0



* [PATCH v3 1/2] KVM: x86: Support write protect gfn with min_level
  2021-04-29  3:41 [PATCH v3 0/2] KVM: x86: Enable dirty logging lazily for huge pages Keqian Zhu
@ 2021-04-29  3:41 ` Keqian Zhu
  2021-04-29  3:41 ` [PATCH v3 2/2] KVM: x86: Not wr-protect huge page with init_all_set dirty log Keqian Zhu
  2021-05-11  6:49 ` [PATCH v3 0/2] KVM: x86: Enable dirty logging lazily for huge pages Keqian Zhu
  2 siblings, 0 replies; 5+ messages in thread
From: Keqian Zhu @ 2021-04-29  3:41 UTC (permalink / raw)
  To: linux-kernel, kvm, Paolo Bonzini, Sean Christopherson, Ben Gardon
  Cc: wanghaibin.wang

Under some circumstances, we only need to write protect a gfn's large
page mappings. This prepares for write protecting large pages lazily
during dirty log tracking.

No functional or performance change is expected.
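
For example, a caller that only cares about the huge page mappings of
a gfn could now do something like the following (illustrative snippet,
not part of this patch; existing callers keep today's behaviour by
passing PG_LEVEL_4K):

	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_2M))
		kvm_flush_remote_tlbs(kvm);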

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 arch/x86/kvm/mmu/mmu.c          |  9 +++++----
 arch/x86/kvm/mmu/mmu_internal.h |  3 ++-
 arch/x86/kvm/mmu/page_track.c   |  2 +-
 arch/x86/kvm/mmu/tdp_mmu.c      | 16 ++++++++++++----
 arch/x86/kvm/mmu/tdp_mmu.h      |  3 ++-
 5 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 486aa94ecf1d..2ce5bc2ea46d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1265,20 +1265,21 @@ int kvm_cpu_dirty_log_size(void)
 }
 
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn)
+				    struct kvm_memory_slot *slot, u64 gfn,
+				    int min_level)
 {
 	struct kvm_rmap_head *rmap_head;
 	int i;
 	bool write_protected = false;
 
-	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+	for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
 
 	if (is_tdp_mmu_enabled(kvm))
 		write_protected |=
-			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
+			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
 
 	return write_protected;
 }
@@ -1288,7 +1289,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 	struct kvm_memory_slot *slot;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 1f6f98c76bdf..4c7c42bb8cf8 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -104,7 +104,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn);
+				    struct kvm_memory_slot *slot, u64 gfn,
+				    int min_level);
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 					u64 start_gfn, u64 pages);
 
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 34bb0ec69bd8..91a9f7e0fd91 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -100,7 +100,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 
 	if (mode == KVM_PAGE_TRACK_WRITE)
-		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
 			kvm_flush_remote_tlbs(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 018d82e73e31..6cf0284e2e6a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1338,15 +1338,22 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
-			      gfn_t gfn)
+			      gfn_t gfn, int min_level)
 {
 	struct tdp_iter iter;
 	u64 new_spte;
 	bool spte_set = false;
 
+	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
+
 	rcu_read_lock();
 
-	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
+	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+				   min_level, gfn, gfn + 1) {
+		if (!is_shadow_present_pte(iter.old_spte) ||
+		    !is_last_spte(iter.old_spte, iter.level))
+			continue;
+
 		if (!is_writable_pte(iter.old_spte))
 			break;
 
@@ -1368,7 +1375,8 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn)
+				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   int min_level)
 {
 	struct kvm_mmu_page *root;
 	int root_as_id;
@@ -1380,7 +1388,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 		if (root_as_id != slot->as_id)
 			continue;
 
-		spte_set |= write_protect_gfn(kvm, root, gfn);
+		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
 	}
 	return spte_set;
 }
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 31096ece9b14..cea787469016 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -59,7 +59,8 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				       struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn);
+				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   int min_level);
 
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);
-- 
2.23.0



* [PATCH v3 2/2] KVM: x86: Not wr-protect huge page with init_all_set dirty log
  2021-04-29  3:41 [PATCH v3 0/2] KVM: x86: Enable dirty logging lazily for huge pages Keqian Zhu
  2021-04-29  3:41 ` [PATCH v3 1/2] KVM: x86: Support write protect gfn with min_level Keqian Zhu
@ 2021-04-29  3:41 ` Keqian Zhu
  2021-05-24 12:12   ` Paolo Bonzini
  2021-05-11  6:49 ` [PATCH v3 0/2] KVM: x86: Enable dirty logging lazily for huge pages Keqian Zhu
  2 siblings, 1 reply; 5+ messages in thread
From: Keqian Zhu @ 2021-04-29  3:41 UTC (permalink / raw)
  To: linux-kernel, kvm, Paolo Bonzini, Sean Christopherson, Ben Gardon
  Cc: wanghaibin.wang

Currently, when dirty logging is started with init-all-set, we write
protect huge pages but leave normal pages untouched, so that dirty
logging for the normal pages can be enabled lazily.

Enabling dirty logging lazily for huge pages is feasible too; it not
only reduces the time needed to start dirty logging, but also greatly
reduces the side effects on the guest when the dirty rate is high.
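
For example (made-up numbers, only to illustrate why the new code in
kvm_arch_mmu_enable_log_dirty_pt_masked() may have to touch two large
pages): gfn_offset is aligned to 64, but the memslot's base_gfn need
not be 2M aligned, so the 64-page dirty mask can straddle a 2M
boundary:

	gfn_t base_gfn = 0x1f0, gfn_offset = 0;
	unsigned long mask = 0x1fff00;	/* bits 8..20 set */
	gfn_t start = base_gfn + gfn_offset + __ffs(mask);	/* 0x1f8 */
	gfn_t end   = base_gfn + gfn_offset + __fls(mask);	/* 0x204 */

	/*
	 * 0x1f8 and 0x204 lie in different 2M-sized gfn ranges, so both
	 * need a kvm_mmu_slot_gfn_write_protect(..., PG_LEVEL_2M) call.
	 */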

Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 arch/x86/kvm/mmu/mmu.c | 29 +++++++++++++++++++++++++----
 arch/x86/kvm/x86.c     | 37 ++++++++++---------------------------
 2 files changed, 35 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2ce5bc2ea46d..f52c7ceafb72 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1188,8 +1188,7 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
  * @gfn_offset: start of the BITS_PER_LONG pages we care about
  * @mask: indicates which pages we should protect
  *
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * Used when we do not need to care about huge page mappings.
  */
 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
@@ -1246,13 +1245,35 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
  * enable dirty logging for them.
  *
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * We need to care about huge page mappings: e.g. during dirty logging we may
+ * have such mappings.
  */
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 				struct kvm_memory_slot *slot,
 				gfn_t gfn_offset, unsigned long mask)
 {
+	/*
+	 * Huge pages are NOT write protected when we start dirty log with
+	 * init-all-set, so we must write protect them here.
+	 *
+	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
+	 * of memslot has no such restriction, so the range can cross two large
+	 * pages.
+	 */
+	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
+		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
+		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
+
+		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
+
+		/* Cross two large pages? */
+		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
+		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
+			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
+						       PG_LEVEL_2M);
+	}
+
+	/* Then we can handle the PT level pages */
 	if (kvm_x86_ops.cpu_dirty_log_size)
 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
 	else
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eca63625aee4..dfd676ffa7da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10888,36 +10888,19 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 		 */
 		kvm_mmu_zap_collapsible_sptes(kvm, new);
 	} else {
-		/* By default, write-protect everything to log writes. */
-		int level = PG_LEVEL_4K;
+		/*
+		 * If we're in initial-all-set mode, we don't need to write protect
+		 * any page because they're reported as dirty already.
+		 */
+		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+			return;
 
 		if (kvm_x86_ops.cpu_dirty_log_size) {
-			/*
-			 * Clear all dirty bits, unless pages are treated as
-			 * dirty from the get-go.
-			 */
-			if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
-				kvm_mmu_slot_leaf_clear_dirty(kvm, new);
-
-			/*
-			 * Write-protect large pages on write so that dirty
-			 * logging happens at 4k granularity.  No need to
-			 * write-protect small SPTEs since write accesses are
-			 * logged by the CPU via dirty bits.
-			 */
-			level = PG_LEVEL_2M;
-		} else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
-			/*
-			 * If we're with initial-all-set, we don't need
-			 * to write protect any small page because
-			 * they're reported as dirty already.  However
-			 * we still need to write-protect huge pages
-			 * so that the page split can happen lazily on
-			 * the first write to the huge page.
-			 */
-			level = PG_LEVEL_2M;
+			kvm_mmu_slot_leaf_clear_dirty(kvm, new);
+			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
+		} else {
+			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
 		}
-		kvm_mmu_slot_remove_write_access(kvm, new, level);
 	}
 }
 
-- 
2.23.0



* Re: [PATCH v3 0/2] KVM: x86: Enable dirty logging lazily for huge pages
  2021-04-29  3:41 [PATCH v3 0/2] KVM: x86: Enable dirty logging lazily for huge pages Keqian Zhu
  2021-04-29  3:41 ` [PATCH v3 1/2] KVM: x86: Support write protect gfn with min_level Keqian Zhu
  2021-04-29  3:41 ` [PATCH v3 2/2] KVM: x86: Not wr-protect huge page with init_all_set dirty log Keqian Zhu
@ 2021-05-11  6:49 ` Keqian Zhu
  2 siblings, 0 replies; 5+ messages in thread
From: Keqian Zhu @ 2021-05-11  6:49 UTC (permalink / raw)
  To: linux-kernel, kvm, Paolo Bonzini, Sean Christopherson, Ben Gardon
  Cc: wanghaibin.wang

ping. ^_^

On 2021/4/29 11:41, Keqian Zhu wrote:
> Hi,
> 
> Currently, when dirty logging is started with init-all-set, we write
> protect huge pages but leave normal pages untouched, so that dirty
> logging for the normal pages can be enabled lazily.
> 
> Enabling dirty logging lazily for huge pages is feasible too; it not
> only reduces the time needed to start dirty logging, but also greatly
> reduces the side effects on the guest when the dirty rate is high.
> 
> Thanks,
> Keqian
> 
> Changelog:
> 
> v3:
>  - Discussed with Ben and dropped the RFC comments. Thanks.
> 
> Keqian Zhu (2):
>   KVM: x86: Support write protect gfn with min_level
>   KVM: x86: Not wr-protect huge page with init_all_set dirty log
> 
>  arch/x86/kvm/mmu/mmu.c          | 38 ++++++++++++++++++++++++++-------
>  arch/x86/kvm/mmu/mmu_internal.h |  3 ++-
>  arch/x86/kvm/mmu/page_track.c   |  2 +-
>  arch/x86/kvm/mmu/tdp_mmu.c      | 16 ++++++++++----
>  arch/x86/kvm/mmu/tdp_mmu.h      |  3 ++-
>  arch/x86/kvm/x86.c              | 37 +++++++++-----------------------
>  6 files changed, 57 insertions(+), 42 deletions(-)
> 


* Re: [PATCH v3 2/2] KVM: x86: Not wr-protect huge page with init_all_set dirty log
  2021-04-29  3:41 ` [PATCH v3 2/2] KVM: x86: Not wr-protect huge page with init_all_set dirty log Keqian Zhu
@ 2021-05-24 12:12   ` Paolo Bonzini
  0 siblings, 0 replies; 5+ messages in thread
From: Paolo Bonzini @ 2021-05-24 12:12 UTC (permalink / raw)
  To: Keqian Zhu, linux-kernel, kvm, Sean Christopherson, Ben Gardon
  Cc: wanghaibin.wang

On 29/04/21 05:41, Keqian Zhu wrote:
> Currently, when dirty logging is started with init-all-set, we write
> protect huge pages but leave normal pages untouched, so that dirty
> logging for the normal pages can be enabled lazily.
> 
> Enabling dirty logging lazily for huge pages is feasible too; it not
> only reduces the time needed to start dirty logging, but also greatly
> reduces the side effects on the guest when the dirty rate is high.
> 
> Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
> ---
>   arch/x86/kvm/mmu/mmu.c | 29 +++++++++++++++++++++++++----
>   arch/x86/kvm/x86.c     | 37 ++++++++++---------------------------
>   2 files changed, 35 insertions(+), 31 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 2ce5bc2ea46d..f52c7ceafb72 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -1188,8 +1188,7 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
>    * @gfn_offset: start of the BITS_PER_LONG pages we care about
>    * @mask: indicates which pages we should protect
>    *
> - * Used when we do not need to care about huge page mappings: e.g. during dirty
> - * logging we do not have any such mappings.
> + * Used when we do not need to care about huge page mappings.
>    */
>   static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
>   				     struct kvm_memory_slot *slot,
> @@ -1246,13 +1245,35 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
>    * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
>    * enable dirty logging for them.
>    *
> - * Used when we do not need to care about huge page mappings: e.g. during dirty
> - * logging we do not have any such mappings.
> + * We need to care about huge page mappings: e.g. during dirty logging we may
> + * have such mappings.
>    */
>   void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
>   				struct kvm_memory_slot *slot,
>   				gfn_t gfn_offset, unsigned long mask)
>   {
> +	/*
> +	 * Huge pages are NOT write protected when we start dirty log with
> +	 * init-all-set, so we must write protect them here.
> +	 *
> +	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
> +	 * of memslot has no such restriction, so the range can cross two large
> +	 * pages.
> +	 */
> +	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
> +		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
> +		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
> +
> +		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
> +
> +		/* Cross two large pages? */
> +		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
> +		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
> +			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
> +						       PG_LEVEL_2M);
> +	}
> +
> +	/* Then we can handle the PT level pages */
>   	if (kvm_x86_ops.cpu_dirty_log_size)
>   		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
>   	else
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index eca63625aee4..dfd676ffa7da 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -10888,36 +10888,19 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
>   		 */
>   		kvm_mmu_zap_collapsible_sptes(kvm, new);
>   	} else {
> -		/* By default, write-protect everything to log writes. */
> -		int level = PG_LEVEL_4K;
> +		/*
> +		 * If we're in initial-all-set mode, we don't need to write protect
> +		 * any page because they're reported as dirty already.
> +		 */
> +		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
> +			return;
>   
>   		if (kvm_x86_ops.cpu_dirty_log_size) {
> -			/*
> -			 * Clear all dirty bits, unless pages are treated as
> -			 * dirty from the get-go.
> -			 */
> -			if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
> -				kvm_mmu_slot_leaf_clear_dirty(kvm, new);
> -
> -			/*
> -			 * Write-protect large pages on write so that dirty
> -			 * logging happens at 4k granularity.  No need to
> -			 * write-protect small SPTEs since write accesses are
> -			 * logged by the CPU via dirty bits.
> -			 */
> -			level = PG_LEVEL_2M;
> -		} else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
> -			/*
> -			 * If we're with initial-all-set, we don't need
> -			 * to write protect any small page because
> -			 * they're reported as dirty already.  However
> -			 * we still need to write-protect huge pages
> -			 * so that the page split can happen lazily on
> -			 * the first write to the huge page.
> -			 */
> -			level = PG_LEVEL_2M;
> +			kvm_mmu_slot_leaf_clear_dirty(kvm, new);
> +			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
> +		} else {
> +			kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
>   		}
> -		kvm_mmu_slot_remove_write_access(kvm, new, level);
>   	}
>   }
>   
> 

Queued (with a few adjustments to the comments and commit messages), thanks.

Paolo


