* [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write
@ 2011-08-16  6:40 Xiao Guangrong
  2011-08-16  6:41 ` [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table Xiao Guangrong
                   ` (9 more replies)
  0 siblings, 10 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:40 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

kvm_mmu_pte_write is unsafe: it needs to allocate pte_list_desc objects
when sptes are prefetched, but we cannot know in advance how many sptes
will be prefetched on this path, so the free pte_list_desc objects in the
cache can be exhausted and the BUG_ON() triggered. In addition, some paths
do not fill the cache at all, for example when an INS instruction is
emulated without going through the page fault path.
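
A small standalone model of the failure mode may help: a fixed-size object
cache where optional work is skipped when no objects remain, instead of
hitting a BUG_ON() on underflow. The names and the cache size below are
illustrative, not the kernel's; only the check-before-consume pattern
mirrors rmap_can_add() and mmu_memory_cache_free_objects() in the diff.

#include <stdio.h>

#define CACHE_SIZE 4

struct obj_cache {
        int nobjs;
};

static int cache_free_objects(struct obj_cache *cache)
{
        return cache->nobjs;
}

static void cache_alloc(struct obj_cache *cache)
{
        /* the kernel's mmu_memory_cache_alloc() would BUG_ON() here */
        if (cache->nobjs == 0)
                return;
        cache->nobjs--;
}

int main(void)
{
        struct obj_cache cache = { .nobjs = CACHE_SIZE };
        int i;

        for (i = 0; i < 8; i++) {
                /* mirror rmap_can_add(): prefetch only while objects remain */
                if (!cache_free_objects(&cache)) {
                        printf("prefetch %d skipped: cache is empty\n", i);
                        continue;
                }
                cache_alloc(&cache);
                printf("prefetch %d consumed one pte_list_desc\n", i);
        }
        return 0;
}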

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |   25 ++++++++++++++++++++-----
 1 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5d7fbf0..b01afee 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -592,6 +592,11 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 	return 0;
 }
 
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+{
+	return cache->nobjs;
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
 				  struct kmem_cache *cache)
 {
@@ -969,6 +974,14 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 	return &linfo->rmap_pde;
 }
 
+static bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_memory_cache *cache;
+
+	cache = &vcpu->arch.mmu_pte_list_desc_cache;
+	return mmu_memory_cache_free_objects(cache);
+}
+
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
@@ -3585,6 +3598,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}
 
+	/*
+	 * No need to care whether allocation memory is successful
+	 * or not since pte prefetch is skiped if it does not have
+	 * enough objects in the cache.
+	 */
+	mmu_topup_memory_caches(vcpu);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
 		gentry = 0;
@@ -3655,7 +3674,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			mmu_page_zap_pte(vcpu->kvm, sp, spte);
 			if (gentry &&
 			      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
-			      & mask.word))
+			      & mask.word) && rmap_can_add(vcpu))
 				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 			if (!remote_flush && need_remote_flush(entry, *spte))
 				remote_flush = true;
@@ -3716,10 +3735,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		goto out;
 	}
 
-	r = mmu_topup_memory_caches(vcpu);
-	if (r)
-		goto out;
-
 	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
 	switch (er) {
-- 
1.7.5.4


* [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
@ 2011-08-16  6:41 ` Xiao Guangrong
  2011-08-22 14:32   ` Marcelo Tosatti
  2011-08-16  6:42 ` [PATCH 03/11] KVM: x86: retry non-page-table writing instruction Xiao Guangrong
                   ` (8 subsequent siblings)
  9 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:41 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

The idea is from Avi:
| tag instructions that are typically used to modify the page tables, and
| drop shadow if any other instruction is used.
| The list would include, I'd guess, and, or, bts, btc, mov, xchg, cmpxchg,
| and cmpxchg8b.

This patch only tags the instructions; a later patch drops the shadow page
when it is written by any other instruction.
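
A rough standalone sketch of the tagging scheme (the flag values and the
tiny opcode table are made up; only the PageTable bit test corresponds to
the page_table_writing_insn() helper added later in this series):

#include <stdio.h>
#include <stdbool.h>

#define Lock      (1u << 0)
#define PageTable (1u << 19)   /* instruction typically used to write PTEs */

struct opcode {
        unsigned int flags;
        const char *name;
};

static const struct opcode table[] = {
        { Lock | PageTable, "or"   },
        { Lock | PageTable, "and"  },
        { Lock | PageTable, "xchg" },
        { Lock,             "add"  },   /* not a typical page-table writer */
};

static bool page_table_writing_insn(const struct opcode *op)
{
        return op->flags & PageTable;
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                printf("%-4s: %s\n", table[i].name,
                       page_table_writing_insn(&table[i]) ?
                       "tagged as page-table writer" :
                       "untagged, drop the shadow page");
        return 0;
}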

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/emulate.c |   35 ++++++++++++++++++++---------------
 1 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0453c07..e24c269 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -82,6 +82,7 @@
 #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
 #define Sse         (1<<18)     /* SSE Vector instruction */
 /* Misc flags */
+#define PageTable   (1 << 19)   /* instruction used to write page table */
 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
@@ -3018,10 +3019,10 @@ static struct opcode group7_rm7[] = {
 
 static struct opcode group1[] = {
 	I(Lock, em_add),
-	I(Lock, em_or),
+	I(Lock | PageTable, em_or),
 	I(Lock, em_adc),
 	I(Lock, em_sbb),
-	I(Lock, em_and),
+	I(Lock | PageTable, em_and),
 	I(Lock, em_sub),
 	I(Lock, em_xor),
 	I(0, em_cmp),
@@ -3076,18 +3077,21 @@ static struct group_dual group7 = { {
 
 static struct opcode group8[] = {
 	N, N, N, N,
-	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
-	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
+	D(DstMem | SrcImmByte | ModRM),
+	D(DstMem | SrcImmByte | ModRM | Lock | PageTable),
+	D(DstMem | SrcImmByte | ModRM | Lock),
+	D(DstMem | SrcImmByte | ModRM | Lock | PageTable),
 };
 
 static struct group_dual group9 = { {
-	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
+	N, D(DstMem64 | ModRM | Lock | PageTable), N, N, N, N, N, N,
 }, {
 	N, N, N, N, N, N, N, N,
 } };
 
 static struct opcode group11[] = {
-	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
+	I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
+	X7(D(Undefined)),
 };
 
 static struct gprefix pfx_0f_6f_0f_7f = {
@@ -3099,7 +3103,7 @@ static struct opcode opcode_table[256] = {
 	I6ALU(Lock, em_add),
 	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
 	/* 0x08 - 0x0F */
-	I6ALU(Lock, em_or),
+	I6ALU(Lock | PageTable, em_or),
 	D(ImplicitOps | Stack | No64), N,
 	/* 0x10 - 0x17 */
 	I6ALU(Lock, em_adc),
@@ -3108,7 +3112,7 @@ static struct opcode opcode_table[256] = {
 	I6ALU(Lock, em_sbb),
 	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
 	/* 0x20 - 0x27 */
-	I6ALU(Lock, em_and), N, N,
+	I6ALU(Lock | PageTable, em_and), N, N,
 	/* 0x28 - 0x2F */
 	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
 	/* 0x30 - 0x37 */
@@ -3141,11 +3145,11 @@ static struct opcode opcode_table[256] = {
 	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
 	G(DstMem | SrcImmByte | ModRM | Group, group1),
 	I2bv(DstMem | SrcReg | ModRM, em_test),
-	I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
+	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
 	/* 0x88 - 0x8F */
-	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
+	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
-	I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
+	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
 	D(ModRM | SrcMem | NoAccess | DstReg),
 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
 	G(0, group1A),
@@ -3158,7 +3162,7 @@ static struct opcode opcode_table[256] = {
 	II(ImplicitOps | Stack, em_popf, popf), N, N,
 	/* 0xA0 - 0xA7 */
 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
-	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
+	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
 	I2bv(SrcSI | DstDI | Mov | String, em_mov),
 	I2bv(SrcSI | DstDI | String, em_cmp),
 	/* 0xA8 - 0xAF */
@@ -3255,18 +3259,19 @@ static struct opcode twobyte_table[256] = {
 	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
 	/* 0xA8 - 0xAF */
 	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
-	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
+	DI(ImplicitOps, rsm),
+	D(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable),
 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
 	D(DstMem | SrcReg | Src2CL | ModRM),
 	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
 	/* 0xB0 - 0xB7 */
-	D2bv(DstMem | SrcReg | ModRM | Lock),
+	D2bv(DstMem | SrcReg | ModRM | Lock | PageTable),
 	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
 	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
 	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
 	/* 0xB8 - 0xBF */
 	N, N,
-	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
+	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable),
 	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
 	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
 	/* 0xC0 - 0xCF */
-- 
1.7.5.4



* [PATCH 03/11] KVM: x86: retry non-page-table writing instruction
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
  2011-08-16  6:41 ` [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table Xiao Guangrong
@ 2011-08-16  6:42 ` Xiao Guangrong
  2011-08-22 19:59   ` Marcelo Tosatti
  2011-08-16  6:42 ` [PATCH 04/11] KVM: x86: cleanup port-in/port-out emulated Xiao Guangrong
                   ` (7 subsequent siblings)
  9 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

If the emulation is triggered by #PF and the instruction is not a page-table
writing instruction, the VM-exit was caused by shadow page write protection;
we can zap the shadow page and retry the instruction directly.

The idea is from Avi
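
A standalone model of the retry guard implemented by retry_instruction() in
the diff below (types and names simplified): retry at most once per
(eip, fault address) pair, so an instruction whose own mapping is removed
by the unprotect cannot loop forever.

#include <stdio.h>
#include <stdbool.h>

static unsigned long last_retry_eip, last_retry_addr;

static bool should_retry(unsigned long eip, unsigned long cr2,
                         bool page_table_writing)
{
        unsigned long prev_eip = last_retry_eip;
        unsigned long prev_addr = last_retry_addr;

        last_retry_eip = last_retry_addr = 0;

        if (page_table_writing)
                return false;          /* a real page-table write: emulate it */

        if (eip == prev_eip && cr2 == prev_addr)
                return false;          /* already retried this fault once */

        last_retry_eip = eip;
        last_retry_addr = cr2;
        return true;                   /* unprotect the page and re-execute */
}

int main(void)
{
        printf("first fault: retry=%d\n", should_retry(0x1000, 0x2000, false));
        printf("same fault:  retry=%d\n", should_retry(0x1000, 0x2000, false));
        printf("other fault: retry=%d\n", should_retry(0x1004, 0x3000, false));
        return 0;
}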

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_emulate.h |    1 +
 arch/x86/include/asm/kvm_host.h    |    5 +++
 arch/x86/kvm/emulate.c             |    5 +++
 arch/x86/kvm/mmu.c                 |   22 +++++++++++---
 arch/x86/kvm/x86.c                 |   53 ++++++++++++++++++++++++++++++++++++
 5 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 6040d11..fa87b63 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -362,6 +362,7 @@ enum x86_intercept {
 #endif
 
 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+bool page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ab4241..27a25df 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -443,6 +443,9 @@ struct kvm_vcpu_arch {
 
 	cpumask_var_t wbinvd_dirty_mask;
 
+	unsigned long last_retry_eip;
+	unsigned long last_retry_addr;
+
 	struct {
 		bool halted;
 		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -689,6 +692,7 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
+#define EMULTYPE_RETRY		    (1 << 3)
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 			    int emulation_type, void *insn, int insn_len);
 
@@ -753,6 +757,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes,
 		       bool guest_initiated);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e24c269..c62424e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3691,6 +3691,11 @@ done:
 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
+bool page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
+{
+	return ctxt->d & PageTable;
+}
+
 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 {
 	/* The second termination condition only applies for REPE
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b01afee..26aae11 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1997,7 +1997,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
@@ -2007,6 +2007,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 
+	spin_lock(&kvm->mmu_lock);
 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
@@ -2014,8 +2015,10 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	spin_unlock(&kvm->mmu_lock);
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
@@ -3697,9 +3700,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
-	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
@@ -3720,10 +3721,18 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
+static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
+		return vcpu_match_mmio_gpa(vcpu, addr);
+
+	return vcpu_match_mmio_gva(vcpu, addr);
+}
+
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		       void *insn, int insn_len)
 {
-	int r;
+	int r, emulation_type = EMULTYPE_RETRY;
 	enum emulation_result er;
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
@@ -3735,7 +3744,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		goto out;
 	}
 
-	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
+	if (is_mmio_page_fault(vcpu, cr2))
+		emulation_type = 0;
+
+	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
 	switch (er) {
 	case EMULATE_DONE:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6b37f18..db83fbe 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4814,6 +4814,56 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 	return false;
 }
 
+static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
+			      unsigned long cr2,  int emulation_type)
+{
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+
+	last_retry_eip = vcpu->arch.last_retry_eip;
+	last_retry_addr = vcpu->arch.last_retry_addr;
+
+	/*
+	 * If the emulation is caused by #PF and it is non-page_table
+	 * writing instruction, it means the VM-EXIT is caused by shadow
+	 * page protected, we can zap the shadow page and retry this
+	 * instruction directly.
+	 *
+	 * Note: if the guest uses a non-page-table modifying instruction
+	 * on the PDE that points to the instruction, then we will unmap
+	 * the instruction and go to an infinite loop. So, we cache the
+	 * last retried eip and the last fault address, if we meet the eip
+	 * and the address again, we can break out of the potential infinite
+	 * loop.
+	 */
+	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
+
+	if (!(emulation_type & EMULTYPE_RETRY))
+		return false;
+
+	if (page_table_writing_insn(ctxt))
+		return false;
+
+	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+		return false;
+
+	vcpu->arch.last_retry_eip = ctxt->eip;
+	vcpu->arch.last_retry_addr = cr2;
+
+	if (!vcpu->arch.mmu.direct_map && !mmu_is_nested(vcpu))
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+
+	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+	/*
+	 * The shadow pages have been zapped, then we call the page
+	 * fault path to change the mapping to writable.
+	 */
+	vcpu->arch.mmu.page_fault(vcpu, cr2, PFERR_WRITE_MASK, true);
+
+	return true;
+}
+
 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 			    unsigned long cr2,
 			    int emulation_type,
@@ -4855,6 +4905,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DONE;
 	}
 
+	if (retry_instruction(ctxt, cr2, emulation_type))
+		return EMULATE_DONE;
+
 	/* this is needed for vmware backdoor interface to work since it
 	   changes registers values  during IO operation */
 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
-- 
1.7.5.4



* [PATCH 04/11] KVM: x86: cleanup port-in/port-out emulated
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
  2011-08-16  6:41 ` [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table Xiao Guangrong
  2011-08-16  6:42 ` [PATCH 03/11] KVM: x86: retry non-page-table writing instruction Xiao Guangrong
@ 2011-08-16  6:42 ` Xiao Guangrong
  2011-08-16  6:43 ` [PATCH 05/11] KVM: MMU: do not mark access bit on pte write path Xiao Guangrong
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:42 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

Remove the code duplicated between emulator_pio_in_emulated and
emulator_pio_out_emulated.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/x86.c |   59 ++++++++++++++++++++++-----------------------------
 1 files changed, 26 insertions(+), 33 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index db83fbe..c66e021 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4327,32 +4327,24 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 	return r;
 }
 
-
-static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
-				    int size, unsigned short port, void *val,
-				    unsigned int count)
+static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
+			       unsigned short port, void *val,
+			       unsigned int count, bool in)
 {
-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
-	if (vcpu->arch.pio.count)
-		goto data_avail;
-
-	trace_kvm_pio(0, port, size, count);
+	trace_kvm_pio(!in, port, size, count);
 
 	vcpu->arch.pio.port = port;
-	vcpu->arch.pio.in = 1;
+	vcpu->arch.pio.in = in;
 	vcpu->arch.pio.count  = count;
 	vcpu->arch.pio.size = size;
 
 	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-	data_avail:
-		memcpy(val, vcpu->arch.pio_data, size * count);
 		vcpu->arch.pio.count = 0;
 		return 1;
 	}
 
 	vcpu->run->exit_reason = KVM_EXIT_IO;
-	vcpu->run->io.direction = KVM_EXIT_IO_IN;
+	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
 	vcpu->run->io.size = size;
 	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
 	vcpu->run->io.count = count;
@@ -4361,36 +4353,37 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 	return 0;
 }
 
-static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
-				     int size, unsigned short port,
-				     const void *val, unsigned int count)
+static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+				    int size, unsigned short port, void *val,
+				    unsigned int count)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	int ret;
 
-	trace_kvm_pio(1, port, size, count);
-
-	vcpu->arch.pio.port = port;
-	vcpu->arch.pio.in = 0;
-	vcpu->arch.pio.count = count;
-	vcpu->arch.pio.size = size;
-
-	memcpy(vcpu->arch.pio_data, val, size * count);
+	if (vcpu->arch.pio.count)
+		goto data_avail;
 
-	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
+	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
+	if (ret) {
+data_avail:
+		memcpy(val, vcpu->arch.pio_data, size * count);
 		vcpu->arch.pio.count = 0;
 		return 1;
 	}
 
-	vcpu->run->exit_reason = KVM_EXIT_IO;
-	vcpu->run->io.direction = KVM_EXIT_IO_OUT;
-	vcpu->run->io.size = size;
-	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
-	vcpu->run->io.count = count;
-	vcpu->run->io.port = port;
-
 	return 0;
 }
 
+static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
+				     int size, unsigned short port,
+				     const void *val, unsigned int count)
+{
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+	memcpy(vcpu->arch.pio_data, val, size * count);
+	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+}
+
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
 	return kvm_x86_ops->get_segment_base(vcpu, seg);
-- 
1.7.5.4



* [PATCH 05/11] KVM: MMU: do not mark access bit on pte write path
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
                   ` (2 preceding siblings ...)
  2011-08-16  6:42 ` [PATCH 04/11] KVM: x86: cleanup port-in/port-out emulated Xiao Guangrong
@ 2011-08-16  6:43 ` Xiao Guangrong
  2011-08-16  6:44 ` [PATCH 06/11] KVM: MMU: cleanup FNAME(invlpg) Xiao Guangrong
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:43 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

In the current code the accessed bit is always set when a page fault occurs,
so there is no need to set it again on the pte write path.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    1 -
 arch/x86/kvm/mmu.c              |   22 +---------------------
 2 files changed, 1 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 27a25df..58ea3a7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -356,7 +356,6 @@ struct kvm_vcpu_arch {
 	gfn_t last_pt_write_gfn;
 	int   last_pt_write_count;
 	u64  *last_pte_updated;
-	gfn_t last_pte_gfn;
 
 	struct fpu guest_fpu;
 	u64 xcr0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 26aae11..7ec2a6a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2206,11 +2206,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
 		return 0;
 
-	/*
-	 * We don't set the accessed bit, since we sometimes want to see
-	 * whether the guest actually used the pte (in order to detect
-	 * demand paging).
-	 */
 	spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
@@ -2361,10 +2356,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 	kvm_release_pfn_clean(pfn);
-	if (speculative) {
+	if (speculative)
 		vcpu->arch.last_pte_updated = sptep;
-		vcpu->arch.last_pte_gfn = gfn;
-	}
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3532,18 +3525,6 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
-static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	u64 *spte = vcpu->arch.last_pte_updated;
-
-	if (spte
-	    && vcpu->arch.last_pte_gfn == gfn
-	    && shadow_accessed_mask
-	    && !(*spte & shadow_accessed_mask)
-	    && is_shadow_present_pte(*spte))
-		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
-}
-
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes,
 		       bool guest_initiated)
@@ -3614,7 +3595,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 	if (guest_initiated) {
-		kvm_mmu_access_page(vcpu, gfn);
 		if (gfn == vcpu->arch.last_pt_write_gfn
 		    && !last_updated_pte_accessed(vcpu)) {
 			++vcpu->arch.last_pt_write_count;
-- 
1.7.5.4



* [PATCH 06/11] KVM: MMU: cleanup FNAME(invlpg)
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
                   ` (3 preceding siblings ...)
  2011-08-16  6:43 ` [PATCH 05/11] KVM: MMU: do not mark access bit on pte write path Xiao Guangrong
@ 2011-08-16  6:44 ` Xiao Guangrong
  2011-08-16  6:44 ` [PATCH 07/11] KVM: MMU: fast prefetch spte on invlpg path Xiao Guangrong
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:44 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

Directly use mmu_page_zap_pte to zap the spte in FNAME(invlpg), and remove
the code duplicated between FNAME(invlpg) and FNAME(sync_page).
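
For reference, a standalone version of the quadrant arithmetic that the new
FNAME(get_first_pte_gpa)() helper centralizes, shown for the 32-bit
(PTTYPE == 32) case; the gfn and quadrant inputs are made up:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9          /* 512 sptes per shadow page */
#define PTE32_SIZE      4          /* a 32-bit guest pte is 4 bytes */

/* gpa of the first guest pte covered by a shadow page, PTTYPE == 32 case */
static uint64_t first_pte_gpa(uint64_t sp_gfn, unsigned int quadrant)
{
        unsigned int offset = quadrant << PT64_LEVEL_BITS;  /* in gpte units */

        return (sp_gfn << PAGE_SHIFT) + (uint64_t)offset * PTE32_SIZE;
}

int main(void)
{
        /*
         * A 4K guest page table holds 1024 32-bit gptes, while a shadow
         * page holds only 512 sptes, so one shadow page covers only part
         * of the guest page; role.quadrant selects which part.
         */
        printf("quadrant 0 starts at gpa 0x%llx\n",
               (unsigned long long)first_pte_gpa(0x1234, 0));
        printf("quadrant 1 starts at gpa 0x%llx\n",
               (unsigned long long)first_pte_gpa(0x1234, 1));
        return 0;
}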

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c         |   16 ++++++++++------
 arch/x86/kvm/paging_tmpl.h |   42 +++++++++++++++---------------------------
 2 files changed, 25 insertions(+), 33 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7ec2a6a..ed3e778 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1808,7 +1808,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	}
 }
 
-static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
+static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 			     u64 *spte)
 {
 	u64 pte;
@@ -1816,17 +1816,21 @@ static void mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
-		if (is_last_spte(pte, sp->role.level))
+		if (is_last_spte(pte, sp->role.level)) {
 			drop_spte(kvm, spte);
-		else {
+			if (is_large_pte(pte))
+				--kvm->stat.lpages;
+		} else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			drop_parent_pte(child, spte);
 		}
-	} else if (is_mmio_spte(pte))
+		return true;
+	}
+
+	if (is_mmio_spte(pte))
 		mmu_spte_clear_no_track(spte);
 
-	if (is_large_pte(pte))
-		--kvm->stat.lpages;
+	return false;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9299410..7862c05 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -656,6 +656,16 @@ out_unlock:
 	return 0;
 }
 
+static gpa_t FNAME(get_first_pte_gpa)(struct kvm_mmu_page *sp)
+{
+	int offset = 0;
+
+	if (PTTYPE == 32)
+		offset = sp->role.quadrant << PT64_LEVEL_BITS;
+
+	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+}
+
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
@@ -663,7 +673,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
-	int need_flush = 0;
 
 	vcpu_clear_mmio_info(vcpu, gva);
 
@@ -675,36 +684,20 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			int offset, shift;
-
 			if (!sp->unsync)
 				break;
 
-			shift = PAGE_SHIFT -
-				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
-			offset = sp->role.quadrant << shift;
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
+			pte_gpa = FNAME(get_first_pte_gpa)(sp);
 			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-			if (is_shadow_present_pte(*sptep)) {
-				if (is_large_pte(*sptep))
-					--vcpu->kvm->stat.lpages;
-				drop_spte(vcpu->kvm, sptep);
-				need_flush = 1;
-			} else if (is_mmio_spte(*sptep))
-				mmu_spte_clear_no_track(sptep);
-
-			break;
+			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
+				kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
-	if (need_flush)
-		kvm_flush_remote_tlbs(vcpu->kvm);
-
 	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
 
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -769,19 +762,14 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	int i, offset, nr_present;
+	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
 
-	offset = nr_present = 0;
-
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
 
-	if (PTTYPE == 32)
-		offset = sp->role.quadrant << PT64_LEVEL_BITS;
-
-	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
+	first_pte_gpa = FNAME(get_first_pte_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 		unsigned pte_access;
-- 
1.7.5.4



* [PATCH 07/11] KVM: MMU: fast prefetch spte on invlpg path
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
                   ` (4 preceding siblings ...)
  2011-08-16  6:44 ` [PATCH 06/11] KVM: MMU: cleanup FNAME(invlpg) Xiao Guangrong
@ 2011-08-16  6:44 ` Xiao Guangrong
  2011-08-22 22:28   ` Marcelo Tosatti
  2011-08-16  6:45 ` [PATCH 08/11] KVM: MMU: remove unnecessary kvm_mmu_free_some_pages Xiao Guangrong
                   ` (3 subsequent siblings)
  9 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:44 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

Prefetch the spte directly for the unsync shadow page on the invlpg path,
instead of deferring the update to kvm_mmu_pte_write.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    4 +---
 arch/x86/kvm/mmu.c              |   38 +++++++++++++++-----------------------
 arch/x86/kvm/paging_tmpl.h      |   30 ++++++++++++++++++------------
 arch/x86/kvm/x86.c              |    4 ++--
 4 files changed, 36 insertions(+), 40 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 58ea3a7..927ba73 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -460,7 +460,6 @@ struct kvm_arch {
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
-	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
@@ -754,8 +753,7 @@ int fx_init(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes,
-		       bool guest_initiated);
+		       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ed3e778..f6de2fc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3530,8 +3530,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes,
-		       bool guest_initiated)
+		       const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
@@ -3540,7 +3539,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	unsigned pte_size, page_offset, misaligned, quadrant, offset;
-	int level, npte, invlpg_counter, r, flooded = 0;
+	int level, npte, r, flooded = 0;
 	bool remote_flush, local_flush, zap_page;
 
 	/*
@@ -3555,19 +3554,16 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
-	invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
-
 	/*
 	 * Assume that the pte write on a page table of the same type
 	 * as the current vcpu paging mode since we update the sptes only
 	 * when they have the same mode.
 	 */
-	if ((is_pae(vcpu) && bytes == 4) || !new) {
+	if (is_pae(vcpu) && bytes == 4) {
 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
-		if (is_pae(vcpu)) {
-			gpa &= ~(gpa_t)7;
-			bytes = 8;
-		}
+		gpa &= ~(gpa_t)7;
+		bytes = 8;
+
 		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
 		if (r)
 			gentry = 0;
@@ -3593,22 +3589,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	 */
 	mmu_topup_memory_caches(vcpu);
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
-		gentry = 0;
 	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
-	if (guest_initiated) {
-		if (gfn == vcpu->arch.last_pt_write_gfn
-		    && !last_updated_pte_accessed(vcpu)) {
-			++vcpu->arch.last_pt_write_count;
-			if (vcpu->arch.last_pt_write_count >= 3)
-				flooded = 1;
-		} else {
-			vcpu->arch.last_pt_write_gfn = gfn;
-			vcpu->arch.last_pt_write_count = 1;
-			vcpu->arch.last_pte_updated = NULL;
-		}
+	if (gfn == vcpu->arch.last_pt_write_gfn
+	    && !last_updated_pte_accessed(vcpu)) {
+		++vcpu->arch.last_pt_write_count;
+		if (vcpu->arch.last_pt_write_count >= 3)
+			flooded = 1;
+	} else {
+		vcpu->arch.last_pt_write_gfn = gfn;
+		vcpu->arch.last_pt_write_count = 1;
+		vcpu->arch.last_pte_updated = NULL;
 	}
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7862c05..bdc2241 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -670,20 +670,27 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 
 	vcpu_clear_mmio_info(vcpu, gva);
 
-	spin_lock(&vcpu->kvm->mmu_lock);
+	/*
+	 * No need to check return value here, rmap_can_add() can
+	 * help us to skip pte prefetch later.
+	 */
+	mmu_topup_memory_caches(vcpu);
 
+	spin_lock(&vcpu->kvm->mmu_lock);
 	for_each_shadow_entry(vcpu, gva, iterator) {
 		level = iterator.level;
 		sptep = iterator.sptep;
 
 		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
+			pt_element_t gpte;
+			gpa_t pte_gpa;
+
 			if (!sp->unsync)
 				break;
 
@@ -692,22 +699,21 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
 			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
 				kvm_flush_remote_tlbs(vcpu->kvm);
+
+			if (rmap_can_add(vcpu))
+				break;
+
+			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+						  sizeof(pt_element_t)))
+				break;
+
+			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
 		}
 
 		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
-
-	atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-
-	if (mmu_topup_memory_caches(vcpu))
-		return;
-	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c66e021..db8cbfd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4065,7 +4065,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
 	if (ret < 0)
 		return 0;
-	kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
+	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
 	return 1;
 }
 
@@ -4302,7 +4302,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	if (!exchanged)
 		return X86EMUL_CMPXCHG_FAILED;
 
-	kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
+	kvm_mmu_pte_write(vcpu, gpa, new, bytes);
 
 	return X86EMUL_CONTINUE;
 
-- 
1.7.5.4



* [PATCH 08/11] KVM: MMU: remove unnecessary kvm_mmu_free_some_pages
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
                   ` (5 preceding siblings ...)
  2011-08-16  6:44 ` [PATCH 07/11] KVM: MMU: fast prefetch spte on invlpg path Xiao Guangrong
@ 2011-08-16  6:45 ` Xiao Guangrong
  2011-08-16  6:45 ` [PATCH 09/11] KVM: MMU: split kvm_mmu_pte_write function Xiao Guangrong
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:45 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

In kvm_mmu_pte_write we do not need to allocate shadow pages, so calling
kvm_mmu_free_some_pages there is unnecessary.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |    1 -
 1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f6de2fc..9ac0dc8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3589,7 +3589,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	 */
 	mmu_topup_memory_caches(vcpu);
 	spin_lock(&vcpu->kvm->mmu_lock);
-	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 	if (gfn == vcpu->arch.last_pt_write_gfn
-- 
1.7.5.4



* [PATCH 09/11] KVM: MMU: split kvm_mmu_pte_write function
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
                   ` (6 preceding siblings ...)
  2011-08-16  6:45 ` [PATCH 08/11] KVM: MMU: remove unnecessary kvm_mmu_free_some_pages Xiao Guangrong
@ 2011-08-16  6:45 ` Xiao Guangrong
  2011-08-16  6:46 ` [PATCH 10/11] KVM: MMU: fix detecting misaligned accessed Xiao Guangrong
  2011-08-16  6:46 ` [PATCH 11/11] KVM: MMU: improve write flooding detected Xiao Guangrong
  9 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:45 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

kvm_mmu_pte_write is too long; split it into several helper functions for
better readability.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |  187 +++++++++++++++++++++++++++++++---------------------
 1 files changed, 112 insertions(+), 75 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9ac0dc8..cfe24fe 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3529,48 +3529,28 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-		       const u8 *new, int bytes)
+static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
+				    const u8 *new, int *bytes)
 {
-	gfn_t gfn = gpa >> PAGE_SHIFT;
-	union kvm_mmu_page_role mask = { .word = 0 };
-	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
-	LIST_HEAD(invalid_list);
-	u64 entry, gentry, *spte;
-	unsigned pte_size, page_offset, misaligned, quadrant, offset;
-	int level, npte, r, flooded = 0;
-	bool remote_flush, local_flush, zap_page;
-
-	/*
-	 * If we don't have indirect shadow pages, it means no page is
-	 * write-protected, so we can exit simply.
-	 */
-	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
-		return;
-
-	zap_page = remote_flush = local_flush = false;
-	offset = offset_in_page(gpa);
-
-	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
+	u64 gentry;
+	int r;
 
 	/*
 	 * Assume that the pte write on a page table of the same type
 	 * as the current vcpu paging mode since we update the sptes only
 	 * when they have the same mode.
 	 */
-	if (is_pae(vcpu) && bytes == 4) {
+	if (is_pae(vcpu) && *bytes == 4) {
 		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
-		gpa &= ~(gpa_t)7;
-		bytes = 8;
-
-		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
+		*gpa &= ~(gpa_t)7;
+		*bytes = 8;
+		r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8));
 		if (r)
 			gentry = 0;
 		new = (const u8 *)&gentry;
 	}
 
-	switch (bytes) {
+	switch (*bytes) {
 	case 4:
 		gentry = *(const u32 *)new;
 		break;
@@ -3582,71 +3562,128 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		break;
 	}
 
-	/*
-	 * No need to care whether allocation memory is successful
-	 * or not since pte prefetch is skiped if it does not have
-	 * enough objects in the cache.
-	 */
-	mmu_topup_memory_caches(vcpu);
-	spin_lock(&vcpu->kvm->mmu_lock);
-	++vcpu->kvm->stat.mmu_pte_write;
-	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+	return gentry;
+}
+
+/*
+ * If we're seeing too many writes to a page, it may no longer be a page table,
+ * or we may be forking, in which case it is better to unmap the page.
+ */
+static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	bool flooded = false;
+
 	if (gfn == vcpu->arch.last_pt_write_gfn
 	    && !last_updated_pte_accessed(vcpu)) {
 		++vcpu->arch.last_pt_write_count;
 		if (vcpu->arch.last_pt_write_count >= 3)
-			flooded = 1;
+			flooded = true;
 	} else {
 		vcpu->arch.last_pt_write_gfn = gfn;
 		vcpu->arch.last_pt_write_count = 1;
 		vcpu->arch.last_pte_updated = NULL;
 	}
 
+	return flooded;
+}
+
+/*
+ * Misaligned accesses are too much trouble to fix up; also, they usually
+ * indicate a page is not used as a page table.
+ */
+static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
+				    int bytes)
+{
+	unsigned offset, pte_size, misaligned;
+
+	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
+		 gpa, bytes, sp->role.word);
+
+	offset = offset_in_page(gpa);
+	pte_size = sp->role.cr4_pae ? 8 : 4;
+	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
+	misaligned |= bytes < 4;
+
+	return misaligned;
+}
+
+static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
+{
+	unsigned page_offset, quadrant;
+	u64 *spte;
+	int level;
+
+	page_offset = offset_in_page(gpa);
+	level = sp->role.level;
+	*nspte = 1;
+	if (!sp->role.cr4_pae) {
+		page_offset <<= 1;	/* 32->64 */
+		/*
+		 * A 32-bit pde maps 4MB while the shadow pdes map
+		 * only 2MB.  So we need to double the offset again
+		 * and zap two pdes instead of one.
+		 */
+		if (level == PT32_ROOT_LEVEL) {
+			page_offset &= ~7; /* kill rounding error */
+			page_offset <<= 1;
+			*nspte = 2;
+		}
+		quadrant = page_offset >> PAGE_SHIFT;
+		page_offset &= ~PAGE_MASK;
+		if (quadrant != sp->role.quadrant)
+			return NULL;
+	}
+
+	spte = &sp->spt[page_offset / sizeof(*spte)];
+	return spte;
+}
+
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+		       const u8 *new, int bytes)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	union kvm_mmu_page_role mask = { .word = 0 };
+	struct kvm_mmu_page *sp;
+	struct hlist_node *node;
+	LIST_HEAD(invalid_list);
+	u64 entry, gentry, *spte;
+	int npte;
+	bool remote_flush, local_flush, zap_page, flooded, misaligned;
+
+	/*
+	 * If we don't have indirect shadow pages, it means no page is
+	 * write-protected, so we can exit simply.
+	 */
+	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+		return;
+
+	zap_page = remote_flush = local_flush = false;
+
+	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
+
+	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
+	mmu_topup_memory_caches(vcpu);
+	spin_lock(&vcpu->kvm->mmu_lock);
+	++vcpu->kvm->stat.mmu_pte_write;
+	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+
+	flooded = detect_write_flooding(vcpu, gfn);
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-		pte_size = sp->role.cr4_pae ? 8 : 4;
-		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
-		misaligned |= bytes < 4;
+		misaligned = detect_write_misaligned(sp, gpa, bytes);
+
 		if (misaligned || flooded) {
-			/*
-			 * Misaligned accesses are too much trouble to fix
-			 * up; also, they usually indicate a page is not used
-			 * as a page table.
-			 *
-			 * If we're seeing too many writes to a page,
-			 * it may no longer be a page table, or we may be
-			 * forking, in which case it is better to unmap the
-			 * page.
-			 */
-			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
-				 gpa, bytes, sp->role.word);
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
 						     &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
-		page_offset = offset;
-		level = sp->role.level;
-		npte = 1;
-		if (!sp->role.cr4_pae) {
-			page_offset <<= 1;	/* 32->64 */
-			/*
-			 * A 32-bit pde maps 4MB while the shadow pdes map
-			 * only 2MB.  So we need to double the offset again
-			 * and zap two pdes instead of one.
-			 */
-			if (level == PT32_ROOT_LEVEL) {
-				page_offset &= ~7; /* kill rounding error */
-				page_offset <<= 1;
-				npte = 2;
-			}
-			quadrant = page_offset >> PAGE_SHIFT;
-			page_offset &= ~PAGE_MASK;
-			if (quadrant != sp->role.quadrant)
-				continue;
-		}
+
+		spte = get_written_sptes(sp, gpa, &npte);
+		if (!spte)
+			continue;
+
 		local_flush = true;
-		spte = &sp->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
 			entry = *spte;
 			mmu_page_zap_pte(vcpu->kvm, sp, spte);
-- 
1.7.5.4



* [PATCH 10/11] KVM: MMU: fix detecting misaligned accessed
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
                   ` (7 preceding siblings ...)
  2011-08-16  6:45 ` [PATCH 09/11] KVM: MMU: split kvm_mmu_pte_write function Xiao Guangrong
@ 2011-08-16  6:46 ` Xiao Guangrong
  2011-08-16  6:46 ` [PATCH 11/11] KVM: MMU: improve write flooding detected Xiao Guangrong
  9 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:46 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

Sometimes the guest writes only a single byte of a pte to update a status
bit; for example, the Linux kernel uses clear_bit to clear the r/w bit,
which is implemented with an 'andb' instruction. In this case
kvm_mmu_pte_write treats the write as a misaligned access and zaps the
shadow page.
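
A quick standalone check of the old and new rules for the case described
above, an aligned one-byte write to an 8-byte pte such as the andb write
(the offset value is arbitrary):

#include <stdio.h>

static int misaligned(unsigned int offset, unsigned int bytes,
                      unsigned int pte_size, int allow_byte_update)
{
        /* the new rule: an aligned one-byte write updates status bits only */
        if (allow_byte_update && !(offset & (pte_size - 1)) && bytes == 1)
                return 0;

        return ((offset ^ (offset + bytes - 1)) & ~(pte_size - 1)) != 0 ||
               bytes < 4;
}

int main(void)
{
        /* one-byte write at a pte boundary, 8-byte (PAE) ptes */
        printf("old rule: misaligned=%d (page gets zapped)\n",
               misaligned(0x10, 1, 8, 0));
        printf("new rule: misaligned=%d (write handled normally)\n",
               misaligned(0x10, 1, 8, 1));
        return 0;
}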

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/kvm/mmu.c |    8 ++++++++
 1 files changed, 8 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cfe24fe..adaa160 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3601,6 +3601,14 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
 
 	offset = offset_in_page(gpa);
 	pte_size = sp->role.cr4_pae ? 8 : 4;
+
+	/*
+	 * Sometimes, the OS only writes the last one bytes to update status
+	 * bits, for example, in linux, andb instruction is used in clear_bit().
+	 */
+	if (!(offset & (pte_size - 1)) && bytes == 1)
+		return false;
+
 	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
 	misaligned |= bytes < 4;
 
-- 
1.7.5.4



* [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
                   ` (8 preceding siblings ...)
  2011-08-16  6:46 ` [PATCH 10/11] KVM: MMU: fix detecting misaligned accessed Xiao Guangrong
@ 2011-08-16  6:46 ` Xiao Guangrong
  2011-08-23  8:00   ` Marcelo Tosatti
  9 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-16  6:46 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

The current write-flooding detection does not work well: when we handle a
page write, if the last speculative spte is not accessed we treat the page
as write-flooded. However, speculative sptes are created on many paths,
such as pte prefetch and page sync, so the last speculative spte may not
point to the written page at all, and the written page may still be
accessed via other sptes. Relying on the Accessed bit of the last
speculative spte is therefore not enough.

Instead of detecting whether the page is accessed, detect whether the
written spte itself is accessed: if the spte is not accessed but is written
frequently, we treat the page as no longer being a page table, or as not
having been used for a long time.
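
A standalone model of the new per-shadow-page heuristic,
detect_write_flooding() in the diff below; the threshold of 3 matches the
patch, everything else is simplified:

#include <stdio.h>
#include <stdbool.h>

struct shadow_page {
        int write_flooding_count;
};

static bool detect_write_flooding(struct shadow_page *sp, bool spte_accessed)
{
        if (!spte_accessed)
                sp->write_flooding_count++;
        else
                sp->write_flooding_count = 0;   /* the guest really uses it */

        return sp->write_flooding_count >= 3;
}

int main(void)
{
        struct shadow_page sp = { 0 };
        bool accessed[] = { false, false, true, false, false, false };
        unsigned int i;

        for (i = 0; i < sizeof(accessed) / sizeof(accessed[0]); i++)
                printf("write %u -> %s\n", i,
                       detect_write_flooding(&sp, accessed[i]) ? "zap" : "keep");
        return 0;
}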

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    6 +---
 arch/x86/kvm/mmu.c              |   48 +++++++++------------------------------
 arch/x86/kvm/paging_tmpl.h      |    9 +-----
 3 files changed, 15 insertions(+), 48 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 927ba73..9d17238 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -239,6 +239,8 @@ struct kvm_mmu_page {
 	int clear_spte_count;
 #endif
 
+	int write_flooding_count;
+
 	struct rcu_head rcu;
 };
 
@@ -353,10 +355,6 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
-	gfn_t last_pt_write_gfn;
-	int   last_pt_write_count;
-	u64  *last_pte_updated;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index adaa160..3230f84 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1695,6 +1695,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		} else if (sp->unsync)
 			kvm_mmu_mark_parents_unsync(sp);
 
+		sp->write_flooding_count = 0;
 		trace_kvm_mmu_get_page(sp, false);
 		return sp;
 	}
@@ -1847,15 +1848,6 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 	mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
-static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
-{
-	int i;
-	struct kvm_vcpu *vcpu;
-
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.last_pte_updated = NULL;
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *parent_pte;
@@ -1915,7 +1907,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	}
 
 	sp->role.invalid = 1;
-	kvm_mmu_reset_last_pte_updated(kvm);
 	return ret;
 }
 
@@ -2360,8 +2351,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 	kvm_release_pfn_clean(pfn);
-	if (speculative)
-		vcpu->arch.last_pte_updated = sptep;
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3522,13 +3511,6 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
 		kvm_mmu_flush_tlb(vcpu);
 }
 
-static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
-{
-	u64 *spte = vcpu->arch.last_pte_updated;
-
-	return !!(spte && (*spte & shadow_accessed_mask));
-}
-
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 				    const u8 *new, int *bytes)
 {
@@ -3569,22 +3551,14 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
  * If we're seeing too many writes to a page, it may no longer be a page table,
  * or we may be forking, in which case it is better to unmap the page.
  */
-static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
 {
-	bool flooded = false;
-
-	if (gfn == vcpu->arch.last_pt_write_gfn
-	    && !last_updated_pte_accessed(vcpu)) {
-		++vcpu->arch.last_pt_write_count;
-		if (vcpu->arch.last_pt_write_count >= 3)
-			flooded = true;
-	} else {
-		vcpu->arch.last_pt_write_gfn = gfn;
-		vcpu->arch.last_pt_write_count = 1;
-		vcpu->arch.last_pte_updated = NULL;
-	}
+	if (spte && !(*spte & shadow_accessed_mask))
+		sp->write_flooding_count++;
+	else
+		sp->write_flooding_count = 0;
 
-	return flooded;
+	return sp->write_flooding_count >= 3;
 }
 
 /*
@@ -3656,7 +3630,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
-	bool remote_flush, local_flush, zap_page, flooded, misaligned;
+	bool remote_flush, local_flush, zap_page;
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
@@ -3675,12 +3649,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	flooded = detect_write_flooding(vcpu, gfn);
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-		misaligned = detect_write_misaligned(sp, gpa, bytes);
+		spte = get_written_sptes(sp, gpa, &npte);
 
-		if (misaligned || flooded) {
+		if (detect_write_misaligned(sp, gpa, bytes) ||
+		      detect_write_flooding(sp, spte)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
 						     &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index bdc2241..ec5c1b4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -599,11 +599,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		if (!prefault) {
+		if (!prefault)
 			inject_page_fault(vcpu, &walker.fault);
-			/* reset fork detector */
-			vcpu->arch.last_pt_write_count = 0;
-		}
+
 		return 0;
 	}
 
@@ -641,9 +639,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
 		 sptep, *sptep, emulate);
 
-	if (!emulate)
-		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-
 	++vcpu->stat.pf_fixed;
 	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-- 
1.7.5.4



* Re: [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table
  2011-08-16  6:41 ` [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table Xiao Guangrong
@ 2011-08-22 14:32   ` Marcelo Tosatti
  2011-08-22 14:36     ` Avi Kivity
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-22 14:32 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Tue, Aug 16, 2011 at 02:41:27PM +0800, Xiao Guangrong wrote:
> The idea is from Avi:
> | tag instructions that are typically used to modify the page tables, and
> | drop shadow if any other instruction is used.
> | The list would include, I'd guess, and, or, bts, btc, mov, xchg, cmpxchg,
> | and cmpxchg8b.
> 
> This patch only tags the instructions; a later patch drops the shadow page
> when it is written by any other instruction.

What is the advantage of doing this again? What is the point of
dropping shadow if the instruction is emulated?



* Re: [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table
  2011-08-22 14:32   ` Marcelo Tosatti
@ 2011-08-22 14:36     ` Avi Kivity
  0 siblings, 0 replies; 41+ messages in thread
From: Avi Kivity @ 2011-08-22 14:36 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Xiao Guangrong, LKML, KVM

On 08/22/2011 05:32 PM, Marcelo Tosatti wrote:
> On Tue, Aug 16, 2011 at 02:41:27PM +0800, Xiao Guangrong wrote:
> >  The idea is from Avi:
> >  | tag instructions that are typically used to modify the page tables, and
> >  | drop shadow if any other instruction is used.
> >  | The list would include, I'd guess, and, or, bts, btc, mov, xchg, cmpxchg,
> >  | and cmpxchg8b.
> >
> >  This patch only tags the instructions; a later patch drops the shadow page
> >  when it is written by any other instruction.
>
> What is the advantage of doing this again? What is the point of
> dropping shadow if the instruction is emulated?
>

So it won't be emulated again; the assumption is that if you addl into a 
page, it isn't a pagetable.

-- 
error compiling committee.c: too many arguments to function



* Re: [PATCH 03/11] KVM: x86: retry non-page-table writing instruction
  2011-08-16  6:42 ` [PATCH 03/11] KVM: x86: retry non-page-table writing instruction Xiao Guangrong
@ 2011-08-22 19:59   ` Marcelo Tosatti
  2011-08-22 20:21     ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-22 19:59 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Tue, Aug 16, 2011 at 02:42:07PM +0800, Xiao Guangrong wrote:
> If the emulation is triggered by #PF and the instruction is not a page-table
> writing instruction, the VM-exit was caused by shadow page write protection;
> we can zap the shadow page and retry the instruction directly.
> 
> The idea is from Avi
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
> ---
>  arch/x86/include/asm/kvm_emulate.h |    1 +
>  arch/x86/include/asm/kvm_host.h    |    5 +++
>  arch/x86/kvm/emulate.c             |    5 +++
>  arch/x86/kvm/mmu.c                 |   22 +++++++++++---
>  arch/x86/kvm/x86.c                 |   53 ++++++++++++++++++++++++++++++++++++
>  5 files changed, 81 insertions(+), 5 deletions(-)
> 
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -4814,6 +4814,56 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
>  	return false;
>  }
>  
> +static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
> +			      unsigned long cr2,  int emulation_type)
> +{
> +	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
> +	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
> +
> +	last_retry_eip = vcpu->arch.last_retry_eip;
> +	last_retry_addr = vcpu->arch.last_retry_addr;
> +
> +	/*
> +	 * If the emulation is caused by #PF and it is non-page_table
> +	 * writing instruction, it means the VM-EXIT is caused by shadow
> +	 * page protected, we can zap the shadow page and retry this
> +	 * instruction directly.
> +	 *
> +	 * Note: if the guest uses a non-page-table modifying instruction
> +	 * on the PDE that points to the instruction, then we will unmap
> +	 * the instruction and go to an infinite loop. So, we cache the
> +	 * last retried eip and the last fault address, if we meet the eip
> +	 * and the address again, we can break out of the potential infinite
> +	 * loop.
> +	 */
> +	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
> +
> +	if (!(emulation_type & EMULTYPE_RETRY))
> +		return false;
> +
> +	if (page_table_writing_insn(ctxt))
> +		return false;
> +
> +	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
> +		return false;
> +
> +	vcpu->arch.last_retry_eip = ctxt->eip;
> +	vcpu->arch.last_retry_addr = cr2;
> +
> +	if (!vcpu->arch.mmu.direct_map && !mmu_is_nested(vcpu))
> +		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

Why write? 

> +	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
> +
> +	/*
> +	 * The shadow pages have been zapped, then we call the page
> +	 * fault path to change the mapping to writable.
> +	 */
> +	vcpu->arch.mmu.page_fault(vcpu, cr2, PFERR_WRITE_MASK, true);

I don't see why is this necessary. Just allowing the instruction to
proceed should be enough?

Looks good otherwise.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 03/11] KVM: x86: retry non-page-table writing instruction
  2011-08-22 19:59   ` Marcelo Tosatti
@ 2011-08-22 20:21     ` Xiao Guangrong
  2011-08-22 20:42       ` Marcelo Tosatti
  0 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-22 20:21 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/23/2011 03:59 AM, Marcelo Tosatti wrote:

>> +	if (!vcpu->arch.mmu.direct_map && !mmu_is_nested(vcpu))
>> +		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
> 
> Why write? 
> 

Since the fault is caused by a write to the page table, and the 'gpa' can
be written again after the instruction is retried.

>> +	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
>> +
>> +	/*
>> +	 * The shadow pages have been zapped, then we call the page
>> +	 * fault path to change the mapping to writable.
>> +	 */
>> +	vcpu->arch.mmu.page_fault(vcpu, cr2, PFERR_WRITE_MASK, true);
> 
> I don't see why is this necessary. Just allowing the instruction to
> proceed should be enough?
> 

It is used to avoid a later VM-exit: we will retry the instruction, but the
mapping is still read-only, so we call the page fault path to make the mapping
writable and avoid faulting again.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 03/11] KVM: x86: retry non-page-table writing instruction
  2011-08-22 20:21     ` Xiao Guangrong
@ 2011-08-22 20:42       ` Marcelo Tosatti
  0 siblings, 0 replies; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-22 20:42 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Tue, Aug 23, 2011 at 04:21:05AM +0800, Xiao Guangrong wrote:
> On 08/23/2011 03:59 AM, Marcelo Tosatti wrote:
> 
> >> +	if (!vcpu->arch.mmu.direct_map && !mmu_is_nested(vcpu))
> >> +		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
> > 
> > Why write? 
> > 
> 
> Since the fault is caused by page table written, and the 'gpa' can
> be written after instruction is retied.
> 
> >> +	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
> >> +
> >> +	/*
> >> +	 * The shadow pages have been zapped, then we call the page
> >> +	 * fault path to change the mapping to writable.
> >> +	 */
> >> +	vcpu->arch.mmu.page_fault(vcpu, cr2, PFERR_WRITE_MASK, true);
> > 
> > I don't see why is this necessary. Just allowing the instruction to
> > proceed should be enough?
> > 
> 
> It used to avoid later VM-exit, since we will retry the instruction
> but the mapped is still read-only. So we can it to let the mapping become
> writable to avoid page fault again.

It's not like this case is performance sensitive. Optimizing things without
the need for it usually leads to bad results. So please drop
this.


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 07/11] KVM: MMU: fast prefetch spte on invlpg path
  2011-08-16  6:44 ` [PATCH 07/11] KVM: MMU: fast prefetch spte on invlpg path Xiao Guangrong
@ 2011-08-22 22:28   ` Marcelo Tosatti
  2011-08-23  1:50     ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-22 22:28 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Tue, Aug 16, 2011 at 02:44:42PM +0800, Xiao Guangrong wrote:
> Fast prefetch spte for the unsync shadow page on invlpg path
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    4 +---
>  arch/x86/kvm/mmu.c              |   38 +++++++++++++++-----------------------
>  arch/x86/kvm/paging_tmpl.h      |   30 ++++++++++++++++++------------
>  arch/x86/kvm/x86.c              |    4 ++--
>  4 files changed, 36 insertions(+), 40 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 58ea3a7..927ba73 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -460,7 +460,6 @@ struct kvm_arch {
>  	unsigned int n_requested_mmu_pages;
>  	unsigned int n_max_mmu_pages;
>  	unsigned int indirect_shadow_pages;
> -	atomic_t invlpg_counter;
>  	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
>  	/*
>  	 * Hash table of struct kvm_mmu_page.
> @@ -754,8 +753,7 @@ int fx_init(struct kvm_vcpu *vcpu);
>  
>  void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
>  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
> -		       const u8 *new, int bytes,
> -		       bool guest_initiated);
> +		       const u8 *new, int bytes);
>  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
>  int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
>  void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ed3e778..f6de2fc 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -3530,8 +3530,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
>  }
>  
>  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
> -		       const u8 *new, int bytes,
> -		       bool guest_initiated)
> +		       const u8 *new, int bytes)
>  {
>  	gfn_t gfn = gpa >> PAGE_SHIFT;
>  	union kvm_mmu_page_role mask = { .word = 0 };
> @@ -3540,7 +3539,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  	LIST_HEAD(invalid_list);
>  	u64 entry, gentry, *spte;
>  	unsigned pte_size, page_offset, misaligned, quadrant, offset;
> -	int level, npte, invlpg_counter, r, flooded = 0;
> +	int level, npte, r, flooded = 0;
>  	bool remote_flush, local_flush, zap_page;
>  
>  	/*
> @@ -3555,19 +3554,16 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  
>  	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
>  
> -	invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
> -
>  	/*
>  	 * Assume that the pte write on a page table of the same type
>  	 * as the current vcpu paging mode since we update the sptes only
>  	 * when they have the same mode.
>  	 */
> -	if ((is_pae(vcpu) && bytes == 4) || !new) {
> +	if (is_pae(vcpu) && bytes == 4) {
>  		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
> -		if (is_pae(vcpu)) {
> -			gpa &= ~(gpa_t)7;
> -			bytes = 8;
> -		}
> +		gpa &= ~(gpa_t)7;
> +		bytes = 8;
> +
>  		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
>  		if (r)
>  			gentry = 0;
> @@ -3593,22 +3589,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
>  	 */
>  	mmu_topup_memory_caches(vcpu);
>  	spin_lock(&vcpu->kvm->mmu_lock);
> -	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
> -		gentry = 0;
>  	kvm_mmu_free_some_pages(vcpu);
>  	++vcpu->kvm->stat.mmu_pte_write;
>  	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
> -	if (guest_initiated) {
> -		if (gfn == vcpu->arch.last_pt_write_gfn
> -		    && !last_updated_pte_accessed(vcpu)) {
> -			++vcpu->arch.last_pt_write_count;
> -			if (vcpu->arch.last_pt_write_count >= 3)
> -				flooded = 1;
> -		} else {
> -			vcpu->arch.last_pt_write_gfn = gfn;
> -			vcpu->arch.last_pt_write_count = 1;
> -			vcpu->arch.last_pte_updated = NULL;
> -		}
> +	if (gfn == vcpu->arch.last_pt_write_gfn
> +	    && !last_updated_pte_accessed(vcpu)) {
> +		++vcpu->arch.last_pt_write_count;
> +		if (vcpu->arch.last_pt_write_count >= 3)
> +			flooded = 1;
> +	} else {
> +		vcpu->arch.last_pt_write_gfn = gfn;
> +		vcpu->arch.last_pt_write_count = 1;
> +		vcpu->arch.last_pte_updated = NULL;
>  	}
>  
>  	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
> diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> index 7862c05..bdc2241 100644
> --- a/arch/x86/kvm/paging_tmpl.h
> +++ b/arch/x86/kvm/paging_tmpl.h
> @@ -670,20 +670,27 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  {
>  	struct kvm_shadow_walk_iterator iterator;
>  	struct kvm_mmu_page *sp;
> -	gpa_t pte_gpa = -1;
>  	int level;
>  	u64 *sptep;
>  
>  	vcpu_clear_mmio_info(vcpu, gva);
>  
> -	spin_lock(&vcpu->kvm->mmu_lock);
> +	/*
> +	 * No need to check return value here, rmap_can_add() can
> +	 * help us to skip pte prefetch later.
> +	 */
> +	mmu_topup_memory_caches(vcpu);
>  
> +	spin_lock(&vcpu->kvm->mmu_lock);
>  	for_each_shadow_entry(vcpu, gva, iterator) {
>  		level = iterator.level;
>  		sptep = iterator.sptep;
>  
>  		sp = page_header(__pa(sptep));
>  		if (is_last_spte(*sptep, level)) {
> +			pt_element_t gpte;
> +			gpa_t pte_gpa;
> +
>  			if (!sp->unsync)
>  				break;
>  
> @@ -692,22 +699,21 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
>  
>  			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
>  				kvm_flush_remote_tlbs(vcpu->kvm);
> +
> +			if (rmap_can_add(vcpu))
> +				break;

if (!rmap_can_add(vcpu))
        break;

?

> +
> +			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
> +						  sizeof(pt_element_t)))
> +				break;
> +
> +			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
>  		}
>  
>  		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
>  			break;


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 07/11] KVM: MMU: fast prefetch spte on invlpg path
  2011-08-22 22:28   ` Marcelo Tosatti
@ 2011-08-23  1:50     ` Xiao Guangrong
  0 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-23  1:50 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/23/2011 06:28 AM, Marcelo Tosatti wrote:

>> +
>> +			if (rmap_can_add(vcpu))
>> +				break;
> 
> if (!rmap_can_add(vcpu))
>         break;
> 
> ?
> 

Oh, oops, thanks for pointing it out. Will fix it.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-16  6:46 ` [PATCH 11/11] KVM: MMU: improve write flooding detected Xiao Guangrong
@ 2011-08-23  8:00   ` Marcelo Tosatti
  2011-08-23 10:55     ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-23  8:00 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Tue, Aug 16, 2011 at 02:46:47PM +0800, Xiao Guangrong wrote:
> Detecting write-flooding does not work well, when we handle page written, if
> the last speculative spte is not accessed, we treat the page is
> write-flooding, however, we can speculative spte on many path, such as pte
> prefetch, page synced, that means the last speculative spte may be not point
> to the written page and the written page can be accessed via other sptes, so
> depends on the Accessed bit of the last speculative spte is not enough

Yes, a stale last_speculative_spte is possible, but is this fact a
noticeable problem in practice?

Was this detected by code inspection?

> Instead of detected page accessed, we can detect whether the spte is accessed
> or not, if the spte is not accessed but it is written frequently, we treat is
> not a page table or it not used for a long time
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    6 +---
>  arch/x86/kvm/mmu.c              |   48 +++++++++------------------------------
>  arch/x86/kvm/paging_tmpl.h      |    9 +-----
>  3 files changed, 15 insertions(+), 48 deletions(-)
>
 
> -static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
> +static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
>  {
> -	bool flooded = false;
> -
> -	if (gfn == vcpu->arch.last_pt_write_gfn
> -	    && !last_updated_pte_accessed(vcpu)) {
> -		++vcpu->arch.last_pt_write_count;
> -		if (vcpu->arch.last_pt_write_count >= 3)
> -			flooded = true;
> -	} else {
> -		vcpu->arch.last_pt_write_gfn = gfn;
> -		vcpu->arch.last_pt_write_count = 1;
> -		vcpu->arch.last_pte_updated = NULL;
> -	}
> +	if (spte && !(*spte & shadow_accessed_mask))
> +		sp->write_flooding_count++;
> +	else
> +		sp->write_flooding_count = 0;

This relies on the sptes being created by speculative means
or by pressure on the host clearing the accessed bit for the
shadow page to be zapped. 

There is no guarantee that either of these is true for a given
spte.

And if the sptes do not have accessed bit set, any nonconsecutive 3 pte
updates will zap the page.

Back to the first question, what is the motivation for this heuristic
change? Do you have any numbers?

If it's a significant problem, perhaps getting rid of the
'last_spte_accessed' part is enough.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-23  8:00   ` Marcelo Tosatti
@ 2011-08-23 10:55     ` Xiao Guangrong
  2011-08-23 12:38       ` Marcelo Tosatti
  0 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-23 10:55 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

Hi Marcelo,

On 08/23/2011 04:00 PM, Marcelo Tosatti wrote:
> On Tue, Aug 16, 2011 at 02:46:47PM +0800, Xiao Guangrong wrote:
>> Detecting write-flooding does not work well, when we handle page written, if
>> the last speculative spte is not accessed, we treat the page is
>> write-flooding, however, we can speculative spte on many path, such as pte
>> prefetch, page synced, that means the last speculative spte may be not point
>> to the written page and the written page can be accessed via other sptes, so
>> depends on the Accessed bit of the last speculative spte is not enough
> 
> Yes, a stale last_speculative_spte is possible, but is this fact a
> noticeable problem in practice?
> 
> Was this detected by code inspection?
> 

I detected this because I noticed some shadow pages being zapped by
write-flooding even though they are accessed again soon afterwards, which
causes the shadow page to be zapped and allocated again and again (very frequently).

Another reason is that in the current code the write-flooding logic is a little
complex and scattered across many places; write-flooding is only needed for
shadow paging/nested guests, so I want to simplify it and wrap its code up.

>> -	}
>> +	if (spte && !(*spte & shadow_accessed_mask))
>> +		sp->write_flooding_count++;
>> +	else
>> +		sp->write_flooding_count = 0;
> 
> This relies on the sptes being created by speculative means
> or by pressure on the host clearing the accessed bit for the
> shadow page to be zapped. 
> 
> There is no guarantee that either of these is true for a given
> spte.
> 
> And if the sptes do not have accessed bit set, any nonconsecutive 3 pte
> updates will zap the page.
> 

Please note that we clear 'sp->write_flooding_count' when the sp is looked up
in the shadow page cache (in kvm_mmu_get_page); that means the flooding count
is reset whenever any spte of the sp generates a #PF.

And I think there is no problem here: if a spte without the accessed bit is
written frequently, it means the guest page table is accessed infrequently, or
at least is not accessed while it is being written, and in that case zapping
this shadow page is not bad.

Compared with the old way, the advantage is that upper-level shadow pages can
also be zapped. For example, in the old way:
if a gfn is used as a PDE for one task and later the gfn is freed and reused as
a PTE for a new task, we have two shadow pages in the host, one with sp1.level = 2
and the other with sp2.level = 1. When we detect write-flooding, vcpu->last_pte_updated
always points to a pte of sp2. As sp2 is used by the new task, we always conclude that
both shadow pages are being used, but actually sp1 is not used by the guest anymore.

> Back to the first question, what is the motivation for this heuristic
> change? Do you have any numbers?
> 

Yes, I have done a quick test:

before this patch:
2m56.561
2m50.651
2m51.220
2m52.199
2m48.066

After this patch:
2m51.194
2m55.980
2m50.755
2m47.396
2m46.807

It shows the new way is a little better than the old way.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-23 10:55     ` Xiao Guangrong
@ 2011-08-23 12:38       ` Marcelo Tosatti
  2011-08-23 16:32         ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-23 12:38 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Tue, Aug 23, 2011 at 06:55:39PM +0800, Xiao Guangrong wrote:
> Hi Marcelo,
> 
> On 08/23/2011 04:00 PM, Marcelo Tosatti wrote:
> > On Tue, Aug 16, 2011 at 02:46:47PM +0800, Xiao Guangrong wrote:
> >> Detecting write-flooding does not work well, when we handle page written, if
> >> the last speculative spte is not accessed, we treat the page is
> >> write-flooding, however, we can speculative spte on many path, such as pte
> >> prefetch, page synced, that means the last speculative spte may be not point
> >> to the written page and the written page can be accessed via other sptes, so
> >> depends on the Accessed bit of the last speculative spte is not enough
> > 
> > Yes, a stale last_speculative_spte is possible, but is this fact a
> > noticeable problem in practice?
> > 
> > Was this detected by code inspection?
> > 
> 
> I detected this because: i noticed some shadow page is zapped by
> write-flooding but it is accessed soon, it causes the shadow page zapped
> and alloced again and again(very frequently).
> 
> Another reason is that: in current code, write-flooding is little complex
> and it stuffs code in many places, actually, write-flooding is only needed for
> shadow page/nested guest, so i want to simplify it and wrap its code up.
> 
> >> -	}
> >> +	if (spte && !(*spte & shadow_accessed_mask))
> >> +		sp->write_flooding_count++;
> >> +	else
> >> +		sp->write_flooding_count = 0;
> > 
> > This relies on the sptes being created by speculative means
> > or by pressure on the host clearing the accessed bit for the
> > shadow page to be zapped. 
> > 
> > There is no guarantee that either of these is true for a given
> > spte.
> > 
> > And if the sptes do not have accessed bit set, any nonconsecutive 3 pte
> > updates will zap the page.
> > 
> 
> Please note we clear 'sp->write_flooding_count' when it is accessed from
> shadow page cache (in kvm_mmu_get_page), it means if any spte of sp generates
> #PF, the fooding count can be reset.

OK.

> And, i think there are not problems since: if the spte without accssed bit is
> written frequently, it means the guest page table is accessed infrequently or
> during the writing, the guest page table is not accessed, in this time, zapping
> this shadow page is not bad.

Think of the following scenario:

1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
is not increased.
3) repeat

So you cannot rely on the accessed bit being cleared to zap the shadow
page, because it might not be cleared in certain scenarios.

> Comparing the old way, the advantage of it is good for zapping upper shadow page,
> for example, in the old way:
> if a gfn is used as PDE for a task, later, the gfn is freed and used as PTE for
> the new task, so we have two shadow pages in the host, one sp1.level = 2 and the
> other sp2.level = 1. So, when we detect write-flooding, the vcpu->last_pte_updated
> always point to sp2.pte. As sp2 is used for the new task, we always detected both
> shadow pages are bing used, but actually, sp1 is not used by guest anymore.

Makes sense.

> > Back to the first question, what is the motivation for this heuristic
> > change? Do you have any numbers?
> > 
> 
> Yes, i have done the quick test:
> 
> before this patch:
> 2m56.561
> 2m50.651
> 2m51.220
> 2m52.199
> 2m48.066
> 
> After this patch:
> 2m51.194
> 2m55.980
> 2m50.755
> 2m47.396
> 2m46.807
> 
> It shows the new way is little better than the old way.

What test is this?

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-23 12:38       ` Marcelo Tosatti
@ 2011-08-23 16:32         ` Xiao Guangrong
  2011-08-23 19:09           ` Marcelo Tosatti
  0 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-23 16:32 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/23/2011 08:38 PM, Marcelo Tosatti wrote:

>> And, i think there are not problems since: if the spte without accssed bit is
>> written frequently, it means the guest page table is accessed infrequently or
>> during the writing, the guest page table is not accessed, in this time, zapping
>> this shadow page is not bad.
> 
> Think of the following scenario:
> 
> 1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
> 2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
> is not increased.
> 3) repeat
> 

I think that result is just what we hoped for: we do not want to zap the shadow
page, because the spte is currently used by the guest and will also be used in
the next repetition. So not increasing 'write_flooding_count' is a good choice.

Let's consider what would happen if we did increase 'write_flooding_count':
1: after three repetitions, zap the shadow page
2: in step 1, we will allocate a new shadow page for the gpte at gfnA+indexA
3: in step 2, the flooding count is increased, so after 3 repetitions the
   shadow page can be zapped again; repeat 1 to 3.

The result is that the shadow page for gfnA is allocated and zapped again and
again, yes?

> So you cannot rely on the accessed bit being cleared to zap the shadow
> page, because it might not be cleared in certain scenarios.
> 
>> Comparing the old way, the advantage of it is good for zapping upper shadow page,
>> for example, in the old way:
>> if a gfn is used as PDE for a task, later, the gfn is freed and used as PTE for
>> the new task, so we have two shadow pages in the host, one sp1.level = 2 and the
>> other sp2.level = 1. So, when we detect write-flooding, the vcpu->last_pte_updated
>> always point to sp2.pte. As sp2 is used for the new task, we always detected both
>> shadow pages are bing used, but actually, sp1 is not used by guest anymore.
> 
> Makes sense.
> 
>>> Back to the first question, what is the motivation for this heuristic
>>> change? Do you have any numbers?
>>>
>>
>> Yes, i have done the quick test:
>>
>> before this patch:
>> 2m56.561
>> 2m50.651
>> 2m51.220
>> 2m52.199
>> 2m48.066
>>
>> After this patch:
>> 2m51.194
>> 2m55.980
>> 2m50.755
>> 2m47.396
>> 2m46.807
>>
>> It shows the new way is little better than the old way.
> 
> What test is this?
> 

Sorry, I forgot to mention it; the test case is kernbench. :-)
 

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-23 16:32         ` Xiao Guangrong
@ 2011-08-23 19:09           ` Marcelo Tosatti
  2011-08-23 20:16             ` Xiao Guangrong
  2011-08-25  7:57             ` Xiao Guangrong
  0 siblings, 2 replies; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-23 19:09 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Wed, Aug 24, 2011 at 12:32:32AM +0800, Xiao Guangrong wrote:
> On 08/23/2011 08:38 PM, Marcelo Tosatti wrote:
> 
> >> And, i think there are not problems since: if the spte without accssed bit is
> >> written frequently, it means the guest page table is accessed infrequently or
> >> during the writing, the guest page table is not accessed, in this time, zapping
> >> this shadow page is not bad.
> > 
> > Think of the following scenario:
> > 
> > 1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
> > 2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
> > is not increased.
> > 3) repeat
> > 
> 
> I think the result is just we hoped, we do not want to zap the shadow page
> because the spte is currently used by the guest, it also will be used in the
> next repetition. So do not increase 'write_flooding_count' is a good choice.

It's not used. Step 2) is a write to the write-protected shadow page at
gfnA.

> Let's consider what will happen if we increase 'write_flooding_count':
> 1: after three repetitions, zap the shadow page
> 2: in step 1, we will alloc a new shadow page for gpte at gfnA+indexA
> 3: in step 2, the flooding count is creased, so after 3 repetitions, the
>    shadow page can be zapped again, repeat 1 to 3.

The shadow page will not be zapped because the spte created from
gfnA+indexA has the accessed bit set:

       if (spte && !(*spte & shadow_accessed_mask))
               sp->write_flooding_count++;
       else
               sp->write_flooding_count = 0;

> The result is the shadow page for gfnA is alloced and zapped again and again,
> yes?

The point is you cannot rely on the accessed bit of sptes that have been
instantiated with the accessed bit set to decide whether or not to zap.
Because the accessed bit will only be cleared on host memory pressure.

> > So you cannot rely on the accessed bit being cleared to zap the shadow
> > page, because it might not be cleared in certain scenarios.
> > 
> >> Comparing the old way, the advantage of it is good for zapping upper shadow page,
> >> for example, in the old way:
> >> if a gfn is used as PDE for a task, later, the gfn is freed and used as PTE for
> >> the new task, so we have two shadow pages in the host, one sp1.level = 2 and the
> >> other sp2.level = 1. So, when we detect write-flooding, the vcpu->last_pte_updated
> >> always point to sp2.pte. As sp2 is used for the new task, we always detected both
> >> shadow pages are bing used, but actually, sp1 is not used by guest anymore.
> > 
> > Makes sense.
> > 
> >>> Back to the first question, what is the motivation for this heuristic
> >>> change? Do you have any numbers?
> >>>
> >>
> >> Yes, i have done the quick test:
> >>
> >> before this patch:
> >> 2m56.561
> >> 2m50.651
> >> 2m51.220
> >> 2m52.199
> >> 2m48.066
> >>
> >> After this patch:
> >> 2m51.194
> >> 2m55.980
> >> 2m50.755
> >> 2m47.396
> >> 2m46.807
> >>
> >> It shows the new way is little better than the old way.
> > 
> > What test is this?
> > 
> 
> Sorry, i forgot to mention it, the test case is kerbench. :-)
>  

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-23 19:09           ` Marcelo Tosatti
@ 2011-08-23 20:16             ` Xiao Guangrong
  2011-08-24 20:05               ` Marcelo Tosatti
  2011-08-25  7:57             ` Xiao Guangrong
  1 sibling, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-23 20:16 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/24/2011 03:09 AM, Marcelo Tosatti wrote:
> On Wed, Aug 24, 2011 at 12:32:32AM +0800, Xiao Guangrong wrote:
>> On 08/23/2011 08:38 PM, Marcelo Tosatti wrote:
>>
>>>> And, i think there are not problems since: if the spte without accssed bit is
>>>> written frequently, it means the guest page table is accessed infrequently or
>>>> during the writing, the guest page table is not accessed, in this time, zapping
>>>> this shadow page is not bad.
>>>
>>> Think of the following scenario:
>>>
>>> 1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
>>> 2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
>>> is not increased.
>>> 3) repeat
>>>
>>
>> I think the result is just we hoped, we do not want to zap the shadow page
>> because the spte is currently used by the guest, it also will be used in the
>> next repetition. So do not increase 'write_flooding_count' is a good choice.
> 
> Its not used. Step 2) is write to write protected shadow page at
> gfnA.
> 
>> Let's consider what will happen if we increase 'write_flooding_count':
>> 1: after three repetitions, zap the shadow page
>> 2: in step 1, we will alloc a new shadow page for gpte at gfnA+indexA
>> 3: in step 2, the flooding count is creased, so after 3 repetitions, the
>>    shadow page can be zapped again, repeat 1 to 3.
> 
> The shadow page will not be zapped because the spte created from
> gfnA+indexA has the accessed bit set:
> 
>        if (spte && !(*spte & shadow_accessed_mask))
>                sp->write_flooding_count++;
>        else
>                sp->write_flooding_count = 0;
> 

Ah, I see, I thought it was "repeat"ed on the same spte; my mistake.

Yes, in this case the sp is not zapped, but it is hard to know that the gfn is
no longer used as a gpte just from the writes; for example, the guest can
change the mapping address or the status bits, and so on... The sp can still be
zapped if the guest writes it again (at the same address), and I think that is
acceptable; after all, this is just a speculative way to zap unused shadow
pages... your opinion?

>> The result is the shadow page for gfnA is alloced and zapped again and again,
>> yes?
> 
> The point is you cannot rely on the accessed bit of sptes that have been
> instantiated with the accessed bit set to decide whether or not to zap.
> Because the accessed bit will only be cleared on host memory pressure.
> 

Yes, the accessed bit is only a rough way to track whether a gpte is used.
However, in most cases it can at least indicate whether the gfn has been
accessed over a period of time, for example from when the spte is speculatively
created to when it is written, or from when it is zapped to when it is written,
so I think it is not too bad.

Do you have ideas to improve this?

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-23 20:16             ` Xiao Guangrong
@ 2011-08-24 20:05               ` Marcelo Tosatti
  2011-08-25  2:04                 ` Marcelo Tosatti
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-24 20:05 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Wed, Aug 24, 2011 at 04:16:52AM +0800, Xiao Guangrong wrote:
> On 08/24/2011 03:09 AM, Marcelo Tosatti wrote:
> > On Wed, Aug 24, 2011 at 12:32:32AM +0800, Xiao Guangrong wrote:
> >> On 08/23/2011 08:38 PM, Marcelo Tosatti wrote:
> >>
> >>>> And, i think there are not problems since: if the spte without accssed bit is
> >>>> written frequently, it means the guest page table is accessed infrequently or
> >>>> during the writing, the guest page table is not accessed, in this time, zapping
> >>>> this shadow page is not bad.
> >>>
> >>> Think of the following scenario:
> >>>
> >>> 1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
> >>> 2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
> >>> is not increased.
> >>> 3) repeat
> >>>
> >>
> >> I think the result is just we hoped, we do not want to zap the shadow page
> >> because the spte is currently used by the guest, it also will be used in the
> >> next repetition. So do not increase 'write_flooding_count' is a good choice.
> > 
> > Its not used. Step 2) is write to write protected shadow page at
> > gfnA.
> > 
> >> Let's consider what will happen if we increase 'write_flooding_count':
> >> 1: after three repetitions, zap the shadow page
> >> 2: in step 1, we will alloc a new shadow page for gpte at gfnA+indexA
> >> 3: in step 2, the flooding count is creased, so after 3 repetitions, the
> >>    shadow page can be zapped again, repeat 1 to 3.
> > 
> > The shadow page will not be zapped because the spte created from
> > gfnA+indexA has the accessed bit set:
> > 
> >        if (spte && !(*spte & shadow_accessed_mask))
> >                sp->write_flooding_count++;
> >        else
> >                sp->write_flooding_count = 0;
> > 
> 
> Ah, i see, i thought it was "repeat"ed on the same spte, it was my wrong.
> 
> Yes, in this case, the sp is not zapped, but it is hardly to know the gfn
> is not used as gpte just depends on writing, for example, the guest can
> change the mapping address or the status bit, and so on...The sp can be
> zapped if the guest write it again(on the same address), i think it is
> acceptable, anymore, it is just the speculative way to zap the unused
> shadow page...your opinion?

It could increase the flood count independently of the accessed bit of
the spte being updated, zapping after 3 attempts as it does now.

But additionally reset the flood count if the gpte appears to be valid
(points to an existent gfn if the present bit is set, or if it is zeroed).
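
Something like the following, roughly (untested sketch; gpte_seems_valid() is a
placeholder for whatever validity test we agree on):

	/* in kvm_mmu_pte_write(), per shadow page mapping the written gfn: */
	if (gpte_seems_valid(gentry))
		sp->write_flooding_count = 0;
	else if (++sp->write_flooding_count >= 3)
		zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
						       &invalid_list);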

> >> The result is the shadow page for gfnA is alloced and zapped again and again,
> >> yes?
> > 
> > The point is you cannot rely on the accessed bit of sptes that have been
> > instantiated with the accessed bit set to decide whether or not to zap.
> > Because the accessed bit will only be cleared on host memory pressure.
> > 
> 
> Yes, accessed bit is the cursory way to track gpte accessed, however,
> at least, the accessed bit can indicate whether the gfn is accessed
> for a period of time in the most case, for example, from it is
> speculated to it is written, or from it is zapped to it is written,
> i thinks it is not too bad.
> 
> Do you have ideas to improve this?



^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-24 20:05               ` Marcelo Tosatti
@ 2011-08-25  2:04                 ` Marcelo Tosatti
  2011-08-25  4:42                   ` Avi Kivity
  2011-08-25  7:40                   ` Xiao Guangrong
  0 siblings, 2 replies; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-25  2:04 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Wed, Aug 24, 2011 at 05:05:40PM -0300, Marcelo Tosatti wrote:
> On Wed, Aug 24, 2011 at 04:16:52AM +0800, Xiao Guangrong wrote:
> > On 08/24/2011 03:09 AM, Marcelo Tosatti wrote:
> > > On Wed, Aug 24, 2011 at 12:32:32AM +0800, Xiao Guangrong wrote:
> > >> On 08/23/2011 08:38 PM, Marcelo Tosatti wrote:
> > >>
> > >>>> And, i think there are not problems since: if the spte without accssed bit is
> > >>>> written frequently, it means the guest page table is accessed infrequently or
> > >>>> during the writing, the guest page table is not accessed, in this time, zapping
> > >>>> this shadow page is not bad.
> > >>>
> > >>> Think of the following scenario:
> > >>>
> > >>> 1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
> > >>> 2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
> > >>> is not increased.
> > >>> 3) repeat
> > >>>
> > >>
> > >> I think the result is just we hoped, we do not want to zap the shadow page
> > >> because the spte is currently used by the guest, it also will be used in the
> > >> next repetition. So do not increase 'write_flooding_count' is a good choice.
> > > 
> > > Its not used. Step 2) is write to write protected shadow page at
> > > gfnA.
> > > 
> > >> Let's consider what will happen if we increase 'write_flooding_count':
> > >> 1: after three repetitions, zap the shadow page
> > >> 2: in step 1, we will alloc a new shadow page for gpte at gfnA+indexA
> > >> 3: in step 2, the flooding count is creased, so after 3 repetitions, the
> > >>    shadow page can be zapped again, repeat 1 to 3.
> > > 
> > > The shadow page will not be zapped because the spte created from
> > > gfnA+indexA has the accessed bit set:
> > > 
> > >        if (spte && !(*spte & shadow_accessed_mask))
> > >                sp->write_flooding_count++;
> > >        else
> > >                sp->write_flooding_count = 0;
> > > 
> > 
> > Ah, i see, i thought it was "repeat"ed on the same spte, it was my wrong.
> > 
> > Yes, in this case, the sp is not zapped, but it is hardly to know the gfn
> > is not used as gpte just depends on writing, for example, the guest can
> > change the mapping address or the status bit, and so on...The sp can be
> > zapped if the guest write it again(on the same address), i think it is
> > acceptable, anymore, it is just the speculative way to zap the unused
> > shadow page...your opinion?
> 
> It could increase the flood count independently of the accessed bit of
> the spte being updated, zapping after 3 attempts as it is now.
> 
> But additionally reset the flood count if the gpte appears to be valid
> (points to an existant gfn if the present bit is set, or if its zeroed).

Well, not zero, as that's a common pattern for non-ptes.


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-25  2:04                 ` Marcelo Tosatti
@ 2011-08-25  4:42                   ` Avi Kivity
  2011-08-25 13:21                     ` Marcelo Tosatti
  2011-08-25  7:40                   ` Xiao Guangrong
  1 sibling, 1 reply; 41+ messages in thread
From: Avi Kivity @ 2011-08-25  4:42 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Xiao Guangrong, LKML, KVM

On 08/25/2011 05:04 AM, Marcelo Tosatti wrote:
> >
> >  It could increase the flood count independently of the accessed bit of
> >  the spte being updated, zapping after 3 attempts as it is now.
> >
> >  But additionally reset the flood count if the gpte appears to be valid
> >  (points to an existant gfn if the present bit is set, or if its zeroed).
>
> Well not zero, as thats a common pattern for non ptes.
>

On 32-bit with 4GB RAM, practically anything is a valid gpte.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-25  2:04                 ` Marcelo Tosatti
  2011-08-25  4:42                   ` Avi Kivity
@ 2011-08-25  7:40                   ` Xiao Guangrong
  1 sibling, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-25  7:40 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/25/2011 10:04 AM, Marcelo Tosatti wrote:

>>> Yes, in this case, the sp is not zapped, but it is hardly to know the gfn
>>> is not used as gpte just depends on writing, for example, the guest can
>>> change the mapping address or the status bit, and so on...The sp can be
>>> zapped if the guest write it again(on the same address), i think it is
>>> acceptable, anymore, it is just the speculative way to zap the unused
>>> shadow page...your opinion?
>>
>> It could increase the flood count independently of the accessed bit of
>> the spte being updated, zapping after 3 attempts as it is now.
>>
>> But additionally reset the flood count if the gpte appears to be valid
>> (points to an existant gfn if the present bit is set, or if its zeroed).
> 
> Well not zero, as thats a common pattern for non ptes.
> 

Hi Marcelo,

Maybe it is not a good idea, I think, for several reasons:
- checking whether the gfn pointed to by the gpte is valid is expensive: it
  needs to call gfn_to_hva and walk the memslots, and kvm_mmu_pte_write is
  called very frequently with the shadow mmu.

- an MMIO gfn is not an existent gfn, but it can validly be pointed to by a gpte.

- we could check the reserved bits in the gpte to decide whether it is a valid
  gpte, but for some paging modes all bits are valid (for example, non-PAE mode).

- it can not work if the gfn has multiple shadow pages, for example:
  if the gfn was used as a PDE and is later used as a PTE, then we have two shadow
  pages, sp1.level = 2 and sp2.level = 1, and sp1 can not be zapped even though it
  is not used anymore.

- sometimes we need to zap the shadow page even though the gpte is written with a
  valid value: if the gpte is written frequently but accessed infrequently, we had
  better zap the shadow page so that the page becomes writable (it can be written
  directly without #PF) and map it again when it is accessed; one example is from
  Avi, where the guest OS may update many gptes at once after one page fault.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-23 19:09           ` Marcelo Tosatti
  2011-08-23 20:16             ` Xiao Guangrong
@ 2011-08-25  7:57             ` Xiao Guangrong
  2011-08-25 13:47               ` Marcelo Tosatti
  1 sibling, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-25  7:57 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/24/2011 03:09 AM, Marcelo Tosatti wrote:
> On Wed, Aug 24, 2011 at 12:32:32AM +0800, Xiao Guangrong wrote:
>> On 08/23/2011 08:38 PM, Marcelo Tosatti wrote:
>>
>>>> And, i think there are not problems since: if the spte without accssed bit is
>>>> written frequently, it means the guest page table is accessed infrequently or
>>>> during the writing, the guest page table is not accessed, in this time, zapping
>>>> this shadow page is not bad.
>>>
>>> Think of the following scenario:
>>>
>>> 1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
>>> 2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
>>> is not increased.
>>> 3) repeat
>>>
>>
>> I think the result is just we hoped, we do not want to zap the shadow page
>> because the spte is currently used by the guest, it also will be used in the
>> next repetition. So do not increase 'write_flooding_count' is a good choice.
> 
> Its not used. Step 2) is write to write protected shadow page at
> gfnA.
> 
>> Let's consider what will happen if we increase 'write_flooding_count':
>> 1: after three repetitions, zap the shadow page
>> 2: in step 1, we will alloc a new shadow page for gpte at gfnA+indexA
>> 3: in step 2, the flooding count is creased, so after 3 repetitions, the
>>    shadow page can be zapped again, repeat 1 to 3.
> 
> The shadow page will not be zapped because the spte created from
> gfnA+indexA has the accessed bit set:
> 
>        if (spte && !(*spte & shadow_accessed_mask))
>                sp->write_flooding_count++;
>        else
>                sp->write_flooding_count = 0;
> 

Marcelo, I am still confused by your example: in step 3), what is repeated?
Does it repeat only step 2), or steps 1) and 2)?

I guess only step 2) is repeated, right? If so, it works well: when the guest
writes the gpte, the spte of the corresponding shadow page is either zapped
(level > 1) or speculatively fetched (level == 1), and the accessed bit is
cleared in both cases.

A later write can then detect that the accessed bit is not set and increase
write_flooding_count; finally the shadow page is zapped and the gpte can be
written directly.

>> The result is the shadow page for gfnA is alloced and zapped again and again,
>> yes?
> 
> The point is you cannot rely on the accessed bit of sptes that have been
> instantiated with the accessed bit set to decide whether or not to zap.
> Because the accessed bit will only be cleared on host memory pressure.
> 

But the accessed bit is also cleared after the spte is written.

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-25  4:42                   ` Avi Kivity
@ 2011-08-25 13:21                     ` Marcelo Tosatti
  2011-08-25 14:06                       ` Avi Kivity
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-25 13:21 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Xiao Guangrong, LKML, KVM

On Thu, Aug 25, 2011 at 07:42:10AM +0300, Avi Kivity wrote:
> On 08/25/2011 05:04 AM, Marcelo Tosatti wrote:
> >>
> >>  It could increase the flood count independently of the accessed bit of
> >>  the spte being updated, zapping after 3 attempts as it is now.
> >>
> >>  But additionally reset the flood count if the gpte appears to be valid
> >>  (points to an existant gfn if the present bit is set, or if its zeroed).
> >
> >Well not zero, as thats a common pattern for non ptes.
> >
> 
> On 32-bit with 4GB RAM, practically anything is a valid gpte.

The following could be required to consider a valid gpte, for write
flood detection purposes:

- Must be present.
- PageCacheDisable must be unset.
- PageWriteThrough must be unset.
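
In code that would be something along these lines (untested sketch, using the
usual PT_* pte bit masks for present/PWT/PCD):

static bool gpte_seems_valid(u64 gpte)
{
	if (!(gpte & PT_PRESENT_MASK))
		return false;

	/* PWT or PCD set is unusual for a pte the guest actually uses */
	if (gpte & (PT_PWT_MASK | PT_PCD_MASK))
		return false;

	return true;
}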



^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-25  7:57             ` Xiao Guangrong
@ 2011-08-25 13:47               ` Marcelo Tosatti
  2011-08-26  3:18                 ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-25 13:47 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Thu, Aug 25, 2011 at 03:57:22PM +0800, Xiao Guangrong wrote:
> On 08/24/2011 03:09 AM, Marcelo Tosatti wrote:
> > On Wed, Aug 24, 2011 at 12:32:32AM +0800, Xiao Guangrong wrote:
> >> On 08/23/2011 08:38 PM, Marcelo Tosatti wrote:
> >>
> >>>> And, i think there are not problems since: if the spte without accssed bit is
> >>>> written frequently, it means the guest page table is accessed infrequently or
> >>>> during the writing, the guest page table is not accessed, in this time, zapping
> >>>> this shadow page is not bad.
> >>>
> >>> Think of the following scenario:
> >>>
> >>> 1) page fault, spte with accessed bit is created from gpte at gfnA+indexA.
> >>> 2) write to gfnA+indexA, spte has accessed bit set, write_flooding_count
> >>> is not increased.
> >>> 3) repeat
> >>>
> >>
> >> I think the result is just we hoped, we do not want to zap the shadow page
> >> because the spte is currently used by the guest, it also will be used in the
> >> next repetition. So do not increase 'write_flooding_count' is a good choice.
> > 
> > Its not used. Step 2) is write to write protected shadow page at
> > gfnA.
> > 
> >> Let's consider what will happen if we increase 'write_flooding_count':
> >> 1: after three repetitions, zap the shadow page
> >> 2: in step 1, we will alloc a new shadow page for gpte at gfnA+indexA
> >> 3: in step 2, the flooding count is creased, so after 3 repetitions, the
> >>    shadow page can be zapped again, repeat 1 to 3.
> > 
> > The shadow page will not be zapped because the spte created from
> > gfnA+indexA has the accessed bit set:
> > 
> >        if (spte && !(*spte & shadow_accessed_mask))
> >                sp->write_flooding_count++;
> >        else
> >                sp->write_flooding_count = 0;
> > 
> 
> Marcelo, i am still confused with your example, in step 3), what is repeated?
> it repeats step 2) or it repeats step 1) and 2)?
> 
> Only step 2) is repeated i guess, right? if it is yes, it works well:
> when the guest writes gpte, the spte of corresponding shadow page is zapped
> (level > 1) or it is speculatively fetched(level == 1), the accessed bit is
> cleared in both case.

Right.

> the later write can detect that the accessed bit is not set, and write_flooding_count
> is increased. finally, the shadow page is zapped, the gpte is written directly.
> 
> >> The result is the shadow page for gfnA is alloced and zapped again and again,
> >> yes?
> > 
> > The point is you cannot rely on the accessed bit of sptes that have been
> > instantiated with the accessed bit set to decide whether or not to zap.
> > Because the accessed bit will only be cleared on host memory pressure.
> > 
> 
> But the accessed bit is also cleared after spte is written.

Right. But only one of the 512 sptes. In the worst case, a shadow page that has
one spte with the accessed bit set in every 3 entries would never be zapped
during a linear write of the entire guest pagetable. The current heuristic does
not suffer from this issue.

I guess it is OK to be more trigger-happy with zapping by ignoring
the accessed bit and clearing the flood counter on page fault.


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-25 13:21                     ` Marcelo Tosatti
@ 2011-08-25 14:06                       ` Avi Kivity
  2011-08-25 14:07                         ` Avi Kivity
  0 siblings, 1 reply; 41+ messages in thread
From: Avi Kivity @ 2011-08-25 14:06 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Xiao Guangrong, LKML, KVM

On 08/25/2011 04:21 PM, Marcelo Tosatti wrote:
> On Thu, Aug 25, 2011 at 07:42:10AM +0300, Avi Kivity wrote:
> >  On 08/25/2011 05:04 AM, Marcelo Tosatti wrote:
> >  >>
> >  >>   It could increase the flood count independently of the accessed bit of
> >  >>   the spte being updated, zapping after 3 attempts as it is now.
> >  >>
> >  >>   But additionally reset the flood count if the gpte appears to be valid
> >  >>   (points to an existant gfn if the present bit is set, or if its zeroed).
> >  >
> >  >Well not zero, as thats a common pattern for non ptes.
> >  >
> >
> >  On 32-bit with 4GB RAM, practically anything is a valid gpte.
>
> The following could be required to consider a valid gpte, for write
> flood detection purposes:
>
> - Must be present.
> - PageCacheDisable must be unset.
> - PageWriteThrough must be unset.
>

Unless the guest is using PAT.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-25 14:06                       ` Avi Kivity
@ 2011-08-25 14:07                         ` Avi Kivity
  0 siblings, 0 replies; 41+ messages in thread
From: Avi Kivity @ 2011-08-25 14:07 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Xiao Guangrong, LKML, KVM

On 08/25/2011 05:06 PM, Avi Kivity wrote:
> On 08/25/2011 04:21 PM, Marcelo Tosatti wrote:
>> On Thu, Aug 25, 2011 at 07:42:10AM +0300, Avi Kivity wrote:
>> >  On 08/25/2011 05:04 AM, Marcelo Tosatti wrote:
>> > >>
>> > >>   It could increase the flood count independently of the 
>> accessed bit of
>> > >>   the spte being updated, zapping after 3 attempts as it is now.
>> > >>
>> > >>   But additionally reset the flood count if the gpte appears to 
>> be valid
>> > >>   (points to an existant gfn if the present bit is set, or if 
>> its zeroed).
>> > >
>> > >Well not zero, as thats a common pattern for non ptes.
>> > >
>> >
>> >  On 32-bit with 4GB RAM, practically anything is a valid gpte.
>>
>> The following could be required to consider a valid gpte, for write
>> flood detection purposes:
>>
>> - Must be present.
>> - PageCacheDisable must be unset.
>> - PageWriteThrough must be unset.
>>
>
> Unless the guest is using PAT.
>

And not swapping.

-- 
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-25 13:47               ` Marcelo Tosatti
@ 2011-08-26  3:18                 ` Xiao Guangrong
  2011-08-26 10:53                   ` Marcelo Tosatti
  0 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-26  3:18 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/25/2011 09:47 PM, Marcelo Tosatti wrote:

> I guess it is OK to be more trigger happy with zapping by ignoring
> the accessed bit, clearing the flood counter on page fault.
> 

Yeah, I like this way. Is this patch good for you?

================================
Subject: [PATCH 11/11] KVM: MMU: improve write flooding detected

Detecting write-flooding does not work well currently: when we handle a page
write, if the last speculative spte is not accessed, we treat the page as
write-flooded. However, we create speculative sptes on many paths, such as pte
prefetch and page sync, so the last speculative spte may not point to the
written page, and the written page may still be accessed via other sptes;
depending on the Accessed bit of the last speculative spte is not enough.

Instead of detecting whether the page is accessed, we can detect whether its
sptes are accessed after the page is written: if a spte is not accessed but the
page is written frequently, we treat the page as not being a page table, or as
not having been used for a long time.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    6 +---
 arch/x86/kvm/mmu.c              |   57 +++++++++++++--------------------------
 arch/x86/kvm/paging_tmpl.h      |   12 +++-----
 3 files changed, 26 insertions(+), 49 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 927ba73..9d17238 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -239,6 +239,8 @@ struct kvm_mmu_page {
 	int clear_spte_count;
 #endif
 
+	int write_flooding_count;
+
 	struct rcu_head rcu;
 };
 
@@ -353,10 +355,6 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
-	gfn_t last_pt_write_gfn;
-	int   last_pt_write_count;
-	u64  *last_pte_updated;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index adaa160..fd5b389 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1652,6 +1652,18 @@ static void init_shadow_page_table(struct kvm_mmu_page *sp)
 		sp->spt[i] = 0ull;
 }
 
+static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
+{
+	sp->write_flooding_count = 0;
+}
+
+static void clear_sp_write_flooding_count(u64 *spte)
+{
+	struct kvm_mmu_page *sp =  page_header(__pa(spte));
+
+	__clear_sp_write_flooding_count(sp);
+}
+
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gfn_t gfn,
 					     gva_t gaddr,
@@ -1695,6 +1707,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		} else if (sp->unsync)
 			kvm_mmu_mark_parents_unsync(sp);
 
+		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
 		return sp;
 	}
@@ -1847,15 +1860,6 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 	mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
-static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
-{
-	int i;
-	struct kvm_vcpu *vcpu;
-
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.last_pte_updated = NULL;
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *parent_pte;
@@ -1915,7 +1919,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	}
 
 	sp->role.invalid = 1;
-	kvm_mmu_reset_last_pte_updated(kvm);
 	return ret;
 }
 
@@ -2360,8 +2363,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 	kvm_release_pfn_clean(pfn);
-	if (speculative)
-		vcpu->arch.last_pte_updated = sptep;
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3522,13 +3523,6 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
 		kvm_mmu_flush_tlb(vcpu);
 }
 
-static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
-{
-	u64 *spte = vcpu->arch.last_pte_updated;
-
-	return !!(spte && (*spte & shadow_accessed_mask));
-}
-
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 				    const u8 *new, int *bytes)
 {
@@ -3569,22 +3563,9 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
  * If we're seeing too many writes to a page, it may no longer be a page table,
  * or we may be forking, in which case it is better to unmap the page.
  */
-static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
 {
-	bool flooded = false;
-
-	if (gfn == vcpu->arch.last_pt_write_gfn
-	    && !last_updated_pte_accessed(vcpu)) {
-		++vcpu->arch.last_pt_write_count;
-		if (vcpu->arch.last_pt_write_count >= 3)
-			flooded = true;
-	} else {
-		vcpu->arch.last_pt_write_gfn = gfn;
-		vcpu->arch.last_pt_write_count = 1;
-		vcpu->arch.last_pte_updated = NULL;
-	}
-
-	return flooded;
+	return ++sp->write_flooding_count >= 3;
 }
 
 /*
@@ -3656,7 +3637,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
-	bool remote_flush, local_flush, zap_page, flooded, misaligned;
+	bool remote_flush, local_flush, zap_page;
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
@@ -3675,12 +3656,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	flooded = detect_write_flooding(vcpu, gfn);
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-		misaligned = detect_write_misaligned(sp, gpa, bytes);
+		spte = get_written_sptes(sp, gpa, &npte);
 
-		if (misaligned || flooded) {
+		if (detect_write_misaligned(sp, gpa, bytes) ||
+		      detect_write_flooding(sp, spte)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
 						     &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 3395ab2..379a795 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -497,6 +497,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	     shadow_walk_next(&it)) {
 		gfn_t table_gfn;
 
+		clear_sp_write_flooding_count(it.sptep);
 		drop_large_spte(vcpu, it.sptep);
 
 		sp = NULL;
@@ -522,6 +523,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	     shadow_walk_next(&it)) {
 		gfn_t direct_gfn;
 
+		clear_sp_write_flooding_count(it.sptep);
 		validate_direct_spte(vcpu, it.sptep, direct_access);
 
 		drop_large_spte(vcpu, it.sptep);
@@ -536,6 +538,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		link_shadow_page(it.sptep, sp);
 	}
 
+	clear_sp_write_flooding_count(it.sptep);
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
 		     user_fault, write_fault, emulate, it.level,
 		     gw->gfn, pfn, prefault, map_writable);
@@ -599,11 +602,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		if (!prefault) {
+		if (!prefault)
 			inject_page_fault(vcpu, &walker.fault);
-			/* reset fork detector */
-			vcpu->arch.last_pt_write_count = 0;
-		}
+
 		return 0;
 	}
 
@@ -641,9 +642,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
 		 sptep, *sptep, emulate);
 
-	if (!emulate)
-		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-
 	++vcpu->stat.pf_fixed;
 	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-- 
1.7.5.4



^ permalink raw reply related	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-26  3:18                 ` Xiao Guangrong
@ 2011-08-26 10:53                   ` Marcelo Tosatti
  2011-08-26 14:24                     ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Marcelo Tosatti @ 2011-08-26 10:53 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Avi Kivity, LKML, KVM

On Fri, Aug 26, 2011 at 11:18:01AM +0800, Xiao Guangrong wrote:
> On 08/25/2011 09:47 PM, Marcelo Tosatti wrote:
> 
> > I guess it is OK to be more trigger happy with zapping by ignoring
> > the accessed bit, clearing the flood counter on page fault.
> > 
> 
> Yeah, i like this way, is this patch good for you?

Looks fine, can you rerun kernbench?

> ================================
> Subject: [PATCH 11/11] KVM: MMU: improve write flooding detected
> 
> Detecting write-flooding does not work well, when we handle page written, if
> the last speculative spte is not accessed, we treat the page is
> write-flooding, however, we can speculative spte on many path, such as pte
> prefetch, page synced, that means the last speculative spte may be not point
> to the written page and the written page can be accessed via other sptes, so
> depends on the Accessed bit of the last speculative spte is not enough
> 
> Instead of detected page accessed, we can detect whether the spte is accessed
> after it is written, if the spte is not accessed but it is written frequently,
> we treat is not a page table or it not used for a long time
> 
> Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-08-26 10:53                   ` Marcelo Tosatti
@ 2011-08-26 14:24                     ` Xiao Guangrong
  0 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-08-26 14:24 UTC (permalink / raw)
  To: Marcelo Tosatti; +Cc: Avi Kivity, LKML, KVM

On 08/26/2011 06:53 PM, Marcelo Tosatti wrote:
> On Fri, Aug 26, 2011 at 11:18:01AM +0800, Xiao Guangrong wrote:
>> On 08/25/2011 09:47 PM, Marcelo Tosatti wrote:
>>
>>> I guess it is OK to be more trigger happy with zapping by ignoring
>>> the accessed bit, clearing the flood counter on page fault.
>>>
>>
>> Yeah, i like this way, is this patch good for you?
> 
> Looks fine, can you rerun kernbench?
> 

Sure, I tested the performance of this approach; here are the kernbench results:

The original way:
2m56.561
2m50.651
2m51.220
2m52.199
2m48.066

The way using the accessed bit:
2m51.194
2m55.980
2m50.755
2m47.396
2m46.807

The way ignoring the accessed bit:
2m45.547
2m44.551
2m55.840
2m56.333
2m45.534

Averaging the runs gives roughly 2m51.7 for the original way, 2m50.4 with the
accessed bit, and 2m49.6 when ignoring it, so I think the result is not bad.
 

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-07-27 11:08       ` Avi Kivity
@ 2011-07-28  2:43         ` Xiao Guangrong
  0 siblings, 0 replies; 41+ messages in thread
From: Xiao Guangrong @ 2011-07-28  2:43 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

On 07/27/2011 07:08 PM, Avi Kivity wrote:
> On 07/27/2011 01:20 PM, Xiao Guangrong wrote:
>> >>    }
>> >
>> >  I think this is a little dangerous.  A guest kernel may be instantiating multiple gptes on a page fault, but guest userspace hits only one of them (the one which caused the page fault) - I think Windows does this, but I'm not sure.
>> >
>>
>> I think this case is not bad: if the guest kernel need to write multiple gptes (>=3),
>> it will cause many page fault, we do better zap the shadow page and let it become writable as
>> soon as possible.
>> (And, we have pte-fetch, it can quickly establish the mapping for a new shadow page)
> 
> Actually, what should save us is unsync pages.  Why are we hitting this path at all?
> 

Avi,

A shadow page cannot become unsync if there is another sp with sp.gfn == gfn
&& sp.role.level != 1, for example:
- the gfn is not only used for the last-level page structure (a PTE page), or
- the gfn was used for an upper-level page structure before and we have not
  zapped the old shadow pages.

So if this gfn is written, a #PF is generated; we hope these sps can be
zapped earlier, so that a later #PF detects that the gfn no longer has shadow
pages and the mapping can become writable.
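
To restate that rule in code form, a hypothetical helper (just a sketch, not
a function that exists in the kernel; gfn_may_become_unsync() is a made-up
name, and if I remember correctly the real check sits on the unsync path in
mmu_need_write_protect()):

static bool gfn_may_become_unsync(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;

	/* any non-last-level shadow page for this gfn forbids unsync */
	for_each_gfn_indirect_valid_sp(kvm, s, gfn, node)
		if (s->role.level != PT_PAGE_TABLE_LEVEL)
			return false;

	return true;
}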

^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-07-27 10:20     ` Xiao Guangrong
@ 2011-07-27 11:08       ` Avi Kivity
  2011-07-28  2:43         ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Avi Kivity @ 2011-07-27 11:08 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM

On 07/27/2011 01:20 PM, Xiao Guangrong wrote:
> >>    }
> >
> >  I think this is a little dangerous.  A guest kernel may be instantiating multiple gptes on a page fault, but guest userspace hits only one of them (the one which caused the page fault) - I think Windows does this, but I'm not sure.
> >
>
> I think this case is not bad: if the guest kernel need to write multiple gptes (>=3),
> it will cause many page fault, we do better zap the shadow page and let it become writable as
> soon as possible.
> (And, we have pte-fetch, it can quickly establish the mapping for a new shadow page)

Actually, what should save us is unsync pages.  Why are we hitting this 
path at all?

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-07-27  9:23   ` Avi Kivity
@ 2011-07-27 10:20     ` Xiao Guangrong
  2011-07-27 11:08       ` Avi Kivity
  0 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-07-27 10:20 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

On 07/27/2011 05:23 PM, Avi Kivity wrote:
> On 07/26/2011 02:32 PM, Xiao Guangrong wrote:
>> Detecting write-flooding does not work well, when we handle page written, if
>> the last speculative spte is not accessed, we treat the page is
>> write-flooding, however, we can speculative spte on many path, such as pte
>> prefetch, page synced, that means the last speculative spte may be not point
>> to the written page and the written page can be accessed via other sptes, so
>> depends on the Accessed bit of the last speculative spte is not enough
>>
>> Instead of detected page accessed, we can detect whether the spte is accessed
>> or not, if the spte is not accessed but it is written frequently, we treat is
>> not a page table or it not used for a long time
>>
>>   static int get_free_pte_list_desc_nr(struct kvm_vcpu *vcpu)
>>   {
>>       struct kvm_mmu_memory_cache *cache;
>> @@ -3565,22 +3547,14 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
>>    * If we're seeing too many writes to a page, it may no longer be a page table,
>>    * or we may be forking, in which case it is better to unmap the page.
>>    */
>> -static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
>> +static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
>>   {
>> -    bool flooded = false;
>> -
>> -    if (gfn == vcpu->arch.last_pt_write_gfn
>> -    &&  !last_updated_pte_accessed(vcpu)) {
>> -        ++vcpu->arch.last_pt_write_count;
>> -        if (vcpu->arch.last_pt_write_count>= 3)
>> -            flooded = true;
>> -    } else {
>> -        vcpu->arch.last_pt_write_gfn = gfn;
>> -        vcpu->arch.last_pt_write_count = 1;
>> -        vcpu->arch.last_pte_updated = NULL;
>> -    }
>> +    if (spte&&  !(*spte&  shadow_accessed_mask))
>> +        sp->write_flooding_count++;
>> +    else
>> +        sp->write_flooding_count = 0;
>>
>> -    return flooded;
>> +    return sp->write_flooding_count>= 3;
>>   }
> 
> I think this is a little dangerous.  A guest kernel may be instantiating multiple gptes on a page fault, but guest userspace hits only one of them (the one which caused the page fault) - I think Windows does this, but I'm not sure.
> 

I think this case is not bad: if the guest kernel needs to write multiple gptes (>= 3),
it will cause many page faults, so we had better zap the shadow page and let it become
writable as soon as possible.
(And we have pte prefetch, which can quickly establish the mappings for a new shadow page.)


^ permalink raw reply	[flat|nested] 41+ messages in thread

* Re: [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-07-26 11:32 ` [PATCH 11/11] KVM: MMU: improve write flooding detected Xiao Guangrong
@ 2011-07-27  9:23   ` Avi Kivity
  2011-07-27 10:20     ` Xiao Guangrong
  0 siblings, 1 reply; 41+ messages in thread
From: Avi Kivity @ 2011-07-27  9:23 UTC (permalink / raw)
  To: Xiao Guangrong; +Cc: Marcelo Tosatti, LKML, KVM

On 07/26/2011 02:32 PM, Xiao Guangrong wrote:
> Detecting write-flooding does not work well, when we handle page written, if
> the last speculative spte is not accessed, we treat the page is
> write-flooding, however, we can speculative spte on many path, such as pte
> prefetch, page synced, that means the last speculative spte may be not point
> to the written page and the written page can be accessed via other sptes, so
> depends on the Accessed bit of the last speculative spte is not enough
>
> Instead of detected page accessed, we can detect whether the spte is accessed
> or not, if the spte is not accessed but it is written frequently, we treat is
> not a page table or it not used for a long time
>
>   static int get_free_pte_list_desc_nr(struct kvm_vcpu *vcpu)
>   {
>   	struct kvm_mmu_memory_cache *cache;
> @@ -3565,22 +3547,14 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
>    * If we're seeing too many writes to a page, it may no longer be a page table,
>    * or we may be forking, in which case it is better to unmap the page.
>    */
> -static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
> +static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
>   {
> -	bool flooded = false;
> -
> -	if (gfn == vcpu->arch.last_pt_write_gfn
> -	&&  !last_updated_pte_accessed(vcpu)) {
> -		++vcpu->arch.last_pt_write_count;
> -		if (vcpu->arch.last_pt_write_count>= 3)
> -			flooded = true;
> -	} else {
> -		vcpu->arch.last_pt_write_gfn = gfn;
> -		vcpu->arch.last_pt_write_count = 1;
> -		vcpu->arch.last_pte_updated = NULL;
> -	}
> +	if (spte&&  !(*spte&  shadow_accessed_mask))
> +		sp->write_flooding_count++;
> +	else
> +		sp->write_flooding_count = 0;
>
> -	return flooded;
> +	return sp->write_flooding_count>= 3;
>   }

I think this is a little dangerous.  A guest kernel may be instantiating 
multiple gptes on a page fault, but guest userspace hits only one of 
them (the one which caused the page fault) - I think Windows does this, 
but I'm not sure.

Maybe we should inspect parent_ptes instead?

-- 
error compiling committee.c: too many arguments to function


^ permalink raw reply	[flat|nested] 41+ messages in thread

* [PATCH 11/11] KVM: MMU: improve write flooding detected
  2011-07-26 11:24 [PATCH 0/11] KVM: x86: optimize for guest page written Xiao Guangrong
@ 2011-07-26 11:32 ` Xiao Guangrong
  2011-07-27  9:23   ` Avi Kivity
  0 siblings, 1 reply; 41+ messages in thread
From: Xiao Guangrong @ 2011-07-26 11:32 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Marcelo Tosatti, LKML, KVM

The current write-flooding detection does not work well: when we handle a
page write, if the last speculative spte is not accessed, we treat the page
as write-flooded. However, we create speculative sptes on many paths, such
as pte prefetch and page sync, which means the last speculative spte may not
point to the written page, and the written page can still be accessed via
other sptes, so depending on the Accessed bit of the last speculative spte
is not enough.

Instead of detecting whether the page is accessed, we can detect whether the
spte is accessed or not: if the spte is not accessed but is written
frequently, we treat the page as no longer being a page table, or as not
having been used for a long time.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    6 +---
 arch/x86/kvm/mmu.c              |   53 +++++++++------------------------------
 arch/x86/kvm/paging_tmpl.h      |    9 +-----
 3 files changed, 16 insertions(+), 52 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ce17642..8c938db 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -239,6 +239,8 @@ struct kvm_mmu_page {
 	int clear_spte_count;
 #endif
 
+	int write_flooding_count;
+
 	struct rcu_head rcu;
 };
 
@@ -352,10 +354,6 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu_memory_cache mmu_page_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
-	gfn_t last_pt_write_gfn;
-	int   last_pt_write_count;
-	u64  *last_pte_updated;
-
 	struct fpu guest_fpu;
 	u64 xcr0;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bb55b15..8c2885c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1688,6 +1688,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		} else if (sp->unsync)
 			kvm_mmu_mark_parents_unsync(sp);
 
+		sp->write_flooding_count = 0;
 		trace_kvm_mmu_get_page(sp, false);
 		return sp;
 	}
@@ -1840,15 +1841,6 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 	mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
-static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
-{
-	int i;
-	struct kvm_vcpu *vcpu;
-
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.last_pte_updated = NULL;
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *parent_pte;
@@ -1908,7 +1900,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	}
 
 	sp->role.invalid = 1;
-	kvm_mmu_reset_last_pte_updated(kvm);
 	return ret;
 }
 
@@ -2350,8 +2341,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 	kvm_release_pfn_clean(pfn);
-	if (speculative)
-		vcpu->arch.last_pte_updated = sptep;
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3509,13 +3498,6 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
 		kvm_mmu_flush_tlb(vcpu);
 }
 
-static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
-{
-	u64 *spte = vcpu->arch.last_pte_updated;
-
-	return !!(spte && (*spte & shadow_accessed_mask));
-}
-
 static int get_free_pte_list_desc_nr(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *cache;
@@ -3565,22 +3547,14 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
  * If we're seeing too many writes to a page, it may no longer be a page table,
  * or we may be forking, in which case it is better to unmap the page.
  */
-static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
 {
-	bool flooded = false;
-
-	if (gfn == vcpu->arch.last_pt_write_gfn
-	    && !last_updated_pte_accessed(vcpu)) {
-		++vcpu->arch.last_pt_write_count;
-		if (vcpu->arch.last_pt_write_count >= 3)
-			flooded = true;
-	} else {
-		vcpu->arch.last_pt_write_gfn = gfn;
-		vcpu->arch.last_pt_write_count = 1;
-		vcpu->arch.last_pte_updated = NULL;
-	}
+	if (spte && !(*spte & shadow_accessed_mask))
+		sp->write_flooding_count++;
+	else
+		sp->write_flooding_count = 0;
 
-	return flooded;
+	return sp->write_flooding_count >= 3;
 }
 
 /*
@@ -3663,7 +3637,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
-	bool remote_flush, local_flush, zap_page, flooded;
+	bool remote_flush, local_flush, zap_page;
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
@@ -3682,21 +3656,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	flooded = detect_write_flooding(vcpu, gfn);
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-		bool mismatch, misaligned;
-
-		misaligned = detect_write_misaligned(sp, gpa, bytes);
-		mismatch = detect_mismatch_sp(vcpu, sp);
+		spte = get_written_sptes(sp, gpa, &npte);
 
-		if (misaligned || mismatch || flooded || repeat_write) {
+		if (repeat_write || detect_write_misaligned(sp, gpa, bytes) ||
+		      detect_write_flooding(sp, spte) ||
+		      detect_mismatch_sp(vcpu, sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
 						     &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
 
-		spte = get_written_sptes(sp, gpa, &npte);
 		if (!spte)
 			continue;
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 3466229..82063b2 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -595,11 +595,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		if (!prefault) {
+		if (!prefault)
 			inject_page_fault(vcpu, &walker.fault);
-			/* reset fork detector */
-			vcpu->arch.last_pt_write_count = 0;
-		}
+
 		return 0;
 	}
 
@@ -637,9 +635,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
 		 sptep, *sptep, emulate);
 
-	if (!emulate)
-		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-
 	++vcpu->stat.pf_fixed;
 	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 41+ messages in thread

end of thread, other threads:[~2011-08-26 14:22 UTC | newest]

Thread overview: 41+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-08-16  6:40 [PATCH 01/11] KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write Xiao Guangrong
2011-08-16  6:41 ` [PATCH 02/11] KVM: x86: tag the instructions which are used to write page table Xiao Guangrong
2011-08-22 14:32   ` Marcelo Tosatti
2011-08-22 14:36     ` Avi Kivity
2011-08-16  6:42 ` [PATCH 03/11] KVM: x86: retry non-page-table writing instruction Xiao Guangrong
2011-08-22 19:59   ` Marcelo Tosatti
2011-08-22 20:21     ` Xiao Guangrong
2011-08-22 20:42       ` Marcelo Tosatti
2011-08-16  6:42 ` [PATCH 04/11] KVM: x86: cleanup port-in/port-out emulated Xiao Guangrong
2011-08-16  6:43 ` [PATCH 05/11] KVM: MMU: do not mark access bit on pte write path Xiao Guangrong
2011-08-16  6:44 ` [PATCH 06/11] KVM: MMU: cleanup FNAME(invlpg) Xiao Guangrong
2011-08-16  6:44 ` [PATCH 07/11] KVM: MMU: fast prefetch spte on invlpg path Xiao Guangrong
2011-08-22 22:28   ` Marcelo Tosatti
2011-08-23  1:50     ` Xiao Guangrong
2011-08-16  6:45 ` [PATCH 08/11] KVM: MMU: remove unnecessary kvm_mmu_free_some_pages Xiao Guangrong
2011-08-16  6:45 ` [PATCH 09/11] KVM: MMU: split kvm_mmu_pte_write function Xiao Guangrong
2011-08-16  6:46 ` [PATCH 10/11] KVM: MMU: fix detecting misaligned accessed Xiao Guangrong
2011-08-16  6:46 ` [PATCH 11/11] KVM: MMU: improve write flooding detected Xiao Guangrong
2011-08-23  8:00   ` Marcelo Tosatti
2011-08-23 10:55     ` Xiao Guangrong
2011-08-23 12:38       ` Marcelo Tosatti
2011-08-23 16:32         ` Xiao Guangrong
2011-08-23 19:09           ` Marcelo Tosatti
2011-08-23 20:16             ` Xiao Guangrong
2011-08-24 20:05               ` Marcelo Tosatti
2011-08-25  2:04                 ` Marcelo Tosatti
2011-08-25  4:42                   ` Avi Kivity
2011-08-25 13:21                     ` Marcelo Tosatti
2011-08-25 14:06                       ` Avi Kivity
2011-08-25 14:07                         ` Avi Kivity
2011-08-25  7:40                   ` Xiao Guangrong
2011-08-25  7:57             ` Xiao Guangrong
2011-08-25 13:47               ` Marcelo Tosatti
2011-08-26  3:18                 ` Xiao Guangrong
2011-08-26 10:53                   ` Marcelo Tosatti
2011-08-26 14:24                     ` Xiao Guangrong
  -- strict thread matches above, loose matches on Subject: below --
2011-07-26 11:24 [PATCH 0/11] KVM: x86: optimize for guest page written Xiao Guangrong
2011-07-26 11:32 ` [PATCH 11/11] KVM: MMU: improve write flooding detected Xiao Guangrong
2011-07-27  9:23   ` Avi Kivity
2011-07-27 10:20     ` Xiao Guangrong
2011-07-27 11:08       ` Avi Kivity
2011-07-28  2:43         ` Xiao Guangrong
