All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/3] KVM: PPC: e500: misc MMU stuff
@ 2011-11-29  1:19 ` Scott Wood
  0 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:19 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

Liu Yu (1):
  KVM: booke: Add booke206 TLB trace

Scott Wood (2):
  KVM: PPC: e500: Fix TLBnCFG in KVM_CONFIG_TLB
  KVM: PPC: e500: use hardware hint when loading TLB0 entries

 arch/powerpc/include/asm/mmu-book3e.h |    1 +
 arch/powerpc/kvm/e500_tlb.c           |   97 ++++++++++++++++++++-------------
 arch/powerpc/kvm/trace.h              |   57 +++++++++++++++++++
 3 files changed, 116 insertions(+), 39 deletions(-)

-- 
1.7.7.rc3.4.g8d714


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH 0/3] KVM: PPC: e500: misc MMU stuff
@ 2011-11-29  1:19 ` Scott Wood
  0 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:19 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

Liu Yu (1):
  KVM: booke: Add booke206 TLB trace

Scott Wood (2):
  KVM: PPC: e500: Fix TLBnCFG in KVM_CONFIG_TLB
  KVM: PPC: e500: use hardware hint when loading TLB0 entries

 arch/powerpc/include/asm/mmu-book3e.h |    1 +
 arch/powerpc/kvm/e500_tlb.c           |   97 ++++++++++++++++++++-------------
 arch/powerpc/kvm/trace.h              |   57 +++++++++++++++++++
 3 files changed, 116 insertions(+), 39 deletions(-)

-- 
1.7.7.rc3.4.g8d714


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH 1/3] KVM: PPC: e500: Fix TLBnCFG in KVM_CONFIG_TLB
  2011-11-29  1:19 ` Scott Wood
@ 2011-11-29  1:20   ` Scott Wood
  -1 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:20 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

The associativity, not just total size, can differ from the host
hardware.

Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/kvm/e500_tlb.c |   19 ++++++++++++++-----
 1 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 9cd124a..5073768 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -1227,12 +1227,14 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 	vcpu_e500->gtlb_offset[0] = 0;
 	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
 
-	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
+	vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
 	if (params.tlb_sizes[0] <= 2048)
 		vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
+	vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
 
-	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
+	vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
 	vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
+	vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
 
 	vcpu_e500->shared_tlb_pages = pages;
 	vcpu_e500->num_shared_tlb_pages = num_pages;
@@ -1348,10 +1350,17 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		goto err;
 
 	/* Init TLB configuration register */
-	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
+	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
+			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
 	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
-	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
-	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_params[1].entries;
+	vcpu_e500->tlb0cfg |=
+		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
+
+	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
+			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries;
+	vcpu_e500->tlb0cfg |=
+		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
 
 	return 0;
 
-- 
1.7.7.rc3.4.g8d714

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 1/3] KVM: PPC: e500: Fix TLBnCFG in KVM_CONFIG_TLB
@ 2011-11-29  1:20   ` Scott Wood
  0 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:20 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

The associativity, not just total size, can differ from the host
hardware.

Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/kvm/e500_tlb.c |   19 ++++++++++++++-----
 1 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 9cd124a..5073768 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -1227,12 +1227,14 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 	vcpu_e500->gtlb_offset[0] = 0;
 	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
 
-	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
+	vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
 	if (params.tlb_sizes[0] <= 2048)
 		vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
+	vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
 
-	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
+	vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
 	vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
+	vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
 
 	vcpu_e500->shared_tlb_pages = pages;
 	vcpu_e500->num_shared_tlb_pages = num_pages;
@@ -1348,10 +1350,17 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		goto err;
 
 	/* Init TLB configuration register */
-	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
+	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
+			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
 	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
-	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
-	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_params[1].entries;
+	vcpu_e500->tlb0cfg |=
+		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
+
+	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
+			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries;
+	vcpu_e500->tlb0cfg |=
+		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
 
 	return 0;
 
-- 
1.7.7.rc3.4.g8d714



^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 2/3] KVM: booke: Add booke206 TLB trace
  2011-11-29  1:19 ` Scott Wood
@ 2011-11-29  1:20   ` Scott Wood
  -1 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:20 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

From: Liu Yu <yu.liu@freescale.com>

Signed-off-by: Liu Yu <yu.liu@freescale.com>
[scottwood@freescale.com: made mas2 64-bit, and added mas8 init]
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/kvm/e500_tlb.c |   10 ++++---
 arch/powerpc/kvm/trace.h    |   57 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 5073768..d041f5e 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -294,6 +294,9 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
 	asm volatile("isync; tlbwe" : : : "memory");
 	local_irq_restore(flags);
+
+	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
+	                              stlbe->mas2, stlbe->mas7_3);
 }
 
 /* esel is index into set, not whole array */
@@ -308,8 +311,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 				  MAS0_TLBSEL(1) |
 				  MAS0_ESEL(to_htlb1_esel(esel)));
 	}
-	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
-			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
 }
 
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
@@ -331,6 +332,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
 	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas8 = 0;
 
 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
 	preempt_enable();
@@ -946,8 +948,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	gtlbe->mas2 = vcpu->arch.shared->mas2;
 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
-	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
-			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
+	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
+	                              gtlbe->mas2, gtlbe->mas7_3);
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index b135d3d..f2ea44b 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -337,6 +337,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
 
 #endif /* CONFIG_PPC_BOOK3S */
 
+
+/*************************************************************************
+ *                         Book3E trace points                           *
+ *************************************************************************/
+
+#ifdef CONFIG_BOOKE
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0		)
+		__field(	__u32,	mas8		)
+		__field(	__u32,	mas1		)
+		__field(	__u64,	mas2		)
+		__field(	__u64,	mas7_3		)
+	),
+
+	TP_fast_assign(
+		__entry->mas0		= mas0;
+		__entry->mas8		= mas8;
+		__entry->mas1		= mas1;
+		__entry->mas2		= mas2;
+		__entry->mas7_3		= mas7_3;
+	),
+
+	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas8, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0		)
+		__field(	__u32,	mas1		)
+		__field(	__u64,	mas2		)
+		__field(	__u64,	mas7_3		)
+	),
+
+	TP_fast_assign(
+		__entry->mas0		= mas0;
+		__entry->mas1		= mas1;
+		__entry->mas2		= mas2;
+		__entry->mas7_3		= mas7_3;
+	),
+
+	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
-- 
1.7.7.rc3.4.g8d714

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 2/3] KVM: booke: Add booke206 TLB trace
@ 2011-11-29  1:20   ` Scott Wood
  0 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:20 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

From: Liu Yu <yu.liu@freescale.com>

Signed-off-by: Liu Yu <yu.liu@freescale.com>
[scottwood@freescale.com: made mas2 64-bit, and added mas8 init]
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/kvm/e500_tlb.c |   10 ++++---
 arch/powerpc/kvm/trace.h    |   57 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 5073768..d041f5e 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -294,6 +294,9 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
 	asm volatile("isync; tlbwe" : : : "memory");
 	local_irq_restore(flags);
+
+	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
+	                              stlbe->mas2, stlbe->mas7_3);
 }
 
 /* esel is index into set, not whole array */
@@ -308,8 +311,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 				  MAS0_TLBSEL(1) |
 				  MAS0_ESEL(to_htlb1_esel(esel)));
 	}
-	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
-			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
 }
 
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
@@ -331,6 +332,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
 	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas8 = 0;
 
 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
 	preempt_enable();
@@ -946,8 +948,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	gtlbe->mas2 = vcpu->arch.shared->mas2;
 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
-	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
-			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
+	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
+	                              gtlbe->mas2, gtlbe->mas7_3);
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index b135d3d..f2ea44b 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -337,6 +337,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
 
 #endif /* CONFIG_PPC_BOOK3S */
 
+
+/*************************************************************************
+ *                         Book3E trace points                           *
+ *************************************************************************/
+
+#ifdef CONFIG_BOOKE
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0		)
+		__field(	__u32,	mas8		)
+		__field(	__u32,	mas1		)
+		__field(	__u64,	mas2		)
+		__field(	__u64,	mas7_3		)
+	),
+
+	TP_fast_assign(
+		__entry->mas0		= mas0;
+		__entry->mas8		= mas8;
+		__entry->mas1		= mas1;
+		__entry->mas2		= mas2;
+		__entry->mas7_3		= mas7_3;
+	),
+
+	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas8, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0		)
+		__field(	__u32,	mas1		)
+		__field(	__u64,	mas2		)
+		__field(	__u64,	mas7_3		)
+	),
+
+	TP_fast_assign(
+		__entry->mas0		= mas0;
+		__entry->mas1		= mas1;
+		__entry->mas2		= mas2;
+		__entry->mas7_3		= mas7_3;
+	),
+
+	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
-- 
1.7.7.rc3.4.g8d714



^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 3/3] KVM: PPC: e500: use hardware hint when loading TLB0 entries
  2011-11-29  1:19 ` Scott Wood
@ 2011-11-29  1:20   ` Scott Wood
  -1 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:20 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

The hardware maintains a per-set next victim hint.  Using this
reduces conflicts, especially on e500v2 where a single guest
TLB entry is mapped to two shadow TLB entries (user and kernel).
We want those two entries to go to different TLB ways.

sesel is now only used for TLB1.

Reported-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/include/asm/mmu-book3e.h |    1 +
 arch/powerpc/kvm/e500_tlb.c           |   68 ++++++++++++++++++--------------
 2 files changed, 39 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 4c30de3..3a7042d 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -44,6 +44,7 @@
 #define MAS0_ESEL(x)		(((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_ESEL_MASK		0x0FFF0000
+#define MAS0_ESEL_SHIFT		16
 #define MAS0_HES		0x00004000
 #define MAS0_WQ_ALLWAYS		0x00000000
 #define MAS0_WQ_COND		0x00001000
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index d041f5e..6be6917 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -299,17 +299,40 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	                              stlbe->mas2, stlbe->mas7_3);
 }
 
-/* esel is index into set, not whole array */
+/*
+ * Acquire a mas0 with victim hint, as if we just took a TLB miss.
+ *
+ * We don't care about the address we're searching for, other than that
+ * it's right set and is not present in the TLB.  Using a zero PID and a
+ * userspace address means we don't have to set and then restore MAS5, or
+ * calculate a proper MAS6 value.
+ */
+static u32 get_host_mas0(unsigned long eaddr)
+{
+	unsigned long flags, mas0;
+
+	local_irq_save(flags);
+	mtspr(SPRN_MAS6, 0);
+	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
+	mas0 = mfspr(SPRN_MAS0);
+	local_irq_restore(flags);
+
+	return mas0;
+}
+
+/* sesel is for tlb1 only */
 static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-		int tlbsel, int esel, struct kvm_book3e_206_tlb_entry *stlbe)
+		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
 {
+	unsigned long mas0;
+
 	if (tlbsel == 0) {
-		int way = esel & (vcpu_e500->gtlb_params[0].ways - 1);
-		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(way));
+		mas0 = get_host_mas0(stlbe->mas2);
+		__write_host_tlbe(stlbe, mas0);
 	} else {
 		__write_host_tlbe(stlbe,
 				  MAS0_TLBSEL(1) |
-				  MAS0_ESEL(to_htlb1_esel(esel)));
+				  MAS0_ESEL(to_htlb1_esel(sesel)));
 	}
 }
 
@@ -424,12 +447,6 @@ static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
 			     vcpu_e500->gtlb_params[0].ways);
 }
 
-static int htlb0_set_base(gva_t addr)
-{
-	return tlb0_set_base(addr, host_tlb_params[0].sets,
-			     host_tlb_params[0].ways);
-}
-
 static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -587,10 +604,9 @@ static inline void kvmppc_e500_setup_stlbe(
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 }
 
-/* sesel is an index into the entire array, not just the set */
 static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-	int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe,
+	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
 	struct tlbe_ref *ref)
 {
 	struct kvm_memory_slot *slot;
@@ -723,27 +739,19 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-				int esel,
-				struct kvm_book3e_206_tlb_entry *stlbe)
+static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+				 int esel,
+				 struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	struct tlbe_ref *ref;
-	int sesel = esel & (host_tlb_params[0].ways - 1);
-	int sesel_base;
-	gva_t ea;
 
 	gtlbe = get_entry(vcpu_e500, 0, esel);
 	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
 
-	ea = get_tlb_eaddr(gtlbe);
-	sesel_base = htlb0_set_base(ea);
-
 	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
 			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
-			gtlbe, 0, sesel_base + sesel, stlbe, ref);
-
-	return sesel;
+			gtlbe, 0, stlbe, ref);
 }
 
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -762,8 +770,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		vcpu_e500->host_tlb1_nv = 0;
 
 	ref = &vcpu_e500->tlb_refs[1][victim];
-	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1,
-			       victim, stlbe, ref);
+	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
 
 	return victim;
 }
@@ -912,7 +919,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	return EMULATE_DONE;
 }
 
-/* sesel is index into the set, not the whole array */
+/* sesel is for tlb1 only */
 static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 			struct kvm_book3e_206_tlb_entry *gtlbe,
 			struct kvm_book3e_206_tlb_entry *stlbe,
@@ -965,7 +972,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 
 			stlbsel = 0;
-			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			sesel = 0; /* unused */
 
 			break;
 
@@ -1054,7 +1062,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	switch (tlbsel) {
 	case 0:
 		stlbsel = 0;
-		sesel = esel & (host_tlb_params[0].ways - 1);
+		sesel = 0; /* unused */
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
 		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
-- 
1.7.7.rc3.4.g8d714

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 3/3] KVM: PPC: e500: use hardware hint when loading TLB0
@ 2011-11-29  1:20   ` Scott Wood
  0 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29  1:20 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

The hardware maintains a per-set next victim hint.  Using this
reduces conflicts, especially on e500v2 where a single guest
TLB entry is mapped to two shadow TLB entries (user and kernel).
We want those two entries to go to different TLB ways.

sesel is now only used for TLB1.

Reported-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/include/asm/mmu-book3e.h |    1 +
 arch/powerpc/kvm/e500_tlb.c           |   68 ++++++++++++++++++--------------
 2 files changed, 39 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 4c30de3..3a7042d 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -44,6 +44,7 @@
 #define MAS0_ESEL(x)		(((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_ESEL_MASK		0x0FFF0000
+#define MAS0_ESEL_SHIFT		16
 #define MAS0_HES		0x00004000
 #define MAS0_WQ_ALLWAYS		0x00000000
 #define MAS0_WQ_COND		0x00001000
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index d041f5e..6be6917 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -299,17 +299,40 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	                              stlbe->mas2, stlbe->mas7_3);
 }
 
-/* esel is index into set, not whole array */
+/*
+ * Acquire a mas0 with victim hint, as if we just took a TLB miss.
+ *
+ * We don't care about the address we're searching for, other than that
+ * it's right set and is not present in the TLB.  Using a zero PID and a
+ * userspace address means we don't have to set and then restore MAS5, or
+ * calculate a proper MAS6 value.
+ */
+static u32 get_host_mas0(unsigned long eaddr)
+{
+	unsigned long flags, mas0;
+
+	local_irq_save(flags);
+	mtspr(SPRN_MAS6, 0);
+	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
+	mas0 = mfspr(SPRN_MAS0);
+	local_irq_restore(flags);
+
+	return mas0;
+}
+
+/* sesel is for tlb1 only */
 static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-		int tlbsel, int esel, struct kvm_book3e_206_tlb_entry *stlbe)
+		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
 {
+	unsigned long mas0;
+
 	if (tlbsel == 0) {
-		int way = esel & (vcpu_e500->gtlb_params[0].ways - 1);
-		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(way));
+		mas0 = get_host_mas0(stlbe->mas2);
+		__write_host_tlbe(stlbe, mas0);
 	} else {
 		__write_host_tlbe(stlbe,
 				  MAS0_TLBSEL(1) |
-				  MAS0_ESEL(to_htlb1_esel(esel)));
+				  MAS0_ESEL(to_htlb1_esel(sesel)));
 	}
 }
 
@@ -424,12 +447,6 @@ static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
 			     vcpu_e500->gtlb_params[0].ways);
 }
 
-static int htlb0_set_base(gva_t addr)
-{
-	return tlb0_set_base(addr, host_tlb_params[0].sets,
-			     host_tlb_params[0].ways);
-}
-
 static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -587,10 +604,9 @@ static inline void kvmppc_e500_setup_stlbe(
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 }
 
-/* sesel is an index into the entire array, not just the set */
 static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-	int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe,
+	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
 	struct tlbe_ref *ref)
 {
 	struct kvm_memory_slot *slot;
@@ -723,27 +739,19 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-				int esel,
-				struct kvm_book3e_206_tlb_entry *stlbe)
+static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+				 int esel,
+				 struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	struct tlbe_ref *ref;
-	int sesel = esel & (host_tlb_params[0].ways - 1);
-	int sesel_base;
-	gva_t ea;
 
 	gtlbe = get_entry(vcpu_e500, 0, esel);
 	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
 
-	ea = get_tlb_eaddr(gtlbe);
-	sesel_base = htlb0_set_base(ea);
-
 	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
 			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
-			gtlbe, 0, sesel_base + sesel, stlbe, ref);
-
-	return sesel;
+			gtlbe, 0, stlbe, ref);
 }
 
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -762,8 +770,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		vcpu_e500->host_tlb1_nv = 0;
 
 	ref = &vcpu_e500->tlb_refs[1][victim];
-	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1,
-			       victim, stlbe, ref);
+	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
 
 	return victim;
 }
@@ -912,7 +919,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	return EMULATE_DONE;
 }
 
-/* sesel is index into the set, not the whole array */
+/* sesel is for tlb1 only */
 static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 			struct kvm_book3e_206_tlb_entry *gtlbe,
 			struct kvm_book3e_206_tlb_entry *stlbe,
@@ -965,7 +972,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 
 			stlbsel = 0;
-			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			sesel = 0; /* unused */
 
 			break;
 
@@ -1054,7 +1062,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	switch (tlbsel) {
 	case 0:
 		stlbsel = 0;
-		sesel = esel & (host_tlb_params[0].ways - 1);
+		sesel = 0; /* unused */
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
 		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
-- 
1.7.7.rc3.4.g8d714


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v2 3/3] KVM: PPC: e500: use hardware hint when loading TLB0 entries
  2011-11-29  1:20   ` [PATCH 3/3] KVM: PPC: e500: use hardware hint when loading TLB0 Scott Wood
@ 2011-11-29 20:40     ` Scott Wood
  -1 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29 20:40 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

The hardware maintains a per-set next victim hint.  Using this
reduces conflicts, especially on e500v2 where a single guest
TLB entry is mapped to two shadow TLB entries (user and kernel).
We want those two entries to go to different TLB ways.

sesel is now only used for TLB1.

Reported-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
v2:
 - avoid duplication of constants in MAS0_ESEL()
 - fix comment typo s/it's right set/it's in the right set/
 - consistently use u32 for mas0

 arch/powerpc/include/asm/mmu-book3e.h |    5 +-
 arch/powerpc/kvm/e500_tlb.c           |   69 ++++++++++++++++++--------------
 2 files changed, 42 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 4c30de3..307fac3 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -41,9 +41,10 @@
 /* MAS registers bit definitions */
 
 #define MAS0_TLBSEL(x)		(((x) << 28) & 0x30000000)
-#define MAS0_ESEL(x)		(((x) << 16) & 0x0FFF0000)
-#define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_ESEL_MASK		0x0FFF0000
+#define MAS0_ESEL_SHIFT		16
+#define MAS0_ESEL(x)		(((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
+#define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_HES		0x00004000
 #define MAS0_WQ_ALLWAYS		0x00000000
 #define MAS0_WQ_COND		0x00001000
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index d041f5e..6e53e41 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -299,17 +299,41 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	                              stlbe->mas2, stlbe->mas7_3);
 }
 
-/* esel is index into set, not whole array */
+/*
+ * Acquire a mas0 with victim hint, as if we just took a TLB miss.
+ *
+ * We don't care about the address we're searching for, other than that it's
+ * in the right set and is not present in the TLB.  Using a zero PID and a
+ * userspace address means we don't have to set and then restore MAS5, or
+ * calculate a proper MAS6 value.
+ */
+static u32 get_host_mas0(unsigned long eaddr)
+{
+	unsigned long flags;
+	u32 mas0;
+
+	local_irq_save(flags);
+	mtspr(SPRN_MAS6, 0);
+	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
+	mas0 = mfspr(SPRN_MAS0);
+	local_irq_restore(flags);
+
+	return mas0;
+}
+
+/* sesel is for tlb1 only */
 static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-		int tlbsel, int esel, struct kvm_book3e_206_tlb_entry *stlbe)
+		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
 {
+	u32 mas0;
+
 	if (tlbsel == 0) {
-		int way = esel & (vcpu_e500->gtlb_params[0].ways - 1);
-		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(way));
+		mas0 = get_host_mas0(stlbe->mas2);
+		__write_host_tlbe(stlbe, mas0);
 	} else {
 		__write_host_tlbe(stlbe,
 				  MAS0_TLBSEL(1) |
-				  MAS0_ESEL(to_htlb1_esel(esel)));
+				  MAS0_ESEL(to_htlb1_esel(sesel)));
 	}
 }
 
@@ -424,12 +448,6 @@ static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
 			     vcpu_e500->gtlb_params[0].ways);
 }
 
-static int htlb0_set_base(gva_t addr)
-{
-	return tlb0_set_base(addr, host_tlb_params[0].sets,
-			     host_tlb_params[0].ways);
-}
-
 static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -587,10 +605,9 @@ static inline void kvmppc_e500_setup_stlbe(
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 }
 
-/* sesel is an index into the entire array, not just the set */
 static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-	int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe,
+	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
 	struct tlbe_ref *ref)
 {
 	struct kvm_memory_slot *slot;
@@ -723,27 +740,19 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-				int esel,
-				struct kvm_book3e_206_tlb_entry *stlbe)
+static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+				 int esel,
+				 struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	struct tlbe_ref *ref;
-	int sesel = esel & (host_tlb_params[0].ways - 1);
-	int sesel_base;
-	gva_t ea;
 
 	gtlbe = get_entry(vcpu_e500, 0, esel);
 	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
 
-	ea = get_tlb_eaddr(gtlbe);
-	sesel_base = htlb0_set_base(ea);
-
 	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
 			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
-			gtlbe, 0, sesel_base + sesel, stlbe, ref);
-
-	return sesel;
+			gtlbe, 0, stlbe, ref);
 }
 
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -762,8 +771,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		vcpu_e500->host_tlb1_nv = 0;
 
 	ref = &vcpu_e500->tlb_refs[1][victim];
-	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1,
-			       victim, stlbe, ref);
+	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
 
 	return victim;
 }
@@ -912,7 +920,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	return EMULATE_DONE;
 }
 
-/* sesel is index into the set, not the whole array */
+/* sesel is for tlb1 only */
 static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 			struct kvm_book3e_206_tlb_entry *gtlbe,
 			struct kvm_book3e_206_tlb_entry *stlbe,
@@ -965,7 +973,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 
 			stlbsel = 0;
-			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			sesel = 0; /* unused */
 
 			break;
 
@@ -1054,7 +1063,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	switch (tlbsel) {
 	case 0:
 		stlbsel = 0;
-		sesel = esel & (host_tlb_params[0].ways - 1);
+		sesel = 0; /* unused */
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
 		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
-- 
1.7.7.rc3.4.g8d714

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH v2 3/3] KVM: PPC: e500: use hardware hint when loading TLB0
@ 2011-11-29 20:40     ` Scott Wood
  0 siblings, 0 replies; 14+ messages in thread
From: Scott Wood @ 2011-11-29 20:40 UTC (permalink / raw)
  To: agraf; +Cc: kvm, kvm-ppc

The hardware maintains a per-set next victim hint.  Using this
reduces conflicts, especially on e500v2 where a single guest
TLB entry is mapped to two shadow TLB entries (user and kernel).
We want those two entries to go to different TLB ways.

sesel is now only used for TLB1.

Reported-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
v2:
 - avoid duplication of constants in MAS0_ESEL()
 - fix comment typo s/it's right set/it's in the right set/
 - consistently use u32 for mas0

 arch/powerpc/include/asm/mmu-book3e.h |    5 +-
 arch/powerpc/kvm/e500_tlb.c           |   69 ++++++++++++++++++--------------
 2 files changed, 42 insertions(+), 32 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 4c30de3..307fac3 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -41,9 +41,10 @@
 /* MAS registers bit definitions */
 
 #define MAS0_TLBSEL(x)		(((x) << 28) & 0x30000000)
-#define MAS0_ESEL(x)		(((x) << 16) & 0x0FFF0000)
-#define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_ESEL_MASK		0x0FFF0000
+#define MAS0_ESEL_SHIFT		16
+#define MAS0_ESEL(x)		(((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
+#define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_HES		0x00004000
 #define MAS0_WQ_ALLWAYS		0x00000000
 #define MAS0_WQ_COND		0x00001000
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index d041f5e..6e53e41 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -299,17 +299,41 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	                              stlbe->mas2, stlbe->mas7_3);
 }
 
-/* esel is index into set, not whole array */
+/*
+ * Acquire a mas0 with victim hint, as if we just took a TLB miss.
+ *
+ * We don't care about the address we're searching for, other than that it's
+ * in the right set and is not present in the TLB.  Using a zero PID and a
+ * userspace address means we don't have to set and then restore MAS5, or
+ * calculate a proper MAS6 value.
+ */
+static u32 get_host_mas0(unsigned long eaddr)
+{
+	unsigned long flags;
+	u32 mas0;
+
+	local_irq_save(flags);
+	mtspr(SPRN_MAS6, 0);
+	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
+	mas0 = mfspr(SPRN_MAS0);
+	local_irq_restore(flags);
+
+	return mas0;
+}
+
+/* sesel is for tlb1 only */
 static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
-		int tlbsel, int esel, struct kvm_book3e_206_tlb_entry *stlbe)
+		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
 {
+	u32 mas0;
+
+	if (tlbsel == 0) {
-		int way = esel & (vcpu_e500->gtlb_params[0].ways - 1);
-		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(way));
+		mas0 = get_host_mas0(stlbe->mas2);
+		__write_host_tlbe(stlbe, mas0);
 	} else {
 		__write_host_tlbe(stlbe,
 				  MAS0_TLBSEL(1) |
-				  MAS0_ESEL(to_htlb1_esel(esel)));
+				  MAS0_ESEL(to_htlb1_esel(sesel)));
 	}
 }
 
@@ -424,12 +448,6 @@ static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
 			     vcpu_e500->gtlb_params[0].ways);
 }
 
-static int htlb0_set_base(gva_t addr)
-{
-	return tlb0_set_base(addr, host_tlb_params[0].sets,
-			     host_tlb_params[0].ways);
-}
-
 static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -587,10 +605,9 @@ static inline void kvmppc_e500_setup_stlbe(
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 }
 
-/* sesel is an index into the entire array, not just the set */
 static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-	int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe,
+	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
 	struct tlbe_ref *ref)
 {
 	struct kvm_memory_slot *slot;
@@ -723,27 +740,19 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-				int esel,
-				struct kvm_book3e_206_tlb_entry *stlbe)
+static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+				 int esel,
+				 struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	struct tlbe_ref *ref;
-	int sesel = esel & (host_tlb_params[0].ways - 1);
-	int sesel_base;
-	gva_t ea;
 
 	gtlbe = get_entry(vcpu_e500, 0, esel);
 	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
 
-	ea = get_tlb_eaddr(gtlbe);
-	sesel_base = htlb0_set_base(ea);
-
 	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
 			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
-			gtlbe, 0, sesel_base + sesel, stlbe, ref);
-
-	return sesel;
+			gtlbe, 0, stlbe, ref);
 }
 
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -762,8 +771,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		vcpu_e500->host_tlb1_nv = 0;
 
 	ref = &vcpu_e500->tlb_refs[1][victim];
-	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1,
-			       victim, stlbe, ref);
+	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
 
 	return victim;
 }
@@ -912,7 +920,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	return EMULATE_DONE;
 }
 
-/* sesel is index into the set, not the whole array */
+/* sesel is for tlb1 only */
 static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 			struct kvm_book3e_206_tlb_entry *gtlbe,
 			struct kvm_book3e_206_tlb_entry *stlbe,
@@ -965,7 +973,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 
 			stlbsel = 0;
-			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
+			sesel = 0; /* unused */
 
 			break;
 
@@ -1054,7 +1063,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	switch (tlbsel) {
 	case 0:
 		stlbsel = 0;
-		sesel = esel & (host_tlb_params[0].ways - 1);
+		sesel = 0; /* unused */
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
 		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
-- 
1.7.7.rc3.4.g8d714


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/3] KVM: PPC: e500: Fix TLBnCFG in KVM_CONFIG_TLB
  2011-11-29  1:20   ` Scott Wood
@ 2011-12-19 13:28     ` Alexander Graf
  -1 siblings, 0 replies; 14+ messages in thread
From: Alexander Graf @ 2011-12-19 13:28 UTC (permalink / raw)
  To: Scott Wood; +Cc: kvm, kvm-ppc


On 29.11.2011, at 02:20, Scott Wood wrote:

> The associativity, not just total size, can differ from the host
> hardware.
> 
> Signed-off-by: Scott Wood <scottwood@freescale.com>

Thanks, applied to kvm-ppc-next.


Alex

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/3] KVM: PPC: e500: Fix TLBnCFG in KVM_CONFIG_TLB
@ 2011-12-19 13:28     ` Alexander Graf
  0 siblings, 0 replies; 14+ messages in thread
From: Alexander Graf @ 2011-12-19 13:28 UTC (permalink / raw)
  To: Scott Wood; +Cc: kvm, kvm-ppc


On 29.11.2011, at 02:20, Scott Wood wrote:

> The associativity, not just total size, can differ from the host
> hardware.
> 
> Signed-off-by: Scott Wood <scottwood@freescale.com>

Thanks, applied to kvm-ppc-next.


Alex


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 2/3] KVM: booke: Add booke206 TLB trace
  2011-11-29  1:20   ` Scott Wood
@ 2011-12-19 13:29     ` Alexander Graf
  -1 siblings, 0 replies; 14+ messages in thread
From: Alexander Graf @ 2011-12-19 13:29 UTC (permalink / raw)
  To: Scott Wood; +Cc: kvm list, kvm-ppc, Liu Yu-B13201

On 29.11.2011, at 02:20, Scott Wood wrote:

> From: Liu Yu <yu.liu@freescale.com>
> 

Missing patch description.

Alex

> Signed-off-by: Liu Yu <yu.liu@freescale.com>
> [scottwood@freescale.com: made mas2 64-bit, and added mas8 init]
> Signed-off-by: Scott Wood <scottwood@freescale.com>
> ---
> arch/powerpc/kvm/e500_tlb.c |   10 ++++---
> arch/powerpc/kvm/trace.h    |   57 +++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 63 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
> index 5073768..d041f5e 100644
> --- a/arch/powerpc/kvm/e500_tlb.c
> +++ b/arch/powerpc/kvm/e500_tlb.c
> @@ -294,6 +294,9 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
> 	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
> 	asm volatile("isync; tlbwe" : : : "memory");
> 	local_irq_restore(flags);
> +
> +	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
> +	                              stlbe->mas2, stlbe->mas7_3);
> }
> 
> /* esel is index into set, not whole array */
> @@ -308,8 +311,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
> 				  MAS0_TLBSEL(1) |
> 				  MAS0_ESEL(to_htlb1_esel(esel)));
> 	}
> -	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
> -			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
> }
> 
> void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> @@ -331,6 +332,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
> 	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
> 		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
> +	magic.mas8 = 0;
> 
> 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
> 	preempt_enable();
> @@ -946,8 +948,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
> 	gtlbe->mas2 = vcpu->arch.shared->mas2;
> 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
> 
> -	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
> -			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
> +	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
> +	                              gtlbe->mas2, gtlbe->mas7_3);
> 
> 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
> 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
> diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
> index b135d3d..f2ea44b 100644
> --- a/arch/powerpc/kvm/trace.h
> +++ b/arch/powerpc/kvm/trace.h
> @@ -337,6 +337,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
> 
> #endif /* CONFIG_PPC_BOOK3S */
> 
> +
> +/*************************************************************************
> + *                         Book3E trace points                           *
> + *************************************************************************/
> +
> +#ifdef CONFIG_BOOKE
> +
> +TRACE_EVENT(kvm_booke206_stlb_write,
> +	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
> +	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
> +
> +	TP_STRUCT__entry(
> +		__field(	__u32,	mas0		)
> +		__field(	__u32,	mas8		)
> +		__field(	__u32,	mas1		)
> +		__field(	__u64,	mas2		)
> +		__field(	__u64,	mas7_3		)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->mas0		= mas0;
> +		__entry->mas8		= mas8;
> +		__entry->mas1		= mas1;
> +		__entry->mas2		= mas2;
> +		__entry->mas7_3		= mas7_3;
> +	),
> +
> +	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
> +		__entry->mas0, __entry->mas8, __entry->mas1,
> +		__entry->mas2, __entry->mas7_3)
> +);
> +
> +TRACE_EVENT(kvm_booke206_gtlb_write,
> +	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
> +	TP_ARGS(mas0, mas1, mas2, mas7_3),
> +
> +	TP_STRUCT__entry(
> +		__field(	__u32,	mas0		)
> +		__field(	__u32,	mas1		)
> +		__field(	__u64,	mas2		)
> +		__field(	__u64,	mas7_3		)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->mas0		= mas0;
> +		__entry->mas1		= mas1;
> +		__entry->mas2		= mas2;
> +		__entry->mas7_3		= mas7_3;
> +	),
> +
> +	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
> +		__entry->mas0, __entry->mas1,
> +		__entry->mas2, __entry->mas7_3)
> +);
> +
> +#endif
> +
> #endif /* _TRACE_KVM_H */
> 
> /* This part must be outside protection */
> -- 
> 1.7.7.rc3.4.g8d714
> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 2/3] KVM: booke: Add booke206 TLB trace
@ 2011-12-19 13:29     ` Alexander Graf
  0 siblings, 0 replies; 14+ messages in thread
From: Alexander Graf @ 2011-12-19 13:29 UTC (permalink / raw)
  To: Scott Wood; +Cc: kvm list, kvm-ppc, Liu Yu-B13201

On 29.11.2011, at 02:20, Scott Wood wrote:

> From: Liu Yu <yu.liu@freescale.com>
> 

Missing patch description.

Alex

> Signed-off-by: Liu Yu <yu.liu@freescale.com>
> [scottwood@freescale.com: made mas2 64-bit, and added mas8 init]
> Signed-off-by: Scott Wood <scottwood@freescale.com>
> ---
> arch/powerpc/kvm/e500_tlb.c |   10 ++++---
> arch/powerpc/kvm/trace.h    |   57 +++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 63 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
> index 5073768..d041f5e 100644
> --- a/arch/powerpc/kvm/e500_tlb.c
> +++ b/arch/powerpc/kvm/e500_tlb.c
> @@ -294,6 +294,9 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
> 	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
> 	asm volatile("isync; tlbwe" : : : "memory");
> 	local_irq_restore(flags);
> +
> +	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
> +	                              stlbe->mas2, stlbe->mas7_3);
> }
> 
> /* esel is index into set, not whole array */
> @@ -308,8 +311,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
> 				  MAS0_TLBSEL(1) |
> 				  MAS0_ESEL(to_htlb1_esel(esel)));
> 	}
> -	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
> -			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
> }
> 
> void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> @@ -331,6 +332,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
> 	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
> 		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
> +	magic.mas8 = 0;
> 
> 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
> 	preempt_enable();
> @@ -946,8 +948,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
> 	gtlbe->mas2 = vcpu->arch.shared->mas2;
> 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
> 
> -	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
> -			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
> +	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
> +	                              gtlbe->mas2, gtlbe->mas7_3);
> 
> 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
> 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
> diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
> index b135d3d..f2ea44b 100644
> --- a/arch/powerpc/kvm/trace.h
> +++ b/arch/powerpc/kvm/trace.h
> @@ -337,6 +337,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
> 
> #endif /* CONFIG_PPC_BOOK3S */
> 
> +
> +/*************************************************************************
> + *                         Book3E trace points                           *
> + *************************************************************************/
> +
> +#ifdef CONFIG_BOOKE
> +
> +TRACE_EVENT(kvm_booke206_stlb_write,
> +	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
> +	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
> +
> +	TP_STRUCT__entry(
> +		__field(	__u32,	mas0		)
> +		__field(	__u32,	mas8		)
> +		__field(	__u32,	mas1		)
> +		__field(	__u64,	mas2		)
> +		__field(	__u64,	mas7_3		)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->mas0		= mas0;
> +		__entry->mas8		= mas8;
> +		__entry->mas1		= mas1;
> +		__entry->mas2		= mas2;
> +		__entry->mas7_3		= mas7_3;
> +	),
> +
> +	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
> +		__entry->mas0, __entry->mas8, __entry->mas1,
> +		__entry->mas2, __entry->mas7_3)
> +);
> +
> +TRACE_EVENT(kvm_booke206_gtlb_write,
> +	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
> +	TP_ARGS(mas0, mas1, mas2, mas7_3),
> +
> +	TP_STRUCT__entry(
> +		__field(	__u32,	mas0		)
> +		__field(	__u32,	mas1		)
> +		__field(	__u64,	mas2		)
> +		__field(	__u64,	mas7_3		)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->mas0		= mas0;
> +		__entry->mas1		= mas1;
> +		__entry->mas2		= mas2;
> +		__entry->mas7_3		= mas7_3;
> +	),
> +
> +	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
> +		__entry->mas0, __entry->mas1,
> +		__entry->mas2, __entry->mas7_3)
> +);
> +
> +#endif
> +
> #endif /* _TRACE_KVM_H */
> 
> /* This part must be outside protection */
> -- 
> 1.7.7.rc3.4.g8d714
> 
> 
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html


^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2011-12-19 13:29 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-11-29  1:19 [PATCH 0/3] KVM: PPC: e500: misc MMU stuff Scott Wood
2011-11-29  1:19 ` Scott Wood
2011-11-29  1:20 ` [PATCH 1/3] KVM: PPC: e500: Fix TLBnCFG in KVM_CONFIG_TLB Scott Wood
2011-11-29  1:20   ` Scott Wood
2011-12-19 13:28   ` Alexander Graf
2011-12-19 13:28     ` Alexander Graf
2011-11-29  1:20 ` [PATCH 2/3] KVM: booke: Add booke206 TLB trace Scott Wood
2011-11-29  1:20   ` Scott Wood
2011-12-19 13:29   ` Alexander Graf
2011-12-19 13:29     ` Alexander Graf
2011-11-29  1:20 ` [PATCH 3/3] KVM: PPC: e500: use hardware hint when loading TLB0 entries Scott Wood
2011-11-29  1:20   ` [PATCH 3/3] KVM: PPC: e500: use hardware hint when loading TLB0 Scott Wood
2011-11-29 20:40   ` [PATCH v2 3/3] KVM: PPC: e500: use hardware hint when loading TLB0 entries Scott Wood
2011-11-29 20:40     ` [PATCH v2 3/3] KVM: PPC: e500: use hardware hint when loading TLB0 Scott Wood

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.