* [PATCH 3/5] KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
From: Scott Wood @ 2011-08-26 23:31 UTC (permalink / raw)
  To: kvm-ppc

Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/include/asm/kvm_e500.h |    8 --
 arch/powerpc/include/asm/kvm_host.h |    2 -
 arch/powerpc/include/asm/kvm_para.h |   28 +++++-
 arch/powerpc/kernel/asm-offsets.c   |    9 ++
 arch/powerpc/kernel/kvm.c           |  201 +++++++++++++++++++++++++++++------
 arch/powerpc/kvm/booke.c            |    7 +-
 arch/powerpc/kvm/booke_emulate.c    |    4 +-
 arch/powerpc/kvm/e500.c             |   24 ++--
 arch/powerpc/kvm/e500_emulate.c     |   38 ++++---
 arch/powerpc/kvm/e500_tlb.c         |   83 ++++++++-------
 arch/powerpc/kvm/e500_tlb.h         |   25 ++---
 arch/powerpc/kvm/emulate.c          |    3 +-
 arch/powerpc/kvm/powerpc.c          |    2 +-
 13 files changed, 299 insertions(+), 135 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index bc17441..8cd50a5 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -71,14 +71,6 @@ struct kvmppc_vcpu_e500 {
 	u32 pid[E500_PID_NUM];
 	u32 svr;
 
-	u32 mas0;
-	u32 mas1;
-	u32 mas2;
-	u64 mas7_3;
-	u32 mas4;
-	u32 mas5;
-	u32 mas6;
-
 	/* vcpu id table */
 	struct vcpu_id_table *idt;
 
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index cc22b28..3305af4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -329,7 +329,6 @@ struct kvm_vcpu_arch {
 	ulong mcsrr0;
 	ulong mcsrr1;
 	ulong mcsr;
-	ulong esr;
 	u32 dec;
 	u32 decar;
 	u32 tbl;
@@ -338,7 +337,6 @@ struct kvm_vcpu_arch {
 	u32 tsr;
 	u32 ivor[64];
 	ulong ivpr;
-	u32 pir;
 	u32 pvr;
 
 	u32 shadow_pid;
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 50533f9..e04b4a5 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -33,11 +33,32 @@ struct kvm_vcpu_arch_shared {
 	__u64 sprg3;
 	__u64 srr0;
 	__u64 srr1;
-	__u64 dar;
+	__u64 dar;		/* dear on BookE */
 	__u64 msr;
 	__u32 dsisr;
 	__u32 int_pending;	/* Tells the guest if we have an interrupt */
 	__u32 sr[16];
+	__u32 mas0;
+	__u32 mas1;
+	__u64 mas7_3;
+	__u64 mas2;
+	__u32 mas4;
+	__u32 mas6;
+	__u32 esr;
+	__u32 pir;
+
+	/*
+	 * SPRG4-7 are user-readable, so we can't keep these
+	 * consistent between the magic page and the real
+	 * registers.  We provide space in case the guest
+	 * can deal with this.
+	 *
+	 * This also applies to SPRG3 on some chips.
+	 */
+	__u64 sprg4;
+	__u64 sprg5;
+	__u64 sprg6;
+	__u64 sprg7;
 };
 
 #define KVM_SC_MAGIC_R0		0x4b564d21 /* "KVM!" */
@@ -47,7 +68,10 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_FEATURE_MAGIC_PAGE	1
 
-#define KVM_MAGIC_FEAT_SR	(1 << 0)
+#define KVM_MAGIC_FEAT_SR		(1 << 0)
+
+/* MASn, ESR, PIR, and high SPRGs */
+#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7	(1 << 1)
 
 #ifdef __KERNEL__
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 5f078bc..34da20d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -431,6 +431,15 @@ int main(void)
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
+#ifdef CONFIG_FSL_BOOKE
+	DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
+	DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
+	DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
+	DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
+	DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
+	DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
+#endif
+
 	/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index e50c683..eb95a03 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -48,23 +48,14 @@
 #define KVM_RT_30		0x03c00000
 #define KVM_MASK_RB		0x0000f800
 #define KVM_INST_MFMSR		0x7c0000a6
-#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
-#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
-#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
-#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
-#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
-#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
-#define KVM_INST_MFSPR_DAR	0x7c1302a6
-#define KVM_INST_MFSPR_DSISR	0x7c1202a6
-
-#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
-#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
-#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
-#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
-#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
-#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
-#define KVM_INST_MTSPR_DAR	0x7c1303a6
-#define KVM_INST_MTSPR_DSISR	0x7c1203a6
+
+#define SPR_FROM		0
+#define SPR_TO			0x100
+
+#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
+				    (((sprn) & 0x1f) << 16) | \
+				    (((sprn) & 0x3e0) << 6) | \
+				    (moveto))
 
 #define KVM_INST_TLBSYNC	0x7c00046c
 #define KVM_INST_MTMSRD_L0	0x7c000164
@@ -440,56 +431,191 @@ static void kvm_check_ins(u32 *inst, u32 features)
 	case KVM_INST_MFMSR:
 		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG0:
+	case KVM_INST_SPR(SPRN_SPRG0, SPR_FROM):
 		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG1:
+	case KVM_INST_SPR(SPRN_SPRG1, SPR_FROM):
 		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG2:
+	case KVM_INST_SPR(SPRN_SPRG2, SPR_FROM):
 		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SPRG3:
+	case KVM_INST_SPR(SPRN_SPRG3, SPR_FROM):
 		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR0:
+	case KVM_INST_SPR(SPRN_SRR0, SPR_FROM):
 		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MFSPR_SRR1:
+	case KVM_INST_SPR(SPRN_SRR1, SPR_FROM):
 		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_DEAR, SPR_FROM):
+#else
+	case KVM_INST_SPR(SPRN_DAR, SPR_FROM):
+#endif
 		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MFSPR_DSISR:
+	case KVM_INST_SPR(SPRN_DSISR, SPR_FROM):
 		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
 		break;
 
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_SPR(SPRN_MAS0, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS1, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS2, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS3, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS4, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS6, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS7, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_SPR(SPRN_SPRG4, SPR_FROM):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_SPRG4R, SPR_FROM):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_SPRG5, SPR_FROM):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_SPRG5R, SPR_FROM):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_SPRG6, SPR_FROM):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_SPRG6R, SPR_FROM):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_SPRG7, SPR_FROM):
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_SPRG7R, SPR_FROM):
+#endif
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_ESR, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
+		break;
+#endif
+
+	case KVM_INST_SPR(SPRN_PIR, SPR_FROM):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
+		break;
+
+
 	/* Stores */
-	case KVM_INST_MTSPR_SPRG0:
+	case KVM_INST_SPR(SPRN_SPRG0, SPR_TO):
 		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG1:
+	case KVM_INST_SPR(SPRN_SPRG1, SPR_TO):
 		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG2:
+	case KVM_INST_SPR(SPRN_SPRG2, SPR_TO):
 		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SPRG3:
+	case KVM_INST_SPR(SPRN_SPRG3, SPR_TO):
 		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR0:
+	case KVM_INST_SPR(SPRN_SRR0, SPR_TO):
 		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
 		break;
-	case KVM_INST_MTSPR_SRR1:
+	case KVM_INST_SPR(SPRN_SRR1, SPR_TO):
 		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DAR:
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_DEAR, SPR_TO):
+#else
+	case KVM_INST_SPR(SPRN_DAR, SPR_TO):
+#endif
 		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
 		break;
-	case KVM_INST_MTSPR_DSISR:
+	case KVM_INST_SPR(SPRN_DSISR, SPR_TO):
 		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
 		break;
+#ifdef CONFIG_PPC_BOOK3E_MMU
+	case KVM_INST_SPR(SPRN_MAS0, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS1, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS2, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS3, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS4, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS6, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_MAS7, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
+		break;
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+
+	case KVM_INST_SPR(SPRN_SPRG4, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_SPRG5, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_SPRG6, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
+		break;
+	case KVM_INST_SPR(SPRN_SPRG7, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
+		break;
+
+#ifdef CONFIG_BOOKE
+	case KVM_INST_SPR(SPRN_ESR, SPR_TO):
+		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
+			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
+		break;
+#endif
 
 	/* Nops */
 	case KVM_INST_TLBSYNC:
@@ -556,9 +682,18 @@ static void kvm_use_magic_page(void)
 	start = (void*)_stext;
 	end = (void*)_etext;
 
+	/*
+	 * Being interrupted in the middle of patching would
+	 * be bad for SPRG4-7, which KVM can't keep in sync
+	 * with emulated accesses because reads don't trap.
+	 */
+	local_irq_disable();
+
 	for (p = start; p < end; p++)
 		kvm_check_ins(p, features);
 
+	local_irq_enable();
+
 	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
 			 kvm_patching_worked ? "worked" : "failed");
 }
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index aeb69b2..0ed62c1 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -276,7 +276,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
 		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
 		if (update_esr == true)
-			vcpu->arch.esr = vcpu->arch.queued_esr;
+			vcpu->arch.shared->esr = vcpu->arch.queued_esr;
 		if (update_dear == true)
 			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
 		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
@@ -618,6 +618,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.pc = 0;
 	vcpu->arch.shared->msr = 0;
 	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+	vcpu->arch.shared->pir = vcpu->vcpu_id;
 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
 	vcpu->arch.shadow_pid = 1;
@@ -699,7 +700,7 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
 	sregs->u.e.csrr0 = vcpu->arch.csrr0;
 	sregs->u.e.csrr1 = vcpu->arch.csrr1;
 	sregs->u.e.mcsr = vcpu->arch.mcsr;
-	sregs->u.e.esr = vcpu->arch.esr;
+	sregs->u.e.esr = vcpu->arch.shared->esr;
 	sregs->u.e.dear = vcpu->arch.shared->dar;
 	sregs->u.e.tsr = vcpu->arch.tsr;
 	sregs->u.e.tcr = vcpu->arch.tcr;
@@ -717,7 +718,7 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
 	vcpu->arch.csrr0 = sregs->u.e.csrr0;
 	vcpu->arch.csrr1 = sregs->u.e.csrr1;
 	vcpu->arch.mcsr = sregs->u.e.mcsr;
-	vcpu->arch.esr = sregs->u.e.esr;
+	vcpu->arch.shared->esr = sregs->u.e.esr;
 	vcpu->arch.shared->dar = sregs->u.e.dear;
 	vcpu->arch.vrsave = sregs->u.e.vrsave;
 	vcpu->arch.tcr = sregs->u.e.tcr;
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 1260f5f..162b7ac 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -107,7 +107,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 	case SPRN_DEAR:
 		vcpu->arch.shared->dar = spr_val; break;
 	case SPRN_ESR:
-		vcpu->arch.esr = spr_val; break;
+		vcpu->arch.shared->esr = spr_val; break;
 	case SPRN_DBCR0:
 		vcpu->arch.dbcr0 = spr_val; break;
 	case SPRN_DBCR1:
@@ -202,7 +202,7 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_DEAR:
 		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
 	case SPRN_ESR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
 	case SPRN_DBCR0:
 		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
 	case SPRN_DBCR1:
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index e8f5ec2..52c6872 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -112,12 +112,12 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
 	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
 
-	sregs->u.e.mas0 = vcpu_e500->mas0;
-	sregs->u.e.mas1 = vcpu_e500->mas1;
-	sregs->u.e.mas2 = vcpu_e500->mas2;
-	sregs->u.e.mas7_3 = vcpu_e500->mas7_3;
-	sregs->u.e.mas4 = vcpu_e500->mas4;
-	sregs->u.e.mas6 = vcpu_e500->mas6;
+	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+	sregs->u.e.mas6 = vcpu->arch.shared->mas6;
 
 	sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
 	sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
@@ -145,12 +145,12 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	}
 
 	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
-		vcpu_e500->mas0 = sregs->u.e.mas0;
-		vcpu_e500->mas1 = sregs->u.e.mas1;
-		vcpu_e500->mas2 = sregs->u.e.mas2;
-		vcpu_e500->mas7_3 = sregs->u.e.mas7_3;
-		vcpu_e500->mas4 = sregs->u.e.mas4;
-		vcpu_e500->mas6 = sregs->u.e.mas6;
+		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
 	}
 
 	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index e0d3609..6d0b2bd 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -89,22 +89,22 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 			return EMULATE_FAIL;
 		vcpu_e500->pid[2] = spr_val; break;
 	case SPRN_MAS0:
-		vcpu_e500->mas0 = spr_val; break;
+		vcpu->arch.shared->mas0 = spr_val; break;
 	case SPRN_MAS1:
-		vcpu_e500->mas1 = spr_val; break;
+		vcpu->arch.shared->mas1 = spr_val; break;
 	case SPRN_MAS2:
-		vcpu_e500->mas2 = spr_val; break;
+		vcpu->arch.shared->mas2 = spr_val; break;
 	case SPRN_MAS3:
-		vcpu_e500->mas7_3 &= ~(u64)0xffffffff;
-		vcpu_e500->mas7_3 |= spr_val;
+		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
+		vcpu->arch.shared->mas7_3 |= spr_val;
 		break;
 	case SPRN_MAS4:
-		vcpu_e500->mas4 = spr_val; break;
+		vcpu->arch.shared->mas4 = spr_val; break;
 	case SPRN_MAS6:
-		vcpu_e500->mas6 = spr_val; break;
+		vcpu->arch.shared->mas6 = spr_val; break;
 	case SPRN_MAS7:
-		vcpu_e500->mas7_3 &= (u64)0xffffffff;
-		vcpu_e500->mas7_3 |= (u64)spr_val << 32;
+		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
+		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
 		break;
 	case SPRN_L1CSR0:
 		vcpu_e500->l1csr0 = spr_val;
@@ -147,6 +147,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	int emulated = EMULATE_DONE;
+	unsigned long val;
 
 	switch (sprn) {
 	case SPRN_PID:
@@ -156,20 +157,23 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 	case SPRN_PID2:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
 	case SPRN_MAS0:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
 	case SPRN_MAS1:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
 	case SPRN_MAS2:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
 	case SPRN_MAS3:
-		kvmppc_set_gpr(vcpu, rt, (u32)vcpu_e500->mas7_3); break;
+		val = (u32)vcpu->arch.shared->mas7_3;
+		kvmppc_set_gpr(vcpu, rt, val);
+		break;
 	case SPRN_MAS4:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
 	case SPRN_MAS6:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
 	case SPRN_MAS7:
-		kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7_3 >> 32); break;
-
+		val = vcpu->arch.shared->mas7_3 >> 32;
+		kvmppc_set_gpr(vcpu, rt, val);
+		break;
 	case SPRN_TLB0CFG:
 		kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
 	case SPRN_TLB1CFG:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index ec17148..5d78fad 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -427,13 +427,14 @@ static int htlb0_set_base(gva_t addr)
 			     host_tlb_params[0].ways);
 }
 
-static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
+static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
 {
-	unsigned int esel = get_tlb_esel_bit(vcpu_e500);
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+	int esel = get_tlb_esel_bit(vcpu);
 
 	if (tlbsel == 0) {
 		esel &= vcpu_e500->gtlb_params[0].ways - 1;
-		esel += gtlb0_set_base(vcpu_e500, vcpu_e500->mas2);
+		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
 	} else {
 		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
 	}
@@ -544,20 +545,20 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 	int tlbsel;
 
 	/* since we only have two TLBs, only lower bit is used. */
-	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
+	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
 	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
-	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
-	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
+	pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
+	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
 
-	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
 		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
+	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
 		| MAS1_TID(vcpu_e500->pid[pidsel])
 		| MAS1_TSIZE(tsized);
-	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
-		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
-	vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
-	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
+	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
+		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
+	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
 		| (get_cur_pid(vcpu) << 16)
 		| (as ? MAS6_SAS : 0);
 }
@@ -820,15 +821,15 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 	int tlbsel, esel;
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 
-	tlbsel = get_tlb_tlbsel(vcpu_e500);
-	esel = get_tlb_esel(vcpu_e500, tlbsel);
+	tlbsel = get_tlb_tlbsel(vcpu);
+	esel = get_tlb_esel(vcpu, tlbsel);
 
 	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
-	vcpu_e500->mas0 &= ~MAS0_NV(~0);
-	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-	vcpu_e500->mas1 = gtlbe->mas1;
-	vcpu_e500->mas2 = gtlbe->mas2;
-	vcpu_e500->mas7_3 = gtlbe->mas7_3;
+	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
+	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
+	vcpu->arch.shared->mas1 = gtlbe->mas1;
+	vcpu->arch.shared->mas2 = gtlbe->mas2;
+	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
 
 	return EMULATE_DONE;
 }
@@ -836,8 +837,8 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	int as = !!get_cur_sas(vcpu_e500);
-	unsigned int pid = get_cur_spid(vcpu_e500);
+	int as = !!get_cur_sas(vcpu);
+	unsigned int pid = get_cur_spid(vcpu);
 	int esel, tlbsel;
 	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
 	gva_t ea;
@@ -855,26 +856,30 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 	if (gtlbe) {
 		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
 
-		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
+		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
 			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-		vcpu_e500->mas1 = gtlbe->mas1;
-		vcpu_e500->mas2 = gtlbe->mas2;
-		vcpu_e500->mas7_3 = gtlbe->mas7_3;
+		vcpu->arch.shared->mas1 = gtlbe->mas1;
+		vcpu->arch.shared->mas2 = gtlbe->mas2;
+		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
 	} else {
 		int victim;
 
 		/* since we only have two TLBs, only lower bit is used. */
-		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
+		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
 		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
 
-		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
+			| MAS0_ESEL(victim)
 			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
-		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
-			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
-			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
-		vcpu_e500->mas2 &= MAS2_EPN;
-		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
-		vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+		vcpu->arch.shared->mas1 =
+			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
+			| (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
+			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
+		vcpu->arch.shared->mas2 &= MAS2_EPN;
+		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
+					   MAS2_ATTRIB_MASK;
+		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
+					     MAS3_U2 | MAS3_U3;
 	}
 
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
@@ -905,19 +910,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	int tlbsel, esel;
 
-	tlbsel = get_tlb_tlbsel(vcpu_e500);
-	esel = get_tlb_esel(vcpu_e500, tlbsel);
+	tlbsel = get_tlb_tlbsel(vcpu);
+	esel = get_tlb_esel(vcpu, tlbsel);
 
 	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
 	if (get_tlb_v(gtlbe))
 		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
 
-	gtlbe->mas1 = vcpu_e500->mas1;
-	gtlbe->mas2 = vcpu_e500->mas2;
-	gtlbe->mas7_3 = vcpu_e500->mas7_3;
+	gtlbe->mas1 = vcpu->arch.shared->mas1;
+	gtlbe->mas2 = vcpu->arch.shared->mas2;
+	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
-	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
+	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
 			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 2c29640..5c6d2d7 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -121,38 +121,33 @@ static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
 	return !!(vcpu->arch.shared->msr & MSR_PR);
 }
 
-static inline unsigned int get_cur_spid(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
 {
-	return (vcpu_e500->mas6 >> 16) & 0xff;
+	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
 }
 
-static inline unsigned int get_cur_sas(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
 {
-	return vcpu_e500->mas6 & 0x1;
+	return vcpu->arch.shared->mas6 & 0x1;
 }
 
-static inline unsigned int get_tlb_tlbsel(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
 {
 	/*
 	 * Manual says that tlbsel has 2 bits wide.
 	 * Since we only have two TLBs, only lower bit is used.
 	 */
-	return (vcpu_e500->mas0 >> 28) & 0x1;
+	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
 }
 
-static inline unsigned int get_tlb_nv_bit(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
 {
-	return vcpu_e500->mas0 & 0xfff;
+	return vcpu->arch.shared->mas0 & 0xfff;
 }
 
-static inline unsigned int get_tlb_esel_bit(
-		const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
 {
-	return (vcpu_e500->mas0 >> 16) & 0xfff;
+	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
 }
 
 static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 141dce3..dc8259d 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -159,7 +159,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case OP_TRAP_64:
 		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
 #else
-		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
+		kvmppc_core_queue_program(vcpu,
+					  vcpu->arch.shared->esr | ESR_PTR);
 #endif
 		advance = 0;
 		break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index eb2d10a..0ee0b80 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -71,7 +71,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 		vcpu->arch.magic_page_pa = param1;
 		vcpu->arch.magic_page_ea = param2;
 
-		r2 = KVM_MAGIC_FEAT_SR;
+		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
 
 		r = HC_EV_SUCCESS;
 		break;
-- 
1.7.6




* Re: [PATCH 3/5] KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
From: Alexander Graf @ 2011-09-05 22:28 UTC (permalink / raw)
  To: kvm-ppc


On 27.08.2011, at 01:31, Scott Wood wrote:

> Signed-off-by: Scott Wood <scottwood@freescale.com>

Patch description missing.

> ---
> arch/powerpc/include/asm/kvm_e500.h |    8 --
> arch/powerpc/include/asm/kvm_host.h |    2 -
> arch/powerpc/include/asm/kvm_para.h |   28 +++++-
> arch/powerpc/kernel/asm-offsets.c   |    9 ++
> arch/powerpc/kernel/kvm.c           |  201 +++++++++++++++++++++++++++++------
> arch/powerpc/kvm/booke.c            |    7 +-
> arch/powerpc/kvm/booke_emulate.c    |    4 +-
> arch/powerpc/kvm/e500.c             |   24 ++--
> arch/powerpc/kvm/e500_emulate.c     |   38 ++++---
> arch/powerpc/kvm/e500_tlb.c         |   83 ++++++++-------
> arch/powerpc/kvm/e500_tlb.h         |   25 ++---
> arch/powerpc/kvm/emulate.c          |    3 +-
> arch/powerpc/kvm/powerpc.c          |    2 +-
> 13 files changed, 299 insertions(+), 135 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
> index bc17441..8cd50a5 100644
> --- a/arch/powerpc/include/asm/kvm_e500.h
> +++ b/arch/powerpc/include/asm/kvm_e500.h
> @@ -71,14 +71,6 @@ struct kvmppc_vcpu_e500 {
> 	u32 pid[E500_PID_NUM];
> 	u32 svr;
> 
> -	u32 mas0;
> -	u32 mas1;
> -	u32 mas2;
> -	u64 mas7_3;
> -	u32 mas4;
> -	u32 mas5;
> -	u32 mas6;
> -
> 	/* vcpu id table */
> 	struct vcpu_id_table *idt;
> 
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index cc22b28..3305af4 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -329,7 +329,6 @@ struct kvm_vcpu_arch {
> 	ulong mcsrr0;
> 	ulong mcsrr1;
> 	ulong mcsr;
> -	ulong esr;
> 	u32 dec;
> 	u32 decar;
> 	u32 tbl;
> @@ -338,7 +337,6 @@ struct kvm_vcpu_arch {
> 	u32 tsr;
> 	u32 ivor[64];
> 	ulong ivpr;
> -	u32 pir;
> 	u32 pvr;
> 
> 	u32 shadow_pid;
> diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
> index 50533f9..e04b4a5 100644
> --- a/arch/powerpc/include/asm/kvm_para.h
> +++ b/arch/powerpc/include/asm/kvm_para.h
> @@ -33,11 +33,32 @@ struct kvm_vcpu_arch_shared {
> 	__u64 sprg3;
> 	__u64 srr0;
> 	__u64 srr1;
> -	__u64 dar;
> +	__u64 dar;		/* dear on BookE */
> 	__u64 msr;
> 	__u32 dsisr;
> 	__u32 int_pending;	/* Tells the guest if we have an interrupt */
> 	__u32 sr[16];
> +	__u32 mas0;
> +	__u32 mas1;
> +	__u64 mas7_3;
> +	__u64 mas2;
> +	__u32 mas4;
> +	__u32 mas6;
> +	__u32 esr;
> +	__u32 pir;
> +
> +	/*
> +	 * SPRG4-7 are user-readable, so we can't keep these
> +	 * consistent between the magic page and the real
> +	 * registers.  We provide space in case the guest
> +	 * can deal with this.
> +	 *
> +	 * This also applies to SPRG3 on some chips.
> +	 */
> +	__u64 sprg4;
> +	__u64 sprg5;
> +	__u64 sprg6;
> +	__u64 sprg7;

Hrm. You're touching sprg4-7 but don't remove the fields from vcpu->arch. That sounds wrong. Also, the entry/exit code needs to use these now instead of the vcpu struct fields to restore the correct values.

> };
> 
> #define KVM_SC_MAGIC_R0		0x4b564d21 /* "KVM!" */
> @@ -47,7 +68,10 @@ struct kvm_vcpu_arch_shared {
> 
> #define KVM_FEATURE_MAGIC_PAGE	1
> 
> -#define KVM_MAGIC_FEAT_SR	(1 << 0)
> +#define KVM_MAGIC_FEAT_SR		(1 << 0)
> +
> +/* MASn, ESR, PIR, and high SPRGs */
> +#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7	(1 << 1)
> 
> #ifdef __KERNEL__
> 
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index 5f078bc..34da20d 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -431,6 +431,15 @@ int main(void)
> 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
> 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
> 
> +#ifdef CONFIG_FSL_BOOKE
> +	DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
> +	DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
> +	DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
> +	DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
> +	DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
> +	DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
> +#endif

While I agree that they only make sense on BookE, the fields in the ABI are not #ifdef'ed, so I don't see why the asm-offsets fields should be.

> +
> 	/* book3s */
> #ifdef CONFIG_KVM_BOOK3S_64_HV
> 	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
> diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
> index e50c683..eb95a03 100644
> --- a/arch/powerpc/kernel/kvm.c
> +++ b/arch/powerpc/kernel/kvm.c
> @@ -48,23 +48,14 @@
> #define KVM_RT_30		0x03c00000
> #define KVM_MASK_RB		0x0000f800
> #define KVM_INST_MFMSR		0x7c0000a6
> -#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
> -#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
> -#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
> -#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
> -#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
> -#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
> -#define KVM_INST_MFSPR_DAR	0x7c1302a6
> -#define KVM_INST_MFSPR_DSISR	0x7c1202a6
> -
> -#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
> -#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
> -#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
> -#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
> -#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
> -#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
> -#define KVM_INST_MTSPR_DAR	0x7c1303a6
> -#define KVM_INST_MTSPR_DSISR	0x7c1203a6
> +
> +#define SPR_FROM		0
> +#define SPR_TO			0x100
> +
> +#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
> +				    (((sprn) & 0x1f) << 16) | \
> +				    (((sprn) & 0x3e0) << 6) | \
> +				    (moveto))

#define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO)

makes it more readable really :)
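
(For illustration, a quick user-space sanity check that the new macro
reproduces two of the constants the patch removes. This is a sketch:
the SPR numbers are hard-coded here from the architecture rather than
taken from reg.h.)

	#include <assert.h>
	#include <stdio.h>

	#define SPR_FROM	0
	#define SPR_TO		0x100

	#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
					    (((sprn) & 0x1f) << 16) | \
					    (((sprn) & 0x3e0) << 6) | \
					    (moveto))

	int main(void)
	{
		/* SPRG0 is SPR 0x110, SRR0 is SPR 0x01a */
		assert(KVM_INST_SPR(0x110, SPR_FROM) == 0x7c1042a6); /* old KVM_INST_MFSPR_SPRG0 */
		assert(KVM_INST_SPR(0x01a, SPR_TO) == 0x7c1a03a6);   /* old KVM_INST_MTSPR_SRR0 */
		printf("encoding matches the removed constants\n");
		return 0;
	}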
> 
> 

[...]

> @@ -618,6 +618,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
> 	vcpu->arch.pc = 0;
> 	vcpu->arch.shared->msr = 0;
> 	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
> +	vcpu->arch.shared->pir = vcpu->vcpu_id;

That one rings a bell. Are you sure this patch set is on top of the other one that fixes PIR?


Alex



* Re: [PATCH 3/5] KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
From: Scott Wood @ 2011-09-15 18:26 UTC (permalink / raw)
  To: kvm-ppc

On 09/05/2011 05:28 PM, Alexander Graf wrote:
>> +	/*
>> +	 * SPRG4-7 are user-readable, so we can't keep these
>> +	 * consistent between the magic page and the real
>> +	 * registers.  We provide space in case the guest
>> +	 * can deal with this.
>> +	 *
>> +	 * This also applies to SPRG3 on some chips.
>> +	 */
>> +	__u64 sprg4;
>> +	__u64 sprg5;
>> +	__u64 sprg6;
>> +	__u64 sprg7;
> 
> Hrm. You're touching sprg4-7 but don't remove the fields from vcpu->arch. That sounds wrong. Also, the entry/exit code needs to use these now instead of the vcpu struct fields to restore the correct values.

The original idea, as the comment states, was just to provide an area
that the guest could use for this, as we can't keep it fully synced with
the hardware registers since the hw regs don't trap on read, and the
paravirt doesn't trap on write.

However, I think it could work reasonably well to use this as the
backing store instead of vcpu->arch.sprg4-7.  The guest would still see
inconsistency if it writes to paravirt and then reads from the hw reg
without an intervening exit, so that restriction on use still applies,
but qemu would see the right thing in sregs, and we wouldn't have the
duplication.
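
(For illustration, a toy user-space model of that inconsistency window.
The struct and values below are hypothetical stand-ins for the real
shared area and hardware register, not kernel code.)

	#include <stdio.h>
	#include <stdint.h>

	/* mp.sprg4 models the magic-page copy; hw_sprg4 models the value
	 * KVM last loaded into the real register. */
	struct magic_page { uint64_t sprg4; };

	int main(void)
	{
		uint64_t hw_sprg4 = 0x1111;
		struct magic_page mp = { .sprg4 = 0x1111 };

		/* Guest kernel: patched mtspr writes the magic page only. */
		mp.sprg4 = 0x2222;

		/* Guest userspace: real mfspr reads hardware without trapping. */
		printf("userspace sees %#llx, kernel wrote %#llx\n",
		       (unsigned long long)hw_sprg4,
		       (unsigned long long)mp.sprg4);

		/* Only on the next exit would KVM copy mp.sprg4 into SPRG4. */
		return 0;
	}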

>> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
>> index 5f078bc..34da20d 100644
>> --- a/arch/powerpc/kernel/asm-offsets.c
>> +++ b/arch/powerpc/kernel/asm-offsets.c
>> @@ -431,6 +431,15 @@ int main(void)
>> 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
>> 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
>>
>> +#ifdef CONFIG_FSL_BOOKE
>> +	DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
>> +	DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
>> +	DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
>> +	DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
>> +	DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
>> +	DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
>> +#endif
> 
> While I agree that they only make sense on BookE, the fields in the ABI are not #ifdef'ed, so I don't see why the asm-offsets fields should be.

OK.

>> +#define SPR_FROM		0
>> +#define SPR_TO			0x100
>> +
>> +#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
>> +				    (((sprn) & 0x1f) << 16) | \
>> +				    (((sprn) & 0x3e0) << 6) | \
>> +				    (moveto))
> 
> #define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM)
> #define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO)
> 
> makes it more readable really :)

OK.

>> @@ -618,6 +618,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
>> 	vcpu->arch.pc = 0;
>> 	vcpu->arch.shared->msr = 0;
>> 	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
>> +	vcpu->arch.shared->pir = vcpu->vcpu_id;
> 
> That one rings a bell. Are you sure this patch set is on top of the other one that fixes PIR?

Yes.  Now that it's paravirted we need to store it somewhere other than
just vcpu->vcpu_id.
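
(For illustration, a toy sketch of why the value must live in memory
once the access is paravirtualized. The register numbers and offset
below are hypothetical; only the lwz encoding itself is architectural.)

	#include <stdio.h>
	#include <stdint.h>

	/* lwz rt, off(ra) encodes as 32<<26 | rt<<21 | ra<<16 | off.
	 * KVM rewrites "mfspr rt, SPRN_PIR" into such a load from the
	 * magic page, so shared->pir has to be seeded at vcpu setup. */
	static uint32_t lwz(unsigned rt, unsigned ra, uint16_t off)
	{
		return (32u << 26) | (rt << 21) | (ra << 16) | off;
	}

	int main(void)
	{
		/* hypothetical: r3 as target, pir at offset 0xf0 */
		printf("patched insn: %#x\n", (unsigned)lwz(3, 0, 0xf0));
		return 0;
	}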

-Scott



* Re: [PATCH 3/5] KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
From: Alexander Graf @ 2011-09-19  9:23 UTC (permalink / raw)
  To: kvm-ppc


On 15.09.2011, at 20:26, Scott Wood wrote:

> On 09/05/2011 05:28 PM, Alexander Graf wrote:
>>> +	/*
>>> +	 * SPRG4-7 are user-readable, so we can't keep these
>>> +	 * consistent between the magic page and the real
>>> +	 * registers.  We provide space in case the guest
>>> +	 * can deal with this.
>>> +	 *
>>> +	 * This also applies to SPRG3 on some chips.
>>> +	 */
>>> +	__u64 sprg4;
>>> +	__u64 sprg5;
>>> +	__u64 sprg6;
>>> +	__u64 sprg7;
>> 
>> Hrm. You're touching sprg4-7 but don't remove the fields from vcpu->arch. That sounds wrong. Also, the entry/exit code needs to use these now instead of the vcpu struct fields to restore the correct values.
> 
> The original idea, as the comment states, was just to provide an area
> that the guest could use for this, as we can't keep it fully synced with
> the hardware registers since the hw regs don't trap on read, and the
> paravirt doesn't trap on write.

Right, but the guest kernel can easily patch its own use and then still wants user space to see them for what they are. Syncing them manually from the kernel side is useless - we could just as well not PV them in the first place.

> However, I think it could work reasonably well to use this as the
> backing store instead of vcpu->arch.sprg4-7.  The guest would still see
> inconsistency if it writes to paravirt and then reads from the hw reg
> without an intervening exit, so that restriction on use still applies,
> but qemu would see the right thing in sregs, and we wouldn't have the
> duplication.

Yes, also guest user space would still be fine when guest kernel space writes to the PV reg.


Alex



* Re: [PATCH 3/5] KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
From: Scott Wood @ 2011-09-19 16:05 UTC (permalink / raw)
  To: kvm-ppc

On 09/19/2011 04:23 AM, Alexander Graf wrote:
> 
> On 15.09.2011, at 20:26, Scott Wood wrote:
> 
>> However, I think it could work reasonably well to use this as the
>> backing store instead of vcpu->arch.sprg4-7.  The guest would still see
>> inconsistency if it writes to paravirt and then reads from the hw reg
>> without an intervening exit, so that restriction on use still applies,
>> but qemu would see the right thing in sregs, and we wouldn't have the
>> duplication.
> 
> Yes, also guest user space would still be fine when guest kernel space writes to the PV reg.

We'd be relying on the fact that there's always an intervening exit to
do rfi without E.HV, and that the guest has direct read/write access
(thus no paravirt) with E.HV.  Will make a note of that in the comments.

-Scott

