linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH 0/6] KVM: PPC: Book3E: AltiVec support
@ 2013-07-03 12:42 Mihai Caraman
  2013-07-03 12:42 ` [PATCH 1/6] KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers Mihai Caraman
                   ` (5 more replies)
  0 siblings, 6 replies; 37+ messages in thread
From: Mihai Caraman @ 2013-07-03 12:42 UTC (permalink / raw)
  To: kvm-ppc; +Cc: Mihai Caraman, linuxppc-dev, kvm

Add KVM Book3E AltiVec support and enable the e6500 core.

Mihai Caraman (6):
  KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers
  KVM: PPC: Book3E: Refactor SPE/FP exit handling
  KVM: PPC: Book3E: Increase FPU laziness
  KVM: PPC: Book3E: Add AltiVec support
  KVM: PPC: Book3E: Add ONE_REG AltiVec support
  KVM: PPC: Book3E: Enable e6500 core

 arch/powerpc/kvm/booke.c              |  211 +++++++++++++++++++++++++++------
 arch/powerpc/kvm/booke.h              |    5 +-
 arch/powerpc/kvm/bookehv_interrupts.S |    8 +-
 arch/powerpc/kvm/e500.c               |   10 +-
 arch/powerpc/kvm/e500_emulate.c       |    8 +-
 arch/powerpc/kvm/e500mc.c             |   12 ++-
 6 files changed, 201 insertions(+), 53 deletions(-)

-- 
1.7.3.4

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [PATCH 1/6] KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers
  2013-07-03 12:42 [PATCH 0/6] KVM: PPC: Book3E: AltiVec support Mihai Caraman
@ 2013-07-03 12:42 ` Mihai Caraman
  2013-07-03 12:42 ` [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling Mihai Caraman
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 37+ messages in thread
From: Mihai Caraman @ 2013-07-03 12:42 UTC (permalink / raw)
  To: kvm-ppc; +Cc: Mihai Caraman, linuxppc-dev, kvm

Use common BOOKE_IRQPRIO and BOOKE_INTERRUPT defines for SPE/FP/AltiVec,
which share the same interrupt numbers.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/booke.c              |   16 ++++++++--------
 arch/powerpc/kvm/booke.h              |    4 ++--
 arch/powerpc/kvm/bookehv_interrupts.S |    8 ++++----
 arch/powerpc/kvm/e500.c               |   10 ++++++----
 arch/powerpc/kvm/e500_emulate.c       |    8 ++++----
 5 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index d2fef74..fb47e85 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -362,8 +362,8 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 	case BOOKE_IRQPRIO_ITLB_MISS:
 	case BOOKE_IRQPRIO_SYSCALL:
 	case BOOKE_IRQPRIO_FP_UNAVAIL:
-	case BOOKE_IRQPRIO_SPE_UNAVAIL:
-	case BOOKE_IRQPRIO_SPE_FP_DATA:
+	case BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL:
+	case BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST:
 	case BOOKE_IRQPRIO_SPE_FP_ROUND:
 	case BOOKE_IRQPRIO_AP_UNAVAIL:
 		allowed = 1;
@@ -944,18 +944,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 #ifdef CONFIG_SPE
-	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
 		if (vcpu->arch.shared->msr & MSR_SPE)
 			kvmppc_vcpu_enable_spe(vcpu);
 		else
 			kvmppc_booke_queue_irqprio(vcpu,
-						   BOOKE_IRQPRIO_SPE_UNAVAIL);
+						   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
 		r = RESUME_GUEST;
 		break;
 	}
 
-	case BOOKE_INTERRUPT_SPE_FP_DATA:
-		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
+	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
+		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
 		r = RESUME_GUEST;
 		break;
 
@@ -964,7 +964,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 #else
-	case BOOKE_INTERRUPT_SPE_UNAVAIL:
+	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL:
 		/*
 		 * Guest wants SPE, but host kernel doesn't support it.  Send
 		 * an "unimplemented operation" program check to the guest.
@@ -977,7 +977,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * These really should never happen without CONFIG_SPE,
 	 * as we should never enable the real MSR[SPE] in the guest.
 	 */
-	case BOOKE_INTERRUPT_SPE_FP_DATA:
+	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
 	case BOOKE_INTERRUPT_SPE_FP_ROUND:
 		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
 		       __func__, exit_nr, vcpu->arch.pc);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 5fd1ba6..9e92006 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -32,8 +32,8 @@
 #define BOOKE_IRQPRIO_ALIGNMENT 2
 #define BOOKE_IRQPRIO_PROGRAM 3
 #define BOOKE_IRQPRIO_FP_UNAVAIL 4
-#define BOOKE_IRQPRIO_SPE_UNAVAIL 5
-#define BOOKE_IRQPRIO_SPE_FP_DATA 6
+#define BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL 5
+#define BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST 6
 #define BOOKE_IRQPRIO_SPE_FP_ROUND 7
 #define BOOKE_IRQPRIO_SYSCALL 8
 #define BOOKE_IRQPRIO_AP_UNAVAIL 9
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index e8ed7d6..8d35dc0 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -295,9 +295,9 @@ kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
 	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
 	SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \
+kvm_handler BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \
 	SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \
+kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST, EX_PARAMS(GEN), \
 	SPRN_SRR0, SPRN_SRR1, 0
 kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \
 	SPRN_SRR0, SPRN_SRR1, 0
@@ -398,8 +398,8 @@ kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
 kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
 	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST, SPRN_SRR0, SPRN_SRR1, 0
 kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
 kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
 kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ce6b73c..0fb2f4d 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -380,8 +380,10 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
 	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
 
-	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
-	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
+	sregs->u.e.ivor_high[0] =
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL];
+	sregs->u.e.ivor_high[1] =
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST];
 	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
 	sregs->u.e.ivor_high[3] =
 		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
@@ -409,9 +411,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		return 0;
 
 	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL] =
 			sregs->u.e.ivor_high[0];
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST] =
 			sregs->u.e.ivor_high[1];
 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
 			sregs->u.e.ivor_high[2];
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index b10a012..b2e159b 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -211,10 +211,10 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
 
 	/* extra exceptions */
 	case SPRN_IVOR32:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL] = spr_val;
 		break;
 	case SPRN_IVOR33:
-		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
+		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST] = spr_val;
 		break;
 	case SPRN_IVOR34:
 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
@@ -329,10 +329,10 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
 
 	/* extra exceptions */
 	case SPRN_IVOR32:
-		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL];
 		break;
 	case SPRN_IVOR33:
-		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
+		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST];
 		break;
 	case SPRN_IVOR34:
 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling
  2013-07-03 12:42 [PATCH 0/6] KVM: PPC: Book3E: AltiVec support Mihai Caraman
  2013-07-03 12:42 ` [PATCH 1/6] KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers Mihai Caraman
@ 2013-07-03 12:42 ` Mihai Caraman
  2013-07-03 13:30   ` Alexander Graf
  2013-07-03 12:42 ` [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness Mihai Caraman
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 37+ messages in thread
From: Mihai Caraman @ 2013-07-03 12:42 UTC (permalink / raw)
  To: kvm-ppc; +Cc: Mihai Caraman, linuxppc-dev, kvm

SPE/FP/AltiVec interrupts share the same numbers. Refactor SPE/FP exit handling
to accommodate AltiVec later. Detect the targeted unit at run time, since it can
be configured in the kernel but not supported by the hardware.
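
For reference, the run-time check is just the compile-time config combined
with the CPU feature bit; a minimal sketch of the pattern (illustrative only,
the real helper added below is kvmppc_supports_spe(), with an AltiVec
counterpart following in a later patch):

/*
 * Minimal sketch of the run-time detection pattern; not the literal helper.
 */
static inline bool unit_is_usable_spe(void)
{
#ifdef CONFIG_SPE
	/* The kernel can drive SPE; check that this core actually has it. */
	if (cpu_has_feature(CPU_FTR_SPE))
		return true;
#endif
	/* Either not configured in the kernel or not present on this core. */
	return false;
}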

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/booke.c |  102 +++++++++++++++++++++++++++++++---------------
 arch/powerpc/kvm/booke.h |    1 +
 2 files changed, 70 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index fb47e85..113961f 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -89,6 +89,15 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 	}
 }
 
+static inline bool kvmppc_supports_spe(void)
+{
+#ifdef CONFIG_SPE
+		if (cpu_has_feature(CPU_FTR_SPE))
+			return true;
+#endif
+	return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -99,7 +108,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
-static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
 	enable_kernel_spe();
@@ -118,6 +127,14 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 	}
 }
 #else
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+}
+
 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 {
 }
@@ -943,48 +960,67 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
-#ifdef CONFIG_SPE
 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-		if (vcpu->arch.shared->msr & MSR_SPE)
-			kvmppc_vcpu_enable_spe(vcpu);
-		else
-			kvmppc_booke_queue_irqprio(vcpu,
-						   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
+		if (kvmppc_supports_spe()) {
+			bool enabled = false;
+
+#ifndef CONFIG_KVM_BOOKE_HV
+			if (vcpu->arch.shared->msr & MSR_SPE) {
+				kvmppc_vcpu_enable_spe(vcpu);
+				enabled = true;
+			}
+#endif
+			if (!enabled)
+				kvmppc_booke_queue_irqprio(vcpu,
+					BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
+		} else {
+			/*
+			 * Guest wants SPE, but host kernel doesn't support it.
+			 * Send an "unimplemented operation" program check to
+			 * the guest.
+			 */
+			kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+		}
+
 		r = RESUME_GUEST;
 		break;
 	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
-		r = RESUME_GUEST;
-		break;
-
-	case BOOKE_INTERRUPT_SPE_FP_ROUND:
-		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
-		r = RESUME_GUEST;
-		break;
-#else
-	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL:
-		/*
-		 * Guest wants SPE, but host kernel doesn't support it.  Send
-		 * an "unimplemented operation" program check to the guest.
-		 */
-		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
-		r = RESUME_GUEST;
+		if (kvmppc_supports_spe()) {
+			kvmppc_booke_queue_irqprio(vcpu,
+				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
+			r = RESUME_GUEST;
+		} else {
+			/*
+			 * These really should never happen without CONFIG_SPE,
+			 * as we should never enable the real MSR[SPE] in the
+			 * guest.
+			 */
+			printk(KERN_CRIT "%s: unexpected SPE interrupt %u at \
+				%08lx\n", __func__, exit_nr, vcpu->arch.pc);
+			run->hw.hardware_exit_reason = exit_nr;
+			r = RESUME_HOST;
+		}
 		break;
 
-	/*
-	 * These really should never happen without CONFIG_SPE,
-	 * as we should never enable the real MSR[SPE] in the guest.
-	 */
-	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
 	case BOOKE_INTERRUPT_SPE_FP_ROUND:
-		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
-		       __func__, exit_nr, vcpu->arch.pc);
-		run->hw.hardware_exit_reason = exit_nr;
-		r = RESUME_HOST;
+		if (kvmppc_supports_spe()) {
+			kvmppc_booke_queue_irqprio(vcpu,
+				BOOKE_IRQPRIO_SPE_FP_ROUND);
+			r = RESUME_GUEST;
+		} else {
+			/*
+			 * These really should never happen without CONFIG_SPE,
+			 * as we should never enable the real MSR[SPE] in the
+			 * guest.
+			 */
+			printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+			       __func__, exit_nr, vcpu->arch.pc);
+			run->hw.hardware_exit_reason = exit_nr;
+			r = RESUME_HOST;
+		}
 		break;
-#endif
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 9e92006..e92ce5b 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -85,6 +85,7 @@ void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
 void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
 
 /* high-level function, manages flags, host state */
+void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu);
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
 
 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 12:42 [PATCH 0/6] KVM: PPC: Book3E: AltiVec support Mihai Caraman
  2013-07-03 12:42 ` [PATCH 1/6] KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers Mihai Caraman
  2013-07-03 12:42 ` [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling Mihai Caraman
@ 2013-07-03 12:42 ` Mihai Caraman
  2013-07-03 13:45   ` Alexander Graf
                     ` (2 more replies)
  2013-07-03 12:42 ` [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support Mihai Caraman
                   ` (2 subsequent siblings)
  5 siblings, 3 replies; 37+ messages in thread
From: Mihai Caraman @ 2013-07-03 12:42 UTC (permalink / raw)
  To: kvm-ppc; +Cc: Mihai Caraman, linuxppc-dev, kvm

Increase FPU laziness by calling kvmppc_load_guest_fp() just before
returning to guest instead of each sched in. Without this improvement
an interrupt may also claim the floating point unit, corrupting guest state.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/booke.c  |    1 +
 arch/powerpc/kvm/e500mc.c |    2 --
 2 files changed, 1 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 113961f..3cae2e3 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
 		} else {
 			kvmppc_lazy_ee_enable();
+			kvmppc_load_guest_fp(vcpu);
 		}
 	}
 
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 19c8379..09da1ac 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -143,8 +143,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvmppc_e500_tlbil_all(vcpu_e500);
 		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
 	}
-
-	kvmppc_load_guest_fp(vcpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 12:42 [PATCH 0/6] KVM: PPC: Book3E: AltiVec support Mihai Caraman
                   ` (2 preceding siblings ...)
  2013-07-03 12:42 ` [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness Mihai Caraman
@ 2013-07-03 12:42 ` Mihai Caraman
  2013-07-03 15:17   ` Alexander Graf
  2013-07-03 18:38   ` Scott Wood
  2013-07-03 12:42 ` [PATCH 5/6] KVM: PPC: Book3E: Add ONE_REG " Mihai Caraman
  2013-07-03 12:42 ` [PATCH 6/6] KVM: PPC: Book3E: Enable e6500 core Mihai Caraman
  5 siblings, 2 replies; 37+ messages in thread
From: Mihai Caraman @ 2013-07-03 12:42 UTC (permalink / raw)
  To: kvm-ppc; +Cc: Mihai Caraman, linuxppc-dev, kvm

Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully reuses host
infrastructure, so follow the same approach for AltiVec.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/booke.c |   72 ++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 70 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3cae2e3..c3c3af6 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -98,6 +98,19 @@ static inline bool kvmppc_supports_spe(void)
 	return false;
 }
 
+/*
+ * Always returns true if the AltiVec unit is present, see
+ * kvmppc_core_check_processor_compat().
+ */
+static inline bool kvmppc_supports_altivec(void)
+{
+#ifdef CONFIG_ALTIVEC
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))
+			return true;
+#endif
+	return false;
+}
+
 #ifdef CONFIG_SPE
 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 {
@@ -151,6 +164,21 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 }
 
 /*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It requires to be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+	if (kvmppc_supports_altivec()) {
+		if (!(current->thread.regs->msr & MSR_VEC)) {
+			load_up_altivec(NULL);
+			current->thread.regs->msr |= MSR_VEC;
+		}
+	}
+}
+
+/*
  * Helper function for "full" MSR writes.  No need to call this if only
  * EE/CE/ME/DE/RI are changing.
  */
@@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	u64 fpr[32];
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	vector128 vr[32];
+	vector128 vscr;
+	int used_vr = 0;
+#endif
+
 	if (!vcpu->arch.sane) {
 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
@@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save userspace VEC state in stack */
+		enable_kernel_altivec();
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		used_vr = current->thread.used_vr;
+
+		/* Restore guest VEC state to thread */
+		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+		current->thread.vscr = vcpu->arch.vscr;
+
+		kvmppc_load_guest_altivec(vcpu);
+	}
+#endif
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
@@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	current->thread.fpexc_mode = fpexc_mode;
 #endif
 
+#ifdef CONFIG_ALTIVEC
+	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		/* Save AltiVec state to thread */
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+
+		/* Save guest state */
+		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
+		vcpu->arch.vscr = current->thread.vscr;
+
+		/* Restore userspace state */
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.used_vr = used_vr;
+	}
+#endif
+
 out:
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	return ret;
@@ -961,7 +1028,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 
 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			bool enabled = false;
 
 #ifndef CONFIG_KVM_BOOKE_HV
@@ -987,7 +1054,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
-		if (kvmppc_supports_spe()) {
+		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
 			kvmppc_booke_queue_irqprio(vcpu,
 				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
 			r = RESUME_GUEST;
@@ -1205,6 +1272,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		} else {
 			kvmppc_lazy_ee_enable();
 			kvmppc_load_guest_fp(vcpu);
+			kvmppc_load_guest_altivec(vcpu);
 		}
 	}
 
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 5/6] KVM: PPC: Book3E: Add ONE_REG AltiVec support
  2013-07-03 12:42 [PATCH 0/6] KVM: PPC: Book3E: AltiVec support Mihai Caraman
                   ` (3 preceding siblings ...)
  2013-07-03 12:42 ` [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support Mihai Caraman
@ 2013-07-03 12:42 ` Mihai Caraman
  2013-07-03 12:42 ` [PATCH 6/6] KVM: PPC: Book3E: Enable e6500 core Mihai Caraman
  5 siblings, 0 replies; 37+ messages in thread
From: Mihai Caraman @ 2013-07-03 12:42 UTC (permalink / raw)
  To: kvm-ppc; +Cc: Mihai Caraman, linuxppc-dev, kvm

Add ONE_REG support for AltiVec on Book3E.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/booke.c |   32 ++++++++++++++++++++++++++++++++
 1 files changed, 32 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index c3c3af6..6ac1f68 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1585,6 +1585,22 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	case KVM_REG_PPC_DEBUG_INST:
 		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV);
 		break;
+#ifdef CONFIG_ALTIVEC
+	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+		break;
+	case KVM_REG_PPC_VSCR:
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+		break;
+#endif /* CONFIG_ALTIVEC */
 	default:
 		r = kvmppc_get_one_reg(vcpu, reg->id, &val);
 		break;
@@ -1658,6 +1674,22 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		kvmppc_set_tcr(vcpu, tcr);
 		break;
 	}
+#ifdef CONFIG_ALTIVEC
+	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+		break;
+	case KVM_REG_PPC_VSCR:
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+		break;
+#endif /* CONFIG_ALTIVEC */
 	default:
 		r = kvmppc_set_one_reg(vcpu, reg->id, &val);
 		break;
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 6/6] KVM: PPC: Book3E: Enable e6500 core
  2013-07-03 12:42 [PATCH 0/6] KVM: PPC: Book3E: AltiVec support Mihai Caraman
                   ` (4 preceding siblings ...)
  2013-07-03 12:42 ` [PATCH 5/6] KVM: PPC: Book3E: Add ONE_REG " Mihai Caraman
@ 2013-07-03 12:42 ` Mihai Caraman
  5 siblings, 0 replies; 37+ messages in thread
From: Mihai Caraman @ 2013-07-03 12:42 UTC (permalink / raw)
  To: kvm-ppc; +Cc: Mihai Caraman, linuxppc-dev, kvm

Now that AltiVec support is in place, enable the e6500 core.

Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
---
 arch/powerpc/kvm/e500mc.c |   10 ++++++++++
 1 files changed, 10 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 09da1ac..bec897c 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -175,6 +175,16 @@ int kvmppc_core_check_processor_compat(void)
 		r = 0;
 	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
 		r = 0;
+#ifdef CONFIG_ALTIVEC
+	/*
+	 * Since guests have the privilege to enable AltiVec, we need AltiVec
+	 * support in the host to save/restore their context.
+	 * Don't use CPU_FTR_ALTIVEC to identify cores with an AltiVec unit
+	 * because it's cleared in the absence of CONFIG_ALTIVEC!
+	 */
+	else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
+		r = 0;
+#endif
 	else
 		r = -ENOTSUPP;
 
-- 
1.7.3.4

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* Re: [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling
  2013-07-03 12:42 ` [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling Mihai Caraman
@ 2013-07-03 13:30   ` Alexander Graf
  2013-07-03 13:53     ` Caraman Mihai Claudiu-B02008
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 13:30 UTC (permalink / raw)
  To: Mihai Caraman; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 14:42, Mihai Caraman wrote:

> SPE/FP/AltiVec interrupts share the same numbers. Refactor SPE/FP exit handling
> to accommodate AltiVec later. Detect the targeted unit at run time, since it can
> be configured in the kernel but not supported by the hardware.
>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
> arch/powerpc/kvm/booke.c |  102 +++++++++++++++++++++++++++---------------
> arch/powerpc/kvm/booke.h |    1 +
> 2 files changed, 70 insertions(+), 33 deletions(-)
>
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index fb47e85..113961f 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -89,6 +89,15 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
> 	}
> }
>
> +static inline bool kvmppc_supports_spe(void)
> +{
> +#ifdef CONFIG_SPE
> +		if (cpu_has_feature(CPU_FTR_SPE))
> +			return true;
> +#endif
> +	return false;
> +}
> +
> #ifdef CONFIG_SPE
> void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
> {
> @@ -99,7 +108,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
> 	preempt_enable();
> }
>
> -static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
> +void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
> {
> 	preempt_disable();
> 	enable_kernel_spe();
> @@ -118,6 +127,14 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
> 	}
> }
> #else
> +void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
> +{
> +}
> +
> +void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
> +{
> +}
> +
> static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
> {
> }
> @@ -943,48 +960,67 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 		r = RESUME_GUEST;
> 		break;
>
> -#ifdef CONFIG_SPE
> 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
> -		if (vcpu->arch.shared->msr & MSR_SPE)
> -			kvmppc_vcpu_enable_spe(vcpu);
> -		else
> -			kvmppc_booke_queue_irqprio(vcpu,
> -						   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
> +		if (kvmppc_supports_spe()) {
> +			bool enabled = false;
> +
> +#ifndef CONFIG_KVM_BOOKE_HV
> +			if (vcpu->arch.shared->msr & MSR_SPE) {
> +				kvmppc_vcpu_enable_spe(vcpu);
> +				enabled = true;
> +			}
> +#endif

Why the #ifdef? On HV capable systems kvmppc_supports_spe() will just
always return false. And I don't really understand why HV would be
special in the first place here. Is it because we're accessing
shared->msr?

> +			if (!enabled)
> +				kvmppc_booke_queue_irqprio(vcpu,
> +					BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
> +		} else {
> +			/*
> +			 * Guest wants SPE, but host kernel doesn't support it.

host kernel or hardware


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 12:42 ` [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness Mihai Caraman
@ 2013-07-03 13:45   ` Alexander Graf
  2013-07-03 13:55     ` Caraman Mihai Claudiu-B02008
  2013-07-03 17:18   ` Scott Wood
  2013-07-03 18:37   ` Scott Wood
  2 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 13:45 UTC (permalink / raw)
  To: Mihai Caraman; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 14:42, Mihai Caraman wrote:

> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> returning to guest instead of each sched in. Without this improvement
> an interrupt may also claim the floating point unit, corrupting guest state.

Not sure I follow. Could you please describe exactly what's happening?


Alex

>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
> arch/powerpc/kvm/booke.c  |    1 +
> arch/powerpc/kvm/e500mc.c |    2 --
> 2 files changed, 1 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 113961f..3cae2e3 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
> 		} else {
> 			kvmppc_lazy_ee_enable();
> +			kvmppc_load_guest_fp(vcpu);
> 		}
> 	}
>
> diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
> index 19c8379..09da1ac 100644
> --- a/arch/powerpc/kvm/e500mc.c
> +++ b/arch/powerpc/kvm/e500mc.c
> @@ -143,8 +143,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> 		kvmppc_e500_tlbil_all(vcpu_e500);
> 		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
> 	}
> -
> -	kvmppc_load_guest_fp(vcpu);
> }
>
> void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
> --
> 1.7.3.4
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 37+ messages in thread

* RE: [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling
  2013-07-03 13:30   ` Alexander Graf
@ 2013-07-03 13:53     ` Caraman Mihai Claudiu-B02008
  2013-07-03 15:13       ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Caraman Mihai Claudiu-B02008 @ 2013-07-03 13:53 UTC (permalink / raw)
  To: Alexander Graf; +Cc: linuxppc-dev, kvm, kvm-ppc

> > -#ifdef CONFIG_SPE
> > 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
> > -		if (vcpu->arch.shared->msr & MSR_SPE)
> > -			kvmppc_vcpu_enable_spe(vcpu);
> > -		else
> > -			kvmppc_booke_queue_irqprio(vcpu,
> > -						   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
> > +		if (kvmppc_supports_spe()) {
> > +			bool enabled = false;
> > +
> > +#ifndef CONFIG_KVM_BOOKE_HV
> > +			if (vcpu->arch.shared->msr & MSR_SPE) {
> > +				kvmppc_vcpu_enable_spe(vcpu);
> > +				enabled = true;
> > +			}
> > +#endif
>
> Why the #ifdef? On HV capable systems kvmppc_supports_spe() will just
> always return false.

AltiVec and SPE unavailable exceptions follow the same path. While
kvmppc_supports_spe() will always return false, kvmppc_supports_altivec()
may not.

> And I don't really understand why HV would be special in the first place
> here. Is it because we're accessing shared->msr?

You are right, in the HV case MSR[SPV] should always be zero when an unavailable
exception takes place. The distinction was made because on non-HV the guest
doesn't have direct access to MSR[SPE]. The name of the bit (not the position)
was changed on HV cores.

>
> > +			if (!enabled)
> > +				kvmppc_booke_queue_irqprio(vcpu,
> > +					BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
> > +		} else {
> > +			/*
> > +			 * Guest wants SPE, but host kernel doesn't support it.
>
> host kernel or hardware

Ok.

-Mike

^ permalink raw reply	[flat|nested] 37+ messages in thread

* RE: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 13:45   ` Alexander Graf
@ 2013-07-03 13:55     ` Caraman Mihai Claudiu-B02008
  2013-07-03 15:11       ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Caraman Mihai Claudiu-B02008 @ 2013-07-03 13:55 UTC (permalink / raw)
  To: Alexander Graf; +Cc: linuxppc-dev, kvm, kvm-ppc

> -----Original Message-----
> From: Alexander Graf [mailto:agraf@suse.de]
> Sent: Wednesday, July 03, 2013 4:45 PM
> To: Caraman Mihai Claudiu-B02008
> Cc: kvm-ppc@vger.kernel.org; kvm@vger.kernel.org; linuxppc-
> dev@lists.ozlabs.org
> Subject: Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
>
>
> On 03.07.2013, at 14:42, Mihai Caraman wrote:
>
> > Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> > returning to guest instead of each sched in. Without this improvement
> > an interrupt may also claim the floating point unit, corrupting guest state.
>
> Not sure I follow. Could you please describe exactly what's happening?

This was already discussed on the list, I will forward you the thread.

-Mike

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 13:55     ` Caraman Mihai Claudiu-B02008
@ 2013-07-03 15:11       ` Alexander Graf
  2013-07-03 15:41         ` Caraman Mihai Claudiu-B02008
  2013-07-03 17:07         ` Scott Wood
  0 siblings, 2 replies; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 15:11 UTC (permalink / raw)
  To: Caraman Mihai Claudiu-B02008; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 15:55, Caraman Mihai Claudiu-B02008 wrote:

>> -----Original Message-----
>> From: Alexander Graf [mailto:agraf@suse.de]
>> Sent: Wednesday, July 03, 2013 4:45 PM
>> To: Caraman Mihai Claudiu-B02008
>> Cc: kvm-ppc@vger.kernel.org; kvm@vger.kernel.org; linuxppc-
>> dev@lists.ozlabs.org
>> Subject: Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
>>
>>
>> On 03.07.2013, at 14:42, Mihai Caraman wrote:
>>
>>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
>>> returning to guest instead of each sched in. Without this improvement
>>> an interrupt may also claim the floating point unit, corrupting guest state.
>>
>> Not sure I follow. Could you please describe exactly what's happening?
>
> This was already discussed on the list, I will forward you the thread.

The only thing I've seen in that thread was some pathetic theoretical
case where an interrupt handler would enable fp and clobber state
carelessly. That's not something I'm worried about.

I really don't see where this patch improves anything tbh. It certainly
makes the code flow more awkward.


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling
  2013-07-03 13:53     ` Caraman Mihai Claudiu-B02008
@ 2013-07-03 15:13       ` Alexander Graf
  2013-07-03 18:28         ` Scott Wood
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 15:13 UTC (permalink / raw)
  To: Caraman Mihai Claudiu-B02008; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 15:53, Caraman Mihai Claudiu-B02008 wrote:

>>> -#ifdef CONFIG_SPE
>>> 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
>>> -		if (vcpu->arch.shared->msr & MSR_SPE)
>>> -			kvmppc_vcpu_enable_spe(vcpu);
>>> -		else
>>> -			kvmppc_booke_queue_irqprio(vcpu,
>>> -						   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
>>> +		if (kvmppc_supports_spe()) {
>>> +			bool enabled = false;
>>> +
>>> +#ifndef CONFIG_KVM_BOOKE_HV
>>> +			if (vcpu->arch.shared->msr & MSR_SPE) {
>>> +				kvmppc_vcpu_enable_spe(vcpu);
>>> +				enabled = true;
>>> +			}
>>> +#endif
>>
>> Why the #ifdef? On HV capable systems kvmppc_supports_spe() will just
>> always return false.
>
> AltiVec and SPE unavailable exceptions follow the same path. While
> kvmppc_supports_spe() will always return false, kvmppc_supports_altivec()
> may not.

There is no chip that supports SPE and HV at the same time. So we'll
never hit this anyway, since kvmppc_supports_spe() always returns false
on HV capable systems.

Just add a comment saying so and remove the ifdef :).


Alex

>
>> And I don't really understand why HV would be special in the first place
>> here. Is it because we're accessing shared->msr?
>
> You are right, in the HV case MSR[SPV] should always be zero when an unavailable
> exception takes place. The distinction was made because on non-HV the guest
> doesn't have direct access to MSR[SPE]. The name of the bit (not the position)
> was changed on HV cores.
>
>>
>>> +			if (!enabled)
>>> +				kvmppc_booke_queue_irqprio(vcpu,
>>> +					BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
>>> +		} else {
>>> +			/*
>>> +			 * Guest wants SPE, but host kernel doesn't support it.
>>
>> host kernel or hardware
>
> Ok.
>
> -Mike
>

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 12:42 ` [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support Mihai Caraman
@ 2013-07-03 15:17   ` Alexander Graf
  2013-07-03 16:09     ` Caraman Mihai Claudiu-B02008
  2013-07-03 18:38   ` Scott Wood
  1 sibling, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 15:17 UTC (permalink / raw)
  To: Mihai Caraman; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 14:42, Mihai Caraman wrote:

> Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully reuses host
> infrastructure, so follow the same approach for AltiVec.
>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
> arch/powerpc/kvm/booke.c |   72 ++++++++++++++++++++++++++++++++++++++++++++-
> 1 files changed, 70 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 3cae2e3..c3c3af6 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -98,6 +98,19 @@ static inline bool kvmppc_supports_spe(void)
> 	return false;
> }
>
> +/*
> + * Always returns true if the AltiVec unit is present, see
> + * kvmppc_core_check_processor_compat().
> + */
> +static inline bool kvmppc_supports_altivec(void)
> +{
> +#ifdef CONFIG_ALTIVEC
> +		if (cpu_has_feature(CPU_FTR_ALTIVEC))
> +			return true;
> +#endif
> +	return false;
> +}
> +
> #ifdef CONFIG_SPE
> void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
> {
> @@ -151,6 +164,21 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
> }
>
> /*
> + * Simulate AltiVec unavailable fault to load guest state
> + * from thread to AltiVec unit.
> + * It requires to be called with preemption disabled.
> + */
> +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
> +{
> +	if (kvmppc_supports_altivec()) {
> +		if (!(current->thread.regs->msr & MSR_VEC)) {
> +			load_up_altivec(NULL);
> +			current->thread.regs->msr |= MSR_VEC;

Does this ensure that the kernel saves / restores all altivec state on
task switch? Does it load it again when it schedules us in? Would
definitely be worth a comment.

> +		}
> +	}
> +}
> +
> +/*
>  * Helper function for "full" MSR writes.  No need to call this if only
>  * EE/CE/ME/DE/RI are changing.
>  */
> @@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> 	u64 fpr[32];
> #endif
>
> +#ifdef CONFIG_ALTIVEC
> +	vector128 vr[32];
> +	vector128 vscr;
> +	int used_vr = 0;

bool

> +#endif
> +
> 	if (!vcpu->arch.sane) {
> 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
> 		return -EINVAL;
> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> 	kvmppc_load_guest_fp(vcpu);
> #endif
>
> +#ifdef CONFIG_ALTIVEC

/* Switch from user space Altivec to guest Altivec state */

> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {

Why not use your kvmppc_supports_altivec() helper here?

> +		/* Save userspace VEC state in stack */
> +		enable_kernel_altivec();

Can't you hide this in the load function? We only want to enable kernel
altivec for a short time while we shuffle the registers around.

> +		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));

vr = current->thread.vr;

> +		vscr = current->thread.vscr;
> +		used_vr = current->thread.used_vr;
> +
> +		/* Restore guest VEC state to thread */
> +		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));

current->thread.vr = vcpu->arch.vr;

> +		current->thread.vscr = vcpu->arch.vscr;
> +
> +		kvmppc_load_guest_altivec(vcpu);
> +	}

Do we need to do this even when the guest doesn't use Altivec? Can't we
just load it on demand then once we fault? This code path really should
only be a prefetch enable when MSR_VEC is already set in guest context.

> +#endif
> +
> 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
>
> 	/* No need for kvm_guest_exit. It's done in handle_exit.
> @@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> 	current->thread.fpexc_mode = fpexc_mode;
> #endif
>
> +#ifdef CONFIG_ALTIVEC

/* Switch from guest Altivec to user space Altivec state */

> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> +		/* Save AltiVec state to thread */
> +		if (current->thread.regs->msr & MSR_VEC)
> +			giveup_altivec(current);
> +
> +		/* Save guest state */
> +		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
> +		vcpu->arch.vscr = current->thread.vscr;
> +
> +		/* Restore userspace state */
> +		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
> +		current->thread.vscr = vscr;
> +		current->thread.used_vr = used_vr;
> +	}

Same comments here. If the guest never touched Altivec state, don't
bother restoring it, as it's still good.


Alex

> +#endif
> +
> out:
> 	vcpu->mode = OUTSIDE_GUEST_MODE;
> 	return ret;
> @@ -961,7 +1028,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 		break;
>
> 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
> -		if (kvmppc_supports_spe()) {
> +		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
> 			bool enabled = false;
>
> #ifndef CONFIG_KVM_BOOKE_HV
> @@ -987,7 +1054,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 	}
>
> 	case BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST:
> -		if (kvmppc_supports_spe()) {
> +		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
> 			kvmppc_booke_queue_irqprio(vcpu,
> 				BOOKE_IRQPRIO_SPE_FP_DATA_ALTIVEC_ASSIST);
> 			r = RESUME_GUEST;
> @@ -1205,6 +1272,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 		} else {
> 			kvmppc_lazy_ee_enable();
> 			kvmppc_load_guest_fp(vcpu);
> +			kvmppc_load_guest_altivec(vcpu);
> 		}
> 	}
>
> --
> 1.7.3.4
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

^ permalink raw reply	[flat|nested] 37+ messages in thread

* RE: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 15:11       ` Alexander Graf
@ 2013-07-03 15:41         ` Caraman Mihai Claudiu-B02008
  2013-07-03 16:59           ` Alexander Graf
  2013-07-03 17:07         ` Scott Wood
  1 sibling, 1 reply; 37+ messages in thread
From: Caraman Mihai Claudiu-B02008 @ 2013-07-03 15:41 UTC (permalink / raw)
  To: Alexander Graf; +Cc: linuxppc-dev, kvm, kvm-ppc

> >>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> >>> returning to guest instead of each sched in. Without this improvement
> >>> an interrupt may also claim the floating point unit, corrupting guest state.
> >>
> >> Not sure I follow. Could you please describe exactly what's happening?
> >
> > This was already discussed on the list, I will forward you the thread.
>
> The only thing I've seen in that thread was some pathetic theoretical
> case where an interrupt handler would enable fp and clobber state
> carelessly. That's not something I'm worried about.

Neither am I, though I don't find it pathetic. Please refer it to Scott.

>
> I really don't see where this patch improves anything tbh. It certainly
> makes the code flow more awkward.

I was pointing you to this: the idea of FPU/AltiVec laziness that the kernel
is struggling to achieve is to reduce the number of store/restore operations.
Without this improvement we restore the unit each time we are sched in. If
another process takes ownership of the unit (on SMP it's even worse, but don't
bother with this) the kernel stores the unit state to the qemu task. This can
happen multiple times during handle_exit().

Do you see it now?
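
Roughly, the reload boils down to the guarded pattern below (an illustrative
sketch only; reload_guest_fpu_state() is a hypothetical stand-in for the
arch-specific "load FP state back from the thread struct" routine, it is not
the literal booke.h code):

/* Sketch of the lazy-reload guard; reload_guest_fpu_state() is a
 * hypothetical stand-in for the arch-specific reload routine. */
static inline void lazy_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	/* MSR_FP still set in the thread regs means this task still owns
	 * the FPU, so the guest state loaded earlier is still live in the
	 * registers and there is nothing to do. */
	if (!(current->thread.regs->msr & MSR_FP)) {
		reload_guest_fpu_state(current);	/* hypothetical helper */
		current->thread.regs->msr |= MSR_FP;	/* reclaim ownership */
	}
}

Calling this once right before re-entering the guest is therefore cheap when
nobody touched the unit while we handled the exit.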

-Mike

^ permalink raw reply	[flat|nested] 37+ messages in thread

* RE: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 15:17   ` Alexander Graf
@ 2013-07-03 16:09     ` Caraman Mihai Claudiu-B02008
  2013-07-03 16:43       ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Caraman Mihai Claudiu-B02008 @ 2013-07-03 16:09 UTC (permalink / raw)
  To: Alexander Graf; +Cc: linuxppc-dev, kvm, kvm-ppc

> > + * Simulate AltiVec unavailable fault to load guest state
> > + * from thread to AltiVec unit.
> > + * It requires to be called with preemption disabled.
> > + */
> > +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
> > +{
> > +	if (kvmppc_supports_altivec()) {
> > +		if (!(current->thread.regs->msr & MSR_VEC)) {
> > +			load_up_altivec(NULL);
> > +			current->thread.regs->msr |= MSR_VEC;
>
> Does this ensure that the kernel saves / restores all altivec state on
> task switch? Does it load it again when it schedules us in? Would
> definitely be worth a comment.

These units are _LAZY_ !!! Only the SMP kernel saves the state when it schedules out.

>
> > +		}
> > +	}
> > +}
> > +
> > +/*
> >  * Helper function for "full" MSR writes.  No need to call this if only
> >  * EE/CE/ME/DE/RI are changing.
> >  */
> > @@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> > 	u64 fpr[32];
> > #endif
> >
> > +#ifdef CONFIG_ALTIVEC
> > +	vector128 vr[32];
> > +	vector128 vscr;
> > +	int used_vr = 0;
>
> bool

Why don't you ask first to change the type of used_vr member in
/include/asm/processor.h?

>
> > +#endif
> > +
> > 	if (!vcpu->arch.sane) {
> > 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
> > 		return -EINVAL;
> > @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> > 	kvmppc_load_guest_fp(vcpu);
> > #endif
> >
> > +#ifdef CONFIG_ALTIVEC
>
> /* Switch from user space Altivec to guest Altivec state */
>
> > +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
>
> Why not use your kvmppc_supports_altivec() helper here?

Give it a try ... because Linus guarded these members with CONFIG_ALTIVEC :)

>
> > +		/* Save userspace VEC state in stack */
> > +		enable_kernel_altivec();
>
> Can't you hide this in the load function? We only want to enable kernel
> altivec for a short time while we shuffle the registers around.
>
> > +		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
>
> vr = current->thread.vr;

Are you kidding, be more careful with arrays :)

>
> > +		vscr = current->thread.vscr;
> > +		used_vr = current->thread.used_vr;
> > +
> > +		/* Restore guest VEC state to thread */
> > +		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
>
> current->thread.vr = vcpu->arch.vr;
>
> > +		current->thread.vscr = vcpu->arch.vscr;
> > +
> > +		kvmppc_load_guest_altivec(vcpu);
> > +	}
>
> Do we need to do this even when the guest doesn't use Altivec? Can't we
> just load it on demand then once we fault? This code path really should
> only be a prefetch enable when MSR_VEC is already set in guest context.

No we can't, read 6/6.

>
> > +#endif
> > +
> > 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
> >
> > 	/* No need for kvm_guest_exit. It's done in handle_exit.
> > @@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
> > 	current->thread.fpexc_mode = fpexc_mode;
> > #endif
> >
> > +#ifdef CONFIG_ALTIVEC
>
> /* Switch from guest Altivec to user space Altivec state */
>
> > +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> > +		/* Save AltiVec state to thread */
> > +		if (current->thread.regs->msr & MSR_VEC)
> > +			giveup_altivec(current);
> > +
> > +		/* Save guest state */
> > +		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
> > +		vcpu->arch.vscr = current->thread.vscr;
> > +
> > +		/* Restore userspace state */
> > +		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
> > +		current->thread.vscr = vscr;
> > +		current->thread.used_vr = used_vr;
> > +	}
>
> Same comments here. If the guest never touched Altivec state, don't
> bother restoring it, as it's still good.

LOL, the mighty guest kernel does whatever he wants with AltiVec and
doesn't inform us.

-Mike

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 16:09     ` Caraman Mihai Claudiu-B02008
@ 2013-07-03 16:43       ` Alexander Graf
  2013-07-03 16:49         ` Caraman Mihai Claudiu-B02008
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 16:43 UTC (permalink / raw)
  To: Caraman Mihai Claudiu-B02008; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 18:09, Caraman Mihai Claudiu-B02008 wrote:

>>> + * Simulate AltiVec unavailable fault to load guest state
>>> + * from thread to AltiVec unit.
>>> + * It requires to be called with preemption disabled.
>>> + */
>>> +static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
>>> +{
>>> +	if (kvmppc_supports_altivec()) {
>>> +		if (!(current->thread.regs->msr & MSR_VEC)) {
>>> +			load_up_altivec(NULL);
>>> +			current->thread.regs->msr |= MSR_VEC;
>>
>> Does this ensure that the kernel saves / restores all altivec state on
>> task switch? Does it load it again when it schedules us in? Would
>> definitely be worth a comment.
>
> These units are _LAZY_ !!! Only the SMP kernel saves the state when it schedules out.

Then how do you ensure that altivec state is still consistent after
another altivec user got scheduled? Have I missed a vcpu_load hook
anywhere?

>
>>
>>> +		}
>>> +	}
>>> +}
>>> +
>>> +/*
>>> * Helper function for "full" MSR writes.  No need to call this if only
>>> * EE/CE/ME/DE/RI are changing.
>>> */
>>> @@ -678,6 +706,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> 	u64 fpr[32];
>>> #endif
>>>
>>> +#ifdef CONFIG_ALTIVEC
>>> +	vector128 vr[32];
>>> +	vector128 vscr;
>>> +	int used_vr = 0;
>>
>> bool
>
> Why don't you ask first to change the type of used_vr member in
> /include/asm/processor.h?

Ah, it's a copy from thread. Fair enough.

>
>>
>>> +#endif
>>> +
>>> 	if (!vcpu->arch.sane) {
>>> 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
>>> 		return -EINVAL;
>>> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> 	kvmppc_load_guest_fp(vcpu);
>>> #endif
>>>
>>> +#ifdef CONFIG_ALTIVEC
>>
>> /* Switch from user space Altivec to guest Altivec state */
>>
>>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
>>
>> Why not use your kvmppc_supports_altivec() helper here?
>
> Give it a try ... because Linus guarded these members with CONFIG_ALTIVEC :)

Huh? You already are in an #ifdef CONFIG_ALTIVEC here. I think it's a
good idea to be consistent in helper usage. And the name you gave to the
helper (kvmppc_supports_altivec) is actually quite nice and tells us
exactly what we're asking for.

>
>>
>>> +		/* Save userspace VEC state in stack */
>>> +		enable_kernel_altivec();
>>
>> Can't you hide this in the load function? We only want to enable kernel
>> altivec for a short time while we shuffle the registers around.
>>
>>> +		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
>>
>> vr = current->thread.vr;
>
> Are you kidding, be more careful with arrays :)

Bleks :).

>
>>
>>> +		vscr = current->thread.vscr;
>>> +		used_vr = current->thread.used_vr;
>>> +
>>> +		/* Restore guest VEC state to thread */
>>> +		memcpy(current->thread.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
>>
>> current->thread.vr = vcpu->arch.vr;
>>
>>> +		current->thread.vscr = vcpu->arch.vscr;
>>> +
>>> +		kvmppc_load_guest_altivec(vcpu);
>>> +	}
>>
>> Do we need to do this even when the guest doesn't use Altivec? Can't we
>> just load it on demand then once we fault? This code path really should
>> only be a prefetch enable when MSR_VEC is already set in guest context.
>
> No we can't, read 6/6.

So we have to make sure we're completely unlazy when it comes to a KVM
guest. Are we?


Alex

>
>>
>>> +#endif
>>> +
>>> 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
>>>
>>> 	/* No need for kvm_guest_exit. It's done in handle_exit.
>>> @@ -736,6 +786,23 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>> 	current->thread.fpexc_mode = fpexc_mode;
>>> #endif
>>>
>>> +#ifdef CONFIG_ALTIVEC
>>
>> /* Switch from guest Altivec to user space Altivec state */
>>
>>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
>>> +		/* Save AltiVec state to thread */
>>> +		if (current->thread.regs->msr & MSR_VEC)
>>> +			giveup_altivec(current);
>>> +
>>> +		/* Save guest state */
>>> +		memcpy(vcpu->arch.vr, current->thread.vr, sizeof(vcpu->arch.vr));
>>> +		vcpu->arch.vscr = current->thread.vscr;
>>> +
>>> +		/* Restore userspace state */
>>> +		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
>>> +		current->thread.vscr = vscr;
>>> +		current->thread.used_vr = used_vr;
>>> +	}
>>
>> Same comments here. If the guest never touched Altivec state, don't
>> bother restoring it, as it's still good.
>
> LOL, the mighty guest kernel does whatever he wants with AltiVec and
> doesn't inform us.
>
> -Mike
>

^ permalink raw reply	[flat|nested] 37+ messages in thread

* RE: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 16:43       ` Alexander Graf
@ 2013-07-03 16:49         ` Caraman Mihai Claudiu-B02008
  2013-07-03 17:07           ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Caraman Mihai Claudiu-B02008 @ 2013-07-03 16:49 UTC (permalink / raw)
  To: Alexander Graf; +Cc: linuxppc-dev, kvm, kvm-ppc

> >>> +
> >>> 	if (!vcpu->arch.sane) {
> >>> 		kvm_run->exit_reason =3D KVM_EXIT_INTERNAL_ERROR;
> >>> 		return -EINVAL;
> >>> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run,
> >> struct kvm_vcpu *vcpu)
> >>> 	kvmppc_load_guest_fp(vcpu);
> >>> #endif
> >>>
> >>> +#ifdef CONFIG_ALTIVEC
> >>
> >> /* Switch from user space Altivec to guest Altivec state */
> >>
> >>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
> >>
> >> Why not use your kvmppc_supports_altivec() helper here?
> >
> > Give it a try ... because Linus guarded this members with
> CONFIG_ALTIVEC :)
>=20
> Huh? You already are in an #ifdef CONFIG_ALTIVEC here. I think it's a
> good idea to be consistent in helper usage. And the name you gave to the
> helper (kvmppc_supports_altivec) is actually quite nice and tells us
> exactly what we're asking for.

I thought you asking to replace #ifdef CONFIG_ALTIVEC.

> >> Do we need to do this even when the guest doesn't use Altivec? Can't
> we
> >> just load it on demand then once we fault? This code path really
> should
> >> only be a prefetch enable when MSR_VEC is already set in guest
> context.
> >
> > No we can't, read 6/6.
>=20
> So we have to make sure we're completely unlazy when it comes to a KVM
> guest. Are we?

Yes, because MSR[SPV] is under its control.

-Mike

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 15:41         ` Caraman Mihai Claudiu-B02008
@ 2013-07-03 16:59           ` Alexander Graf
  2013-07-03 17:17             ` Scott Wood
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 16:59 UTC (permalink / raw)
  To: Caraman Mihai Claudiu-B02008; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 17:41, Caraman Mihai Claudiu-B02008 wrote:

>>>>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
>>>>> returning to guest instead of each sched in. Without this improvement
>>>>> an interrupt may also claim the floating point unit, corrupting guest state.
>>>>
>>>> Not sure I follow. Could you please describe exactly what's happening?
>>>
>>> This was already discussed on the list, I will forward you the thread.
>>
>> The only thing I've seen in that thread was some pathetic theoretical
>> case where an interrupt handler would enable fp and clobber state
>> carelessly. That's not something I'm worried about.
>
> Neither am I, though I don't find it pathetic. Please refer it to Scott.

If from Linux's point of view we look like a user space program with =
active floating point registers, we don't have to worry about this case. =
Kernel code that would clobber that fp state would clobber random user =
space's fp state too.

>=20
>>=20
>> I really don't see where this patch improves anything tbh. It =
certainly
>> makes the code flow more awkward.
>=20
> I was pointing you to this: The idea of FPU/AltiVec laziness that the =
kernel
> is struggling to achieve is to reduce the number of store/restore =
operations.
> Without this improvement we restore the unit each time we are sched =
it. If an
> other process take the ownership of the unit (on SMP it's even worse =
but don't
> bother with this) the kernel store the unit state to qemu task. This =
can happen
> multiple times during handle_exit().
>=20
> Do you see it now?=20

Yup. Looks good. The code flow is very hard to follow though - there are =
a lot of implicit assumptions that don't get documented anywhere. For =
example the fact that we rely on giveup_fpu() to remove MSR_FP from our =
thread.


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 15:11       ` Alexander Graf
  2013-07-03 15:41         ` Caraman Mihai Claudiu-B02008
@ 2013-07-03 17:07         ` Scott Wood
  2013-07-03 17:08           ` Alexander Graf
  1 sibling, 1 reply; 37+ messages in thread
From: Scott Wood @ 2013-07-03 17:07 UTC (permalink / raw)
  To: Alexander Graf; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 10:11:50 AM, Alexander Graf wrote:
>
> On 03.07.2013, at 15:55, Caraman Mihai Claudiu-B02008 wrote:
>
> >> -----Original Message-----
> >> From: Alexander Graf [mailto:agraf@suse.de]
> >> Sent: Wednesday, July 03, 2013 4:45 PM
> >> To: Caraman Mihai Claudiu-B02008
> >> Cc: kvm-ppc@vger.kernel.org; kvm@vger.kernel.org; linuxppc-
> >> dev@lists.ozlabs.org
> >> Subject: Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
> >>
> >>
> >> On 03.07.2013, at 14:42, Mihai Caraman wrote:
> >>
> >>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> >>> returning to guest instead of each sched in. Without this improvement
> >>> an interrupt may also claim floating point, corrupting guest state.
> >>
> >> Not sure I follow. Could you please describe exactly what's happening?
> >
> > This was already discussed on the list, I will forward you the thread.
>
> The only thing I've seen in that thread was some pathetic theoretical
> case where an interrupt handler would enable fp and clobber state
> carelessly. That's not something I'm worried about.

On x86, floating point registers can be used for memcpy(), which can be
used in interrupt handlers.  Just because it doesn't happen on PPC
today doesn't make it a "pathetic theoretical case" that we should
ignore and leave a landmine buried in the KVM code.  Even power7 is
using something similar for copyuser (which isn't called from interrupt
context, but it's not a huge leap from that to doing it in memcpy).

It also doesn't seem *that* farfetched that some driver for unusual
hardware could decide it needs FP in its interrupt handler, and call
the function that is specifically meant to ensure that.  It's frowned
upon, but that doesn't mean nobody will ever do it.

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 16:49         ` Caraman Mihai Claudiu-B02008
@ 2013-07-03 17:07           ` Alexander Graf
  2013-07-03 18:36             ` Scott Wood
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 17:07 UTC (permalink / raw)
  To: Caraman Mihai Claudiu-B02008; +Cc: linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 18:49, Caraman Mihai Claudiu-B02008 wrote:

>>>>> +
>>>>> 	if (!vcpu->arch.sane) {
>>>>> 		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
>>>>> 		return -EINVAL;
>>>>> @@ -716,6 +750,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
>>>>> 	kvmppc_load_guest_fp(vcpu);
>>>>> #endif
>>>>>
>>>>> +#ifdef CONFIG_ALTIVEC
>>>>
>>>> /* Switch from user space Altivec to guest Altivec state */
>>>>
>>>>> +	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
>>>>
>>>> Why not use your kvmppc_supports_altivec() helper here?
>>>
>>> Give it a try ... because Linus guarded these members with
>>> CONFIG_ALTIVEC :)
>>
>> Huh? You already are in an #ifdef CONFIG_ALTIVEC here. I think it's a
>> good idea to be consistent in helper usage. And the name you gave to the
>> helper (kvmppc_supports_altivec) is actually quite nice and tells us
>> exactly what we're asking for.
>
> I thought you were asking to replace #ifdef CONFIG_ALTIVEC.
>
>>>> Do we need to do this even when the guest doesn't use Altivec? Can't we
>>>> just load it on demand then once we fault? This code path really should
>>>> only be a prefetch enable when MSR_VEC is already set in guest context.
>>>
>>> No we can't, read 6/6.
>>
>> So we have to make sure we're completely unlazy when it comes to a KVM
>> guest. Are we?
>
> Yes, because MSR[SPV] is under its control.

Oh, sure, KVM wants it unlazy. That part is obvious. But does the kernel
always give us unlaziness? The way I read the code, process.c goes lazy
when !CONFIG_SMP.

So the big question is why we're manually enforcing FPU giveup, but not
Altivec giveup? One of the 2 probably is wrong :).


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 17:07         ` Scott Wood
@ 2013-07-03 17:08           ` Alexander Graf
  0 siblings, 0 replies; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 17:08 UTC (permalink / raw)
  To: Scott Wood; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 19:07, Scott Wood wrote:

> On 07/03/2013 10:11:50 AM, Alexander Graf wrote:
>> On 03.07.2013, at 15:55, Caraman Mihai Claudiu-B02008 wrote:
>> >> -----Original Message-----
>> >> From: Alexander Graf [mailto:agraf@suse.de]
>> >> Sent: Wednesday, July 03, 2013 4:45 PM
>> >> To: Caraman Mihai Claudiu-B02008
>> >> Cc: kvm-ppc@vger.kernel.org; kvm@vger.kernel.org; linuxppc-
>> >> dev@lists.ozlabs.org
>> >> Subject: Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
>> >>
>> >>
>> >> On 03.07.2013, at 14:42, Mihai Caraman wrote:
>> >>
>> >>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
>> >>> returning to guest instead of each sched in. Without this improvement
>> >>> an interrupt may also claim floating point, corrupting guest state.
>> >>
>> >> Not sure I follow. Could you please describe exactly what's happening?
>> >
>> > This was already discussed on the list, I will forward you the thread.
>> The only thing I've seen in that thread was some pathetic theoretical
>> case where an interrupt handler would enable fp and clobber state
>> carelessly. That's not something I'm worried about.
>
> On x86, floating point registers can be used for memcpy(), which can be
> used in interrupt handlers.  Just because it doesn't happen on PPC today
> doesn't make it a "pathetic theoretical case" that we should ignore and
> leave a landmine buried in the KVM code.  Even power7 is using something
> similar for copyuser (which isn't called from interrupt context, but
> it's not a huge leap from that to doing it in memcpy).
>
> It also doesn't seem *that* farfetched that some driver for unusual
> hardware could decide it needs FP in its interrupt handler, and call the
> function that is specifically meant to ensure that.  It's frowned upon,
> but that doesn't mean nobody will ever do it.

Oh, sure. But in that case I would strongly hope that the driver first
saves off the current FPU state to the thread struct before it goes off
and uses them for itself. Otherwise it would break user space, no?


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 16:59           ` Alexander Graf
@ 2013-07-03 17:17             ` Scott Wood
  2013-07-03 17:22               ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Scott Wood @ 2013-07-03 17:17 UTC (permalink / raw)
  To: Alexander Graf; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 11:59:45 AM, Alexander Graf wrote:
>
> On 03.07.2013, at 17:41, Caraman Mihai Claudiu-B02008 wrote:
>
> >>>>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> >>>>> returning to guest instead of each sched in. Without this improvement
> >>>>> an interrupt may also claim floating point, corrupting guest state.
> >>>>
> >>>> Not sure I follow. Could you please describe exactly what's happening?
> >>>
> >>> This was already discussed on the list, I will forward you the thread.
> >>
> >> The only thing I've seen in that thread was some pathetic theoretical
> >> case where an interrupt handler would enable fp and clobber state
> >> carelessly. That's not something I'm worried about.
> >
> > Neither am I, though I don't find it pathetic. Please refer it to Scott.
>
> If from Linux's point of view we look like a user space program with
> active floating point registers, we don't have to worry about this
> case. Kernel code that would clobber that fp state would clobber
> random user space's fp state too.

This patch makes it closer to how it works with a user space program.
Or rather, it reduces the time window when we don't (and can't) act
like a normal userspace program -- and ensures that we have interrupts
disabled during that window.  An interrupt can't randomly clobber FP
state; it has to call enable_kernel_fp() just like KVM does.
enable_kernel_fp() clears the userspace MSR_FP to ensure that the state
it saves gets restored before userspace uses it again, but that won't
have any effect on guest execution (especially in HV-mode).  Thus
kvmppc_load_guest_fp() needs to be atomic with guest entry.
Conceptually it's like taking an automatic FP unavailable trap when we
enter the guest, since we can't be lazy in HV-mode.
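
To make that constraint concrete, here is a minimal sketch of the kind of
load helper being described -- an illustration only, assuming the existing
booke fpu_active flag and the low-level load_up_fpu() loader, and only
meaningful when called with interrupts disabled immediately before the
guest entry asm:

static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	/*
	 * Guest FP state already lives in current->thread; just make sure
	 * the FPU is enabled for this thread again before entering the
	 * guest, in case an earlier enable_kernel_fp() cleared MSR_FP.
	 */
	if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
		load_up_fpu();
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}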

> >> I really don't see where this patch improves anything tbh. It certainly
> >> makes the code flow more awkward.
> >
> > I was pointing you to this: The idea of FPU/AltiVec laziness that the
> > kernel is struggling to achieve is to reduce the number of store/restore
> > operations. Without this improvement we restore the unit each time we are
> > sched in. If another process takes ownership of the unit (on SMP it's
> > even worse, but don't bother with this) the kernel stores the unit state
> > to the qemu task. This can happen multiple times during handle_exit().
> >
> > Do you see it now?
>
> Yup. Looks good. The code flow is very hard to follow though - there
> are a lot of implicit assumptions that don't get documented anywhere.
> For example the fact that we rely on giveup_fpu() to remove MSR_FP
> from our thread.

That's not new to this patch...

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 12:42 ` [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness Mihai Caraman
  2013-07-03 13:45   ` Alexander Graf
@ 2013-07-03 17:18   ` Scott Wood
  2013-07-03 17:23     ` Alexander Graf
  2013-07-03 18:37   ` Scott Wood
  2 siblings, 1 reply; 37+ messages in thread
From: Scott Wood @ 2013-07-03 17:18 UTC (permalink / raw)
  To: Mihai Caraman; +Cc: Mihai Caraman, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 07:42:36 AM, Mihai Caraman wrote:
> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> returning to guest instead of each sched in. Without this improvement
> an interrupt may also claim floating point, corrupting guest state.
>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
>  arch/powerpc/kvm/booke.c  |    1 +
>  arch/powerpc/kvm/e500mc.c |    2 --
>  2 files changed, 1 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 113961f..3cae2e3 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>  			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
>  		} else {
>  			kvmppc_lazy_ee_enable();
> +			kvmppc_load_guest_fp(vcpu);
>  		}

This should go before the kvmppc_lazy_ee_enable().

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 17:17             ` Scott Wood
@ 2013-07-03 17:22               ` Alexander Graf
  0 siblings, 0 replies; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 17:22 UTC (permalink / raw)
  To: Scott Wood; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 19:17, Scott Wood wrote:

> On 07/03/2013 11:59:45 AM, Alexander Graf wrote:
>> On 03.07.2013, at 17:41, Caraman Mihai Claudiu-B02008 wrote:
>> >>>>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
>> >>>>> returning to guest instead of each sched in. Without this improvement
>> >>>>> an interrupt may also claim floating point, corrupting guest state.
>> >>>>
>> >>>> Not sure I follow. Could you please describe exactly what's happening?
>> >>>
>> >>> This was already discussed on the list, I will forward you the thread.
>> >>
>> >> The only thing I've seen in that thread was some pathetic theoretical
>> >> case where an interrupt handler would enable fp and clobber state
>> >> carelessly. That's not something I'm worried about.
>> >
>> > Neither am I, though I don't find it pathetic. Please refer it to Scott.
>> If from Linux's point of view we look like a user space program with
>> active floating point registers, we don't have to worry about this case.
>> Kernel code that would clobber that fp state would clobber random user
>> space's fp state too.
>
> This patch makes it closer to how it works with a user space program.
> Or rather, it reduces the time window when we don't (and can't) act like
> a normal userspace program -- and ensures that we have interrupts
> disabled during that window.  An interrupt can't randomly clobber FP
> state; it has to call enable_kernel_fp() just like KVM does.
> enable_kernel_fp() clears the userspace MSR_FP to ensure that the state
> it saves gets restored before userspace uses it again, but that won't
> have any effect on guest execution (especially in HV-mode).  Thus
> kvmppc_load_guest_fp() needs to be atomic with guest entry.
> Conceptually it's like taking an automatic FP unavailable trap when we
> enter the guest, since we can't be lazy in HV-mode.

Yep. Once I understood that point things became clear to me :).

>
>> >> I really don't see where this patch improves anything tbh. It certainly
>> >> makes the code flow more awkward.
>> >
>> > I was pointing you to this: The idea of FPU/AltiVec laziness that the
>> > kernel is struggling to achieve is to reduce the number of store/restore
>> > operations. Without this improvement we restore the unit each time we
>> > are sched in. If another process takes ownership of the unit (on SMP
>> > it's even worse, but don't bother with this) the kernel stores the unit
>> > state to the qemu task. This can happen multiple times during
>> > handle_exit().
>> >
>> > Do you see it now?
>> Yup. Looks good. The code flow is very hard to follow though - there
>> are a lot of implicit assumptions that don't get documented anywhere.
>> For example the fact that we rely on giveup_fpu() to remove MSR_FP from
>> our thread.
>
> That's not new to this patch...

Would be nice to fix nevertheless. I'm probably not going to be the last
person forgetting how this works.


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 17:18   ` Scott Wood
@ 2013-07-03 17:23     ` Alexander Graf
  2013-07-03 17:44       ` Scott Wood
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 17:23 UTC (permalink / raw)
  To: Scott Wood; +Cc: Mihai Caraman, linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 19:18, Scott Wood wrote:

> On 07/03/2013 07:42:36 AM, Mihai Caraman wrote:
>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
>> returning to guest instead of each sched in. Without this improvement
>> an interrupt may also claim floating point, corrupting guest state.
>> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
>> ---
>> arch/powerpc/kvm/booke.c  |    1 +
>> arch/powerpc/kvm/e500mc.c |    2 --
>> 2 files changed, 1 insertions(+), 2 deletions(-)
>> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
>> index 113961f..3cae2e3 100644
>> --- a/arch/powerpc/kvm/booke.c
>> +++ b/arch/powerpc/kvm/booke.c
>> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
>> 		} else {
>> 			kvmppc_lazy_ee_enable();
>> +			kvmppc_load_guest_fp(vcpu);
>> 		}
>
> This should go before the kvmppc_lazy_ee_enable().

Why? What difference does that make? We're running with interrupts
disabled here, right?


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 17:23     ` Alexander Graf
@ 2013-07-03 17:44       ` Scott Wood
  2013-07-03 18:39         ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Scott Wood @ 2013-07-03 17:44 UTC (permalink / raw)
  To: Alexander Graf; +Cc: Mihai Caraman, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 12:23:16 PM, Alexander Graf wrote:
>
> On 03.07.2013, at 19:18, Scott Wood wrote:
>
> > On 07/03/2013 07:42:36 AM, Mihai Caraman wrote:
> >> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> >> returning to guest instead of each sched in. Without this improvement
> >> an interrupt may also claim floating point, corrupting guest state.
> >> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> >> ---
> >> arch/powerpc/kvm/booke.c  |    1 +
> >> arch/powerpc/kvm/e500mc.c |    2 --
> >> 2 files changed, 1 insertions(+), 2 deletions(-)
> >> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> >> index 113961f..3cae2e3 100644
> >> --- a/arch/powerpc/kvm/booke.c
> >> +++ b/arch/powerpc/kvm/booke.c
> >> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> >> 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
> >> 		} else {
> >> 			kvmppc_lazy_ee_enable();
> >> +			kvmppc_load_guest_fp(vcpu);
> >> 		}
> >
> > This should go before the kvmppc_lazy_ee_enable().
>
> Why? What difference does that make? We're running with interrupts
> disabled here, right?

Yes, and we want to minimize the code we run where we have interrupts
disabled but the lazy ee state says they're enabled.  So
kvmppc_lazy_ee_enable() should be the last thing we do before entering
asm code.

See http://patchwork.ozlabs.org/patch/249565/
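
Concretely, assuming the handle_exit() hunk quoted above, the resume path
would end up roughly like this (a sketch, not the final patch):

		} else {
			/*
			 * Reload guest FP state while interrupts are still
			 * hard-disabled and lazy EE still says "disabled".
			 */
			kvmppc_load_guest_fp(vcpu);
			/*
			 * Last step before dropping into the guest entry
			 * asm: flip the lazy EE state to enabled.
			 */
			kvmppc_lazy_ee_enable();
		}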

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling
  2013-07-03 15:13       ` Alexander Graf
@ 2013-07-03 18:28         ` Scott Wood
  2013-07-03 18:42           ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Scott Wood @ 2013-07-03 18:28 UTC (permalink / raw)
  To: Alexander Graf; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 10:13:57 AM, Alexander Graf wrote:
>
> On 03.07.2013, at 15:53, Caraman Mihai Claudiu-B02008 wrote:
>
> >>> -#ifdef CONFIG_SPE
> >>> 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
> >>> -		if (vcpu->arch.shared->msr & MSR_SPE)
> >>> -			kvmppc_vcpu_enable_spe(vcpu);
> >>> -		else
> >>> -			kvmppc_booke_queue_irqprio(vcpu,
> >>> -						   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
> >>> +		if (kvmppc_supports_spe()) {
> >>> +			bool enabled = false;
> >>> +
> >>> +#ifndef CONFIG_KVM_BOOKE_HV
> >>> +			if (vcpu->arch.shared->msr & MSR_SPE) {
> >>> +				kvmppc_vcpu_enable_spe(vcpu);
> >>> +				enabled = true;
> >>> +			}
> >>> +#endif
> >>
> >> Why the #ifdef? On HV capable systems kvmppc_supports_spe() will just
> >> always return false.
> >
> > AltiVec and SPE unavailable exceptions follow the same path. While
> > kvmppc_supports_spe() will always return false, kvmppc_supports_altivec()
> > may not.
>
> There is no chip that supports SPE and HV at the same time. So we'll
> never hit this anyway, since kvmppc_supports_spe() always returns
> false on HV capable systems.
>
> Just add a comment saying so and remove the ifdef :).

kvmppc_vcpu_enable_spe isn't defined unless CONFIG_SPE is defined.
More seriously, MSR_SPE is the same as MSR_VEC, so we shouldn't
interpret it as SPE unless CONFIG_SPE is defined.  And we can't rely on
the "if (kvmppc_supports_spe())" here because a later patch changes it
to "if (kvmppc_supports_altivec() || kvmppc_supports_spe())".  So I
think we still need the ifdef CONFIG_SPE here.

As for the HV ifndef, we should try not to confuse HV/PR with
e500mc/e500v2, even if we happen to only run HV on e500mc and PR on
e500v2.  We would not want to call kvmppc_vcpu_enable_spe() here on a
hypothetical HV target with SPE.  And we *would* want to call
kvmppc_vcpu_enable_fp() here on a hypothetical PR target with normal
FP.  It's one thing to leave out the latter, since it would involve
writing actual code that we have no way to test at this point, but
quite another to leave out the proper conditions for when we want to
run code that we do have.

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 17:07           ` Alexander Graf
@ 2013-07-03 18:36             ` Scott Wood
  2013-07-03 18:45               ` Alexander Graf
  0 siblings, 1 reply; 37+ messages in thread
From: Scott Wood @ 2013-07-03 18:36 UTC (permalink / raw)
  To: Alexander Graf; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 12:07:30 PM, Alexander Graf wrote:
>
> On 03.07.2013, at 18:49, Caraman Mihai Claudiu-B02008 wrote:
>
> >>>> Do we need to do this even when the guest doesn't use Altivec? Can't we
> >>>> just load it on demand then once we fault? This code path really should
> >>>> only be a prefetch enable when MSR_VEC is already set in guest context.
> >>>
> >>> No we can't, read 6/6.
> >>
> >> So we have to make sure we're completely unlazy when it comes to a KVM
> >> guest. Are we?
> >
> > Yes, because MSR[SPV] is under its control.
>
> Oh, sure, KVM wants it unlazy. That part is obvious. But does the
> kernel always give us unlaziness? The way I read the code, process.c
> goes lazy when !CONFIG_SMP.
>
> So the big question is why we're manually enforcing FPU giveup, but
> not Altivec giveup? One of the 2 probably is wrong :).

Why do you think we're not enforcing it for Altivec?  Is there some
specific piece of code you're referring to that is different in this
regard?

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 12:42 ` [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness Mihai Caraman
  2013-07-03 13:45   ` Alexander Graf
  2013-07-03 17:18   ` Scott Wood
@ 2013-07-03 18:37   ` Scott Wood
  2013-07-03 18:40     ` Alexander Graf
  2 siblings, 1 reply; 37+ messages in thread
From: Scott Wood @ 2013-07-03 18:37 UTC (permalink / raw)
  To: Mihai Caraman; +Cc: Mihai Caraman, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 07:42:36 AM, Mihai Caraman wrote:
> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> returning to guest instead of each sched in. Without this improvement
> an interrupt may also claim floating point, corrupting guest state.
>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
>  arch/powerpc/kvm/booke.c  |    1 +
>  arch/powerpc/kvm/e500mc.c |    2 --
>  2 files changed, 1 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 113961f..3cae2e3 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>  			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
>  		} else {
>  			kvmppc_lazy_ee_enable();
> +			kvmppc_load_guest_fp(vcpu);
>  		}
>  	}
>
> diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
> index 19c8379..09da1ac 100644
> --- a/arch/powerpc/kvm/e500mc.c
> +++ b/arch/powerpc/kvm/e500mc.c
> @@ -143,8 +143,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  		kvmppc_e500_tlbil_all(vcpu_e500);
>  		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
>  	}
> -
> -	kvmppc_load_guest_fp(vcpu);
>  }
>
>  void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)

Can we now remove vcpu->fpu_active, and the comment that says "Kernel
usage of FP (via enable_kernel_fp()) in this thread must not occur while
vcpu->fpu_active is set."?

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 12:42 ` [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support Mihai Caraman
  2013-07-03 15:17   ` Alexander Graf
@ 2013-07-03 18:38   ` Scott Wood
  1 sibling, 0 replies; 37+ messages in thread
From: Scott Wood @ 2013-07-03 18:38 UTC (permalink / raw)
  To: Mihai Caraman; +Cc: Mihai Caraman, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 07:42:37 AM, Mihai Caraman wrote:
> Add KVM Book3E AltiVec support. KVM Book3E FPU support gracefully
> reuses host infrastructure, so follow the same approach for AltiVec.
>
> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> ---
>  arch/powerpc/kvm/booke.c |   72 ++++++++++++++++++++++++++++++++++++++++++++-
>  1 files changed, 70 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 3cae2e3..c3c3af6 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -98,6 +98,19 @@ static inline bool kvmppc_supports_spe(void)
>  	return false;
>  }
>
> +/*
> + * Always returns true if AltiVec unit is present, see
> + * kvmppc_core_check_processor_compat().
> + */
> +static inline bool kvmppc_supports_altivec(void)
> +{
> +#ifdef CONFIG_ALTIVEC
> +		if (cpu_has_feature(CPU_FTR_ALTIVEC))
> +			return true;
> +#endif
> +	return false;
> +}

Whitespace.
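
For reference, the same helper with the indentation fixed -- a sketch only,
no functional change intended:

static inline bool kvmppc_supports_altivec(void)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		return true;
#endif
	return false;
}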

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 17:44       ` Scott Wood
@ 2013-07-03 18:39         ` Alexander Graf
  0 siblings, 0 replies; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 18:39 UTC (permalink / raw)
  To: Scott Wood; +Cc: Mihai Caraman, linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 19:44, Scott Wood wrote:

> On 07/03/2013 12:23:16 PM, Alexander Graf wrote:
>> On 03.07.2013, at 19:18, Scott Wood wrote:
>> > On 07/03/2013 07:42:36 AM, Mihai Caraman wrote:
>> >> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
>> >> returning to guest instead of each sched in. Without this improvement
>> >> an interrupt may also claim floating point, corrupting guest state.
>> >> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
>> >> ---
>> >> arch/powerpc/kvm/booke.c  |    1 +
>> >> arch/powerpc/kvm/e500mc.c |    2 --
>> >> 2 files changed, 1 insertions(+), 2 deletions(-)
>> >> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
>> >> index 113961f..3cae2e3 100644
>> >> --- a/arch/powerpc/kvm/booke.c
>> >> +++ b/arch/powerpc/kvm/booke.c
>> >> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> >> 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
>> >> 		} else {
>> >> 			kvmppc_lazy_ee_enable();
>> >> +			kvmppc_load_guest_fp(vcpu);
>> >> 		}
>> >
>> > This should go before the kvmppc_lazy_ee_enable().
>> Why? What difference does that make? We're running with interrupts
>> disabled here, right?
>
> Yes, and we want to minimize the code we run where we have interrupts
> disabled but the lazy ee state says they're enabled.  So
> kvmppc_lazy_ee_enable() should be the last thing we do before entering
> asm code.
>
> See http://patchwork.ozlabs.org/patch/249565/

Ah, cool. So we should add a comment saying that this should be the last
thing before entering asm code then :). That way we make sure nobody
else repeats the same mistake.


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 18:37   ` Scott Wood
@ 2013-07-03 18:40     ` Alexander Graf
  2013-07-04  6:50       ` Caraman Mihai Claudiu-B02008
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 18:40 UTC (permalink / raw)
  To: Scott Wood; +Cc: Mihai Caraman, linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 20:37, Scott Wood wrote:

> On 07/03/2013 07:42:36 AM, Mihai Caraman wrote:
>> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
>> returning to guest instead of each sched in. Without this improvement
>> an interrupt may also claim floating point, corrupting guest state.
>> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
>> ---
>> arch/powerpc/kvm/booke.c  |    1 +
>> arch/powerpc/kvm/e500mc.c |    2 --
>> 2 files changed, 1 insertions(+), 2 deletions(-)
>> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
>> index 113961f..3cae2e3 100644
>> --- a/arch/powerpc/kvm/booke.c
>> +++ b/arch/powerpc/kvm/booke.c
>> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>> 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
>> 		} else {
>> 			kvmppc_lazy_ee_enable();
>> +			kvmppc_load_guest_fp(vcpu);
>> 		}
>> 	}
>> diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
>> index 19c8379..09da1ac 100644
>> --- a/arch/powerpc/kvm/e500mc.c
>> +++ b/arch/powerpc/kvm/e500mc.c
>> @@ -143,8 +143,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>> 		kvmppc_e500_tlbil_all(vcpu_e500);
>> 		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
>> 	}
>> -
>> -	kvmppc_load_guest_fp(vcpu);
>> }
>> void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
>
> Can we now remove vcpu->fpu_active, and the comment that says "Kernel
> usage of FP (via enable_kernel_fp()) in this thread must not occur
> while vcpu->fpu_active is set."?

I think so, yes.


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling
  2013-07-03 18:28         ` Scott Wood
@ 2013-07-03 18:42           ` Alexander Graf
  2013-07-03 18:44             ` Scott Wood
  0 siblings, 1 reply; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 18:42 UTC (permalink / raw)
  To: Scott Wood; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 20:28, Scott Wood wrote:

> On 07/03/2013 10:13:57 AM, Alexander Graf wrote:
>> On 03.07.2013, at 15:53, Caraman Mihai Claudiu-B02008 wrote:
>> >>> -#ifdef CONFIG_SPE
>> >>> 	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL: {
>> >>> -		if (vcpu->arch.shared->msr & MSR_SPE)
>> >>> -			kvmppc_vcpu_enable_spe(vcpu);
>> >>> -		else
>> >>> -			kvmppc_booke_queue_irqprio(vcpu,
>> >>> -						   BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
>> >>> +		if (kvmppc_supports_spe()) {
>> >>> +			bool enabled = false;
>> >>> +
>> >>> +#ifndef CONFIG_KVM_BOOKE_HV
>> >>> +			if (vcpu->arch.shared->msr & MSR_SPE) {
>> >>> +				kvmppc_vcpu_enable_spe(vcpu);
>> >>> +				enabled = true;
>> >>> +			}
>> >>> +#endif
>> >>
>> >> Why the #ifdef? On HV capable systems kvmppc_supports_spe() will just
>> >> always return false.
>> >
>> > AltiVec and SPE unavailable exceptions follow the same path. While
>> > kvmppc_supports_spe() will always return false, kvmppc_supports_altivec()
>> > may not.
>> There is no chip that supports SPE and HV at the same time. So we'll
>> never hit this anyway, since kvmppc_supports_spe() always returns false
>> on HV capable systems.
>> Just add a comment saying so and remove the ifdef :).
>
> kvmppc_vcpu_enable_spe isn't defined unless CONFIG_SPE is defined.
> More seriously, MSR_SPE is the same as MSR_VEC, so we shouldn't
> interpret it as SPE unless CONFIG_SPE is defined.  And we can't rely on
> the "if (kvmppc_supports_spe())" here because a later patch changes it
> to "if (kvmppc_supports_altivec() || kvmppc_supports_spe())".  So I
> think we still need the ifdef CONFIG_SPE here.
>
> As for the HV ifndef, we should try not to confuse HV/PR with
> e500mc/e500v2, even if we happen to only run HV on e500mc and PR on
> e500v2.  We would not want to call kvmppc_vcpu_enable_spe() here on a
> hypothetical HV target with SPE.  And we *would* want to call
> kvmppc_vcpu_enable_fp() here on a hypothetical PR target with normal FP.
> It's one thing to leave out the latter, since it would involve writing
> actual code that we have no way to test at this point, but quite another
> to leave out the proper conditions for when we want to run code that we
> do have.

So we should make this an #ifdef CONFIG_SPE rather than #ifndef
CONFIG_KVM_BOOKE_HV?


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling
  2013-07-03 18:42           ` Alexander Graf
@ 2013-07-03 18:44             ` Scott Wood
  0 siblings, 0 replies; 37+ messages in thread
From: Scott Wood @ 2013-07-03 18:44 UTC (permalink / raw)
  To: Alexander Graf; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc

On 07/03/2013 01:42:12 PM, Alexander Graf wrote:
>
> On 03.07.2013, at 20:28, Scott Wood wrote:
>
> > On 07/03/2013 10:13:57 AM, Alexander Graf wrote:
> >> There is no chip that supports SPE and HV at the same time. So
> >> we'll never hit this anyway, since kvmppc_supports_spe() always
> >> returns false on HV capable systems.
> >> Just add a comment saying so and remove the ifdef :).
> >
> > kvmppc_vcpu_enable_spe isn't defined unless CONFIG_SPE is defined.
> > More seriously, MSR_SPE is the same as MSR_VEC, so we shouldn't
> > interpret it as SPE unless CONFIG_SPE is defined.  And we can't rely
> > on the "if (kvmppc_supports_spe())" here because a later patch
> > changes it to "if (kvmppc_supports_altivec() ||
> > kvmppc_supports_spe())".  So I think we still need the ifdef
> > CONFIG_SPE here.
> >
> > As for the HV ifndef, we should try not to confuse HV/PR with
> > e500mc/e500v2, even if we happen to only run HV on e500mc and PR on
> > e500v2.  We would not want to call kvmppc_vcpu_enable_spe() here on a
> > hypothetical HV target with SPE.  And we *would* want to call
> > kvmppc_vcpu_enable_fp() here on a hypothetical PR target with normal
> > FP.  It's one thing to leave out the latter, since it would involve
> > writing actual code that we have no way to test at this point, but
> > quite another to leave out the proper conditions for when we want to
> > run code that we do have.
>
> So we should make this an #ifdef CONFIG_SPE rather than #ifndef
> CONFIG_KVM_BOOKE_HV?

I think it should be "#if !defined(CONFIG_KVM_BOOKE_HV) &&
defined(CONFIG_SPE)" for the reasons I described in my second paragraph.
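
Applied to the hunk quoted earlier in the thread, that guard would look
roughly as below. This is a sketch only: the !enabled fallback is assumed to
queue the unavailable interrupt the way the pre-refactor code did, and the
rest of the case handler is elided.

	case BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL:
		if (kvmppc_supports_altivec() || kvmppc_supports_spe()) {
			bool enabled = false;

#if !defined(CONFIG_KVM_BOOKE_HV) && defined(CONFIG_SPE)
			/*
			 * Only interpret the shared MSR bit as SPE on a
			 * non-HV, SPE-enabled build (MSR_SPE and MSR_VEC
			 * are the same bit).
			 */
			if (vcpu->arch.shared->msr & MSR_SPE) {
				kvmppc_vcpu_enable_spe(vcpu);
				enabled = true;
			}
#endif
			if (!enabled)
				kvmppc_booke_queue_irqprio(vcpu,
					BOOKE_IRQPRIO_SPE_ALTIVEC_UNAVAIL);
		}
		r = RESUME_GUEST;
		break;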

-Scott

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support
  2013-07-03 18:36             ` Scott Wood
@ 2013-07-03 18:45               ` Alexander Graf
  0 siblings, 0 replies; 37+ messages in thread
From: Alexander Graf @ 2013-07-03 18:45 UTC (permalink / raw)
  To: Scott Wood; +Cc: Caraman Mihai Claudiu-B02008, linuxppc-dev, kvm, kvm-ppc


On 03.07.2013, at 20:36, Scott Wood wrote:

> On 07/03/2013 12:07:30 PM, Alexander Graf wrote:
>> On 03.07.2013, at 18:49, Caraman Mihai Claudiu-B02008 wrote:
>> >>>> Do we need to do this even when the guest doesn't use Altivec? Can't we
>> >>>> just load it on demand then once we fault? This code path really should
>> >>>> only be a prefetch enable when MSR_VEC is already set in guest context.
>> >>>
>> >>> No we can't, read 6/6.
>> >>
>> >> So we have to make sure we're completely unlazy when it comes to a KVM
>> >> guest. Are we?
>> >
>> > Yes, because MSR[SPV] is under its control.
>> Oh, sure, KVM wants it unlazy. That part is obvious. But does the
>> kernel always give us unlaziness? The way I read the code, process.c
>> goes lazy when !CONFIG_SMP.
>> So the big question is why we're manually enforcing FPU giveup, but
>> not Altivec giveup? One of the 2 probably is wrong :).
>
> Why do you think we're not enforcing it for Altivec?  Is there some
> specific piece of code you're referring to that is different in this
> regard?

Well, apparently because I misread the code :). All is well.


Alex

^ permalink raw reply	[flat|nested] 37+ messages in thread

* RE: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
  2013-07-03 18:40     ` Alexander Graf
@ 2013-07-04  6:50       ` Caraman Mihai Claudiu-B02008
  0 siblings, 0 replies; 37+ messages in thread
From: Caraman Mihai Claudiu-B02008 @ 2013-07-04  6:50 UTC (permalink / raw)
  To: Alexander Graf, Wood Scott-B07421; +Cc: linuxppc-dev, kvm, kvm-ppc



> -----Original Message-----
> From: kvm-ppc-owner@vger.kernel.org [mailto:kvm-ppc-
> owner@vger.kernel.org] On Behalf Of Alexander Graf
> Sent: Wednesday, July 03, 2013 9:40 PM
> To: Wood Scott-B07421
> Cc: Caraman Mihai Claudiu-B02008; kvm-ppc@vger.kernel.org;
> kvm@vger.kernel.org; linuxppc-dev@lists.ozlabs.org
> Subject: Re: [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness
>
>
> On 03.07.2013, at 20:37, Scott Wood wrote:
>
> > On 07/03/2013 07:42:36 AM, Mihai Caraman wrote:
> >> Increase FPU laziness by calling kvmppc_load_guest_fp() just before
> >> returning to guest instead of each sched in. Without this improvement
> >> an interrupt may also claim floating point, corrupting guest state.
> >> Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
> >> ---
> >> arch/powerpc/kvm/booke.c  |    1 +
> >> arch/powerpc/kvm/e500mc.c |    2 --
> >> 2 files changed, 1 insertions(+), 2 deletions(-)
> >> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> >> index 113961f..3cae2e3 100644
> >> --- a/arch/powerpc/kvm/booke.c
> >> +++ b/arch/powerpc/kvm/booke.c
> >> @@ -1204,6 +1204,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> >> 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
> >> 		} else {
> >> 			kvmppc_lazy_ee_enable();
> >> +			kvmppc_load_guest_fp(vcpu);
> >> 		}
> >> 	}
> >> diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
> >> index 19c8379..09da1ac 100644
> >> --- a/arch/powerpc/kvm/e500mc.c
> >> +++ b/arch/powerpc/kvm/e500mc.c
> >> @@ -143,8 +143,6 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> >> 		kvmppc_e500_tlbil_all(vcpu_e500);
> >> 		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
> >> 	}
> >> -
> >> -	kvmppc_load_guest_fp(vcpu);
> >> }
> >> void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
> >
> > Can we now remove vcpu->fpu_active, and the comment that says "Kernel
> > usage of FP (via enable_kernel_fp()) in this thread must not occur
> > while vcpu->fpu_active is set."?
>
> I think so, yes.

Yes, as I already did this for AltiVec.
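
Once fpu_active goes away, the load helper sketched earlier in the thread
would presumably reduce to checking only the thread MSR -- an illustration
only:

	if (!(current->thread.regs->msr & MSR_FP)) {
		load_up_fpu();
		current->thread.regs->msr |= MSR_FP;
	}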

-Mike

^ permalink raw reply	[flat|nested] 37+ messages in thread

end of thread, other threads:[~2013-07-04  6:51 UTC | newest]

Thread overview: 37+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-07-03 12:42 [PATCH 0/6] KVM: PPC: Book3E: AltiVec support Mihai Caraman
2013-07-03 12:42 ` [PATCH 1/6] KVM: PPC: Book3E: Use common defines for SPE/FP/AltiVec int numbers Mihai Caraman
2013-07-03 12:42 ` [PATCH 2/6] KVM: PPC: Book3E: Refactor SPE/FP exit handling Mihai Caraman
2013-07-03 13:30   ` Alexander Graf
2013-07-03 13:53     ` Caraman Mihai Claudiu-B02008
2013-07-03 15:13       ` Alexander Graf
2013-07-03 18:28         ` Scott Wood
2013-07-03 18:42           ` Alexander Graf
2013-07-03 18:44             ` Scott Wood
2013-07-03 12:42 ` [PATCH 3/6] KVM: PPC: Book3E: Increase FPU laziness Mihai Caraman
2013-07-03 13:45   ` Alexander Graf
2013-07-03 13:55     ` Caraman Mihai Claudiu-B02008
2013-07-03 15:11       ` Alexander Graf
2013-07-03 15:41         ` Caraman Mihai Claudiu-B02008
2013-07-03 16:59           ` Alexander Graf
2013-07-03 17:17             ` Scott Wood
2013-07-03 17:22               ` Alexander Graf
2013-07-03 17:07         ` Scott Wood
2013-07-03 17:08           ` Alexander Graf
2013-07-03 17:18   ` Scott Wood
2013-07-03 17:23     ` Alexander Graf
2013-07-03 17:44       ` Scott Wood
2013-07-03 18:39         ` Alexander Graf
2013-07-03 18:37   ` Scott Wood
2013-07-03 18:40     ` Alexander Graf
2013-07-04  6:50       ` Caraman Mihai Claudiu-B02008
2013-07-03 12:42 ` [PATCH 4/6] KVM: PPC: Book3E: Add AltiVec support Mihai Caraman
2013-07-03 15:17   ` Alexander Graf
2013-07-03 16:09     ` Caraman Mihai Claudiu-B02008
2013-07-03 16:43       ` Alexander Graf
2013-07-03 16:49         ` Caraman Mihai Claudiu-B02008
2013-07-03 17:07           ` Alexander Graf
2013-07-03 18:36             ` Scott Wood
2013-07-03 18:45               ` Alexander Graf
2013-07-03 18:38   ` Scott Wood
2013-07-03 12:42 ` [PATCH 5/6] KVM: PPC: Book3E: Add ONE_REG " Mihai Caraman
2013-07-03 12:42 ` [PATCH 6/6] KVM: PPC: Book3E: Enable e6500 core Mihai Caraman

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).