* [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms
@ 2022-07-11  3:49 Kajol Jain
  2022-07-11  3:49 ` [PATCH 2/2] powerpc/kvm: Remove comment related to moving PMU code to perf subsystem Kajol Jain
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Kajol Jain @ 2022-07-11  3:49 UTC (permalink / raw)
  To: mpe, linuxppc-dev, npiggin; +Cc: kjain, atrajeev, maddy, disgoel, rnsastry

File book3s_hv_p9_entry.c in the powerpc/kvm folder contains functions
such as freeze_pmu, switch_pmu_to_guest and switch_pmu_to_host, which are
specific to the Performance Monitoring Unit (PMU) on power9 and later
platforms.

For easier maintenance, move the PMU-related code from
book3s_hv_p9_entry.c to a new file called book3s_hv_p9_perf.c,
without any change in logic.
Also make the corresponding change in the Makefile so that
book3s_hv_p9_perf.c is included in the build.
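
For reference, these helpers are called around P9 guest entry from
book3s_hv.c, roughly as in the sketch below. The sketch is illustrative
only and not part of this patch; the entry call, its arguments and the
surrounding code are approximations.

/*
 * Illustrative sketch: how switch_pmu_to_guest()/switch_pmu_to_host()
 * are expected to bracket P9 guest entry. The kvmhv_vcpu_entry_p9()
 * call and its arguments are assumptions here; the real caller does
 * more SPR save/restore work around these calls.
 */
static int p9_guest_run_sketch(struct kvm_vcpu *vcpu, u64 time_limit,
			       unsigned long lpcr, u64 *tb)
{
	struct p9_host_os_sprs host_os_sprs;
	int trap;

	/* Save the host PMU state and load the guest PMU registers. */
	switch_pmu_to_guest(vcpu, &host_os_sprs);

	/* Enter the guest (everything else elided). */
	trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);

	/* Save the guest PMU registers and restore the host PMU state. */
	switch_pmu_to_host(vcpu, &host_os_sprs);

	return trap;
}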

Signed-off-by: Kajol Jain <kjain@linux.ibm.com>
---
 arch/powerpc/kvm/Makefile             |   1 +
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 221 -------------------------
 arch/powerpc/kvm/book3s_hv_p9_perf.c  | 225 ++++++++++++++++++++++++++
 3 files changed, 226 insertions(+), 221 deletions(-)
 create mode 100644 arch/powerpc/kvm/book3s_hv_p9_perf.c

diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 0cd23ce07d68..5319d889b184 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -86,6 +86,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_hv_rm_mmu.o \
 	book3s_hv_ras.o \
 	book3s_hv_builtin.o \
+	book3s_hv_p9_perf.o \
 	$(kvm-book3s_64-builtin-tm-objs-y) \
 	$(kvm-book3s_64-builtin-xics-objs-y)
 endif
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 112a09b33328..34d81898bf47 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -3,231 +3,10 @@
 #include <linux/kvm_host.h>
 #include <asm/asm-prototypes.h>
 #include <asm/dbell.h>
-#include <asm/kvm_ppc.h>
-#include <asm/pmc.h>
 #include <asm/ppc-opcode.h>
 
 #include "book3s_hv.h"
 
-static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
-{
-	if (!(mmcr0 & MMCR0_FC))
-		goto do_freeze;
-	if (mmcra & MMCRA_SAMPLE_ENABLE)
-		goto do_freeze;
-	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-		if (!(mmcr0 & MMCR0_PMCCEXT))
-			goto do_freeze;
-		if (!(mmcra & MMCRA_BHRB_DISABLE))
-			goto do_freeze;
-	}
-	return;
-
-do_freeze:
-	mmcr0 = MMCR0_FC;
-	mmcra = 0;
-	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-		mmcr0 |= MMCR0_PMCCEXT;
-		mmcra = MMCRA_BHRB_DISABLE;
-	}
-
-	mtspr(SPRN_MMCR0, mmcr0);
-	mtspr(SPRN_MMCRA, mmcra);
-	isync();
-}
-
-void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
-			 struct p9_host_os_sprs *host_os_sprs)
-{
-	struct lppaca *lp;
-	int load_pmu = 1;
-
-	lp = vcpu->arch.vpa.pinned_addr;
-	if (lp)
-		load_pmu = lp->pmcregs_in_use;
-
-	/* Save host */
-	if (ppc_get_pmu_inuse()) {
-		/*
-		 * It might be better to put PMU handling (at least for the
-		 * host) in the perf subsystem because it knows more about what
-		 * is being used.
-		 */
-
-		/* POWER9, POWER10 do not implement HPMC or SPMC */
-
-		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
-		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
-
-		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
-
-		host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
-		host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
-		host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
-		host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
-		host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
-		host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
-		host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
-		host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
-		host_os_sprs->sdar = mfspr(SPRN_SDAR);
-		host_os_sprs->siar = mfspr(SPRN_SIAR);
-		host_os_sprs->sier1 = mfspr(SPRN_SIER);
-
-		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-			host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
-			host_os_sprs->sier2 = mfspr(SPRN_SIER2);
-			host_os_sprs->sier3 = mfspr(SPRN_SIER3);
-		}
-	}
-
-#ifdef CONFIG_PPC_PSERIES
-	/* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
-	if (kvmhv_on_pseries()) {
-		barrier();
-		get_lppaca()->pmcregs_in_use = load_pmu;
-		barrier();
-	}
-#endif
-
-	/*
-	 * Load guest. If the VPA said the PMCs are not in use but the guest
-	 * tried to access them anyway, HFSCR[PM] will be set by the HFAC
-	 * fault so we can make forward progress.
-	 */
-	if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
-		mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
-		mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
-		mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
-		mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
-		mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
-		mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
-		mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
-		mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
-		mtspr(SPRN_SDAR, vcpu->arch.sdar);
-		mtspr(SPRN_SIAR, vcpu->arch.siar);
-		mtspr(SPRN_SIER, vcpu->arch.sier[0]);
-
-		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-			mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
-			mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
-			mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
-		}
-
-		/* Set MMCRA then MMCR0 last */
-		mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
-		mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
-		/* No isync necessary because we're starting counters */
-
-		if (!vcpu->arch.nested &&
-				(vcpu->arch.hfscr_permitted & HFSCR_PM))
-			vcpu->arch.hfscr |= HFSCR_PM;
-	}
-}
-EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
-
-void switch_pmu_to_host(struct kvm_vcpu *vcpu,
-			struct p9_host_os_sprs *host_os_sprs)
-{
-	struct lppaca *lp;
-	int save_pmu = 1;
-
-	lp = vcpu->arch.vpa.pinned_addr;
-	if (lp)
-		save_pmu = lp->pmcregs_in_use;
-	if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
-		/*
-		 * Save pmu if this guest is capable of running nested guests.
-		 * This option is for old L1s that do not set their
-		 * lppaca->pmcregs_in_use properly when entering their L2.
-		 */
-		save_pmu |= nesting_enabled(vcpu->kvm);
-	}
-
-	if (save_pmu) {
-		vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
-		vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
-
-		freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
-
-		vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
-		vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
-		vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
-		vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
-		vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
-		vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
-		vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
-		vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
-		vcpu->arch.sdar = mfspr(SPRN_SDAR);
-		vcpu->arch.siar = mfspr(SPRN_SIAR);
-		vcpu->arch.sier[0] = mfspr(SPRN_SIER);
-
-		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-			vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
-			vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
-			vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
-		}
-
-	} else if (vcpu->arch.hfscr & HFSCR_PM) {
-		/*
-		 * The guest accessed PMC SPRs without specifying they should
-		 * be preserved, or it cleared pmcregs_in_use after the last
-		 * access. Just ensure they are frozen.
-		 */
-		freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
-
-		/*
-		 * Demand-fault PMU register access in the guest.
-		 *
-		 * This is used to grab the guest's VPA pmcregs_in_use value
-		 * and reflect it into the host's VPA in the case of a nested
-		 * hypervisor.
-		 *
-		 * It also avoids having to zero-out SPRs after each guest
-		 * exit to avoid side-channels.
-		 *
-		 * This is cleared here when we exit the guest, so later HFSCR
-		 * interrupt handling can add it back to run the guest with
-		 * PM enabled next time.
-		 */
-		if (!vcpu->arch.nested)
-			vcpu->arch.hfscr &= ~HFSCR_PM;
-	} /* otherwise the PMU should still be frozen */
-
-#ifdef CONFIG_PPC_PSERIES
-	if (kvmhv_on_pseries()) {
-		barrier();
-		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
-		barrier();
-	}
-#endif
-
-	if (ppc_get_pmu_inuse()) {
-		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
-		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
-		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
-		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
-		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
-		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
-		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
-		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
-		mtspr(SPRN_SDAR, host_os_sprs->sdar);
-		mtspr(SPRN_SIAR, host_os_sprs->siar);
-		mtspr(SPRN_SIER, host_os_sprs->sier1);
-
-		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
-			mtspr(SPRN_SIER2, host_os_sprs->sier2);
-			mtspr(SPRN_SIER3, host_os_sprs->sier3);
-		}
-
-		/* Set MMCRA then MMCR0 last */
-		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
-		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
-		isync();
-	}
-}
-EXPORT_SYMBOL_GPL(switch_pmu_to_host);
-
 static void load_spr_state(struct kvm_vcpu *vcpu,
 				struct p9_host_os_sprs *host_os_sprs)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_p9_perf.c b/arch/powerpc/kvm/book3s_hv_p9_perf.c
new file mode 100644
index 000000000000..da3135cab9ea
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_perf.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/kvm_ppc.h>
+#include <asm/pmc.h>
+
+#include "book3s_hv.h"
+
+static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
+{
+	if (!(mmcr0 & MMCR0_FC))
+		goto do_freeze;
+	if (mmcra & MMCRA_SAMPLE_ENABLE)
+		goto do_freeze;
+	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+		if (!(mmcr0 & MMCR0_PMCCEXT))
+			goto do_freeze;
+		if (!(mmcra & MMCRA_BHRB_DISABLE))
+			goto do_freeze;
+	}
+	return;
+
+do_freeze:
+	mmcr0 = MMCR0_FC;
+	mmcra = 0;
+	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+		mmcr0 |= MMCR0_PMCCEXT;
+		mmcra = MMCRA_BHRB_DISABLE;
+	}
+
+	mtspr(SPRN_MMCR0, mmcr0);
+	mtspr(SPRN_MMCRA, mmcra);
+	isync();
+}
+
+void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+			 struct p9_host_os_sprs *host_os_sprs)
+{
+	struct lppaca *lp;
+	int load_pmu = 1;
+
+	lp = vcpu->arch.vpa.pinned_addr;
+	if (lp)
+		load_pmu = lp->pmcregs_in_use;
+
+	/* Save host */
+	if (ppc_get_pmu_inuse()) {
+		/*
+		 * It might be better to put PMU handling (at least for the
+		 * host) in the perf subsystem because it knows more about what
+		 * is being used.
+		 */
+
+		/* POWER9, POWER10 do not implement HPMC or SPMC */
+
+		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
+		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
+
+		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
+
+		host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
+		host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
+		host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
+		host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
+		host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
+		host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
+		host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
+		host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
+		host_os_sprs->sdar = mfspr(SPRN_SDAR);
+		host_os_sprs->siar = mfspr(SPRN_SIAR);
+		host_os_sprs->sier1 = mfspr(SPRN_SIER);
+
+		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+			host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
+			host_os_sprs->sier2 = mfspr(SPRN_SIER2);
+			host_os_sprs->sier3 = mfspr(SPRN_SIER3);
+		}
+	}
+
+#ifdef CONFIG_PPC_PSERIES
+	/* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
+	if (kvmhv_on_pseries()) {
+		barrier();
+		get_lppaca()->pmcregs_in_use = load_pmu;
+		barrier();
+	}
+#endif
+
+	/*
+	 * Load guest. If the VPA said the PMCs are not in use but the guest
+	 * tried to access them anyway, HFSCR[PM] will be set by the HFAC
+	 * fault so we can make forward progress.
+	 */
+	if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
+		mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
+		mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
+		mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
+		mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
+		mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
+		mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
+		mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
+		mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
+		mtspr(SPRN_SDAR, vcpu->arch.sdar);
+		mtspr(SPRN_SIAR, vcpu->arch.siar);
+		mtspr(SPRN_SIER, vcpu->arch.sier[0]);
+
+		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+			mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
+			mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
+			mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
+		}
+
+		/* Set MMCRA then MMCR0 last */
+		mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
+		mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
+		/* No isync necessary because we're starting counters */
+
+		if (!vcpu->arch.nested &&
+		    (vcpu->arch.hfscr_permitted & HFSCR_PM))
+			vcpu->arch.hfscr |= HFSCR_PM;
+	}
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
+
+void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+			struct p9_host_os_sprs *host_os_sprs)
+{
+	struct lppaca *lp;
+	int save_pmu = 1;
+
+	lp = vcpu->arch.vpa.pinned_addr;
+	if (lp)
+		save_pmu = lp->pmcregs_in_use;
+	if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
+		/*
+		 * Save pmu if this guest is capable of running nested guests.
+		 * This option is for old L1s that do not set their
+		 * lppaca->pmcregs_in_use properly when entering their L2.
+		 */
+		save_pmu |= nesting_enabled(vcpu->kvm);
+	}
+
+	if (save_pmu) {
+		vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
+		vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
+
+		freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
+
+		vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
+		vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
+		vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
+		vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
+		vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
+		vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
+		vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
+		vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
+		vcpu->arch.sdar = mfspr(SPRN_SDAR);
+		vcpu->arch.siar = mfspr(SPRN_SIAR);
+		vcpu->arch.sier[0] = mfspr(SPRN_SIER);
+
+		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+			vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
+			vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
+			vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
+		}
+
+	} else if (vcpu->arch.hfscr & HFSCR_PM) {
+		/*
+		 * The guest accessed PMC SPRs without specifying they should
+		 * be preserved, or it cleared pmcregs_in_use after the last
+		 * access. Just ensure they are frozen.
+		 */
+		freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
+
+		/*
+		 * Demand-fault PMU register access in the guest.
+		 *
+		 * This is used to grab the guest's VPA pmcregs_in_use value
+		 * and reflect it into the host's VPA in the case of a nested
+		 * hypervisor.
+		 *
+		 * It also avoids having to zero-out SPRs after each guest
+		 * exit to avoid side-channels.
+		 *
+		 * This is cleared here when we exit the guest, so later HFSCR
+		 * interrupt handling can add it back to run the guest with
+		 * PM enabled next time.
+		 */
+		if (!vcpu->arch.nested)
+			vcpu->arch.hfscr &= ~HFSCR_PM;
+	} /* otherwise the PMU should still be frozen */
+
+#ifdef CONFIG_PPC_PSERIES
+	if (kvmhv_on_pseries()) {
+		barrier();
+		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
+		barrier();
+	}
+#endif
+
+	if (ppc_get_pmu_inuse()) {
+		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
+		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
+		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
+		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
+		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
+		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
+		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
+		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
+		mtspr(SPRN_SDAR, host_os_sprs->sdar);
+		mtspr(SPRN_SIAR, host_os_sprs->siar);
+		mtspr(SPRN_SIER, host_os_sprs->sier1);
+
+		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
+			mtspr(SPRN_SIER2, host_os_sprs->sier2);
+			mtspr(SPRN_SIER3, host_os_sprs->sier3);
+		}
+
+		/* Set MMCRA then MMCR0 last */
+		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
+		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
+		isync();
+	}
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_host);
-- 
2.27.0



* [PATCH 2/2] powerpc/kvm: Remove comment related to moving PMU code to perf subsystem
  2022-07-11  3:49 [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms Kajol Jain
@ 2022-07-11  3:49 ` Kajol Jain
  2022-07-13  5:43   ` Nicholas Piggin
  2022-07-13  5:41 ` [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms Nicholas Piggin
  2022-08-02 11:01 ` Michael Ellerman
  2 siblings, 1 reply; 6+ messages in thread
From: Kajol Jain @ 2022-07-11  3:49 UTC (permalink / raw)
  To: mpe, linuxppc-dev, npiggin; +Cc: kjain, atrajeev, maddy, disgoel, rnsastry

Commit aabcaf6ae2a0 ("KVM: PPC: Book3S HV P9: Move host OS save/restore
functions to built-in") added a comment in the switch_pmu_to_guest
function indicating the possibility of moving the PMU handling code
to the perf subsystem. But compilation of the perf subsystem code
depends on CONFIG_PERF_EVENTS being enabled, whereas the KVM code has
no dependency on this config.
Remove this comment, as the switch_pmu_to_guest functionality is
needed even if the perf subsystem is disabled.

Signed-off-by: Kajol Jain <kjain@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_p9_perf.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_p9_perf.c b/arch/powerpc/kvm/book3s_hv_p9_perf.c
index da3135cab9ea..44d24cca3df1 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_perf.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_perf.c
@@ -44,12 +44,6 @@ void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
 
 	/* Save host */
 	if (ppc_get_pmu_inuse()) {
-		/*
-		 * It might be better to put PMU handling (at least for the
-		 * host) in the perf subsystem because it knows more about what
-		 * is being used.
-		 */
-
 		/* POWER9, POWER10 do not implement HPMC or SPMC */
 
 		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
-- 
2.27.0



* Re: [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms
  2022-07-11  3:49 [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms Kajol Jain
  2022-07-11  3:49 ` [PATCH 2/2] powerpc/kvm: Remove comment related to moving PMU code to perf subsystem Kajol Jain
@ 2022-07-13  5:41 ` Nicholas Piggin
  2022-07-14  7:54   ` Madhavan Srinivasan
  2022-08-02 11:01 ` Michael Ellerman
  2 siblings, 1 reply; 6+ messages in thread
From: Nicholas Piggin @ 2022-07-13  5:41 UTC (permalink / raw)
  To: Kajol Jain, linuxppc-dev, mpe; +Cc: atrajeev, maddy, disgoel, rnsastry

Excerpts from Kajol Jain's message of July 11, 2022 1:49 pm:
> File book3s_hv_p9_entry.c in the powerpc/kvm folder contains functions
> such as freeze_pmu, switch_pmu_to_guest and switch_pmu_to_host, which are
> specific to the Performance Monitoring Unit (PMU) on power9 and later
> platforms.
> 
> For easier maintenance, move the PMU-related code from
> book3s_hv_p9_entry.c to a new file called book3s_hv_p9_perf.c,
> without any change in logic.
> Also make the corresponding change in the Makefile so that
> book3s_hv_p9_perf.c is included in the build.


> +
> +	if (ppc_get_pmu_inuse()) {
> +		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
> +		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
> +		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
> +		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
> +		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
> +		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
> +		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
> +		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
> +		mtspr(SPRN_SDAR, host_os_sprs->sdar);
> +		mtspr(SPRN_SIAR, host_os_sprs->siar);
> +		mtspr(SPRN_SIER, host_os_sprs->sier1);
> +
> +		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
> +			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
> +			mtspr(SPRN_SIER2, host_os_sprs->sier2);
> +			mtspr(SPRN_SIER3, host_os_sprs->sier3);
> +		}
> +
> +		/* Set MMCRA then MMCR0 last */
> +		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
> +		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
> +		isync();
> +	}
> +}
> +EXPORT_SYMBOL_GPL(switch_pmu_to_host);
> 

I'm still thinking these parts of the code in particular that do the 
host PMU save/restore could be handled by calls into perf subsystem.  In 
some cases it doesn't need to save SPRs because it can recreate them or 
is not using them. Maybe it's not so simple.

Either way, I'm fine with this move to start with.

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>


* Re: [PATCH 2/2] powerpc/kvm: Remove comment related to moving PMU code to perf subsystem
  2022-07-11  3:49 ` [PATCH 2/2] powerpc/kvm: Remove comment related to moving PMU code to perf subsystem Kajol Jain
@ 2022-07-13  5:43   ` Nicholas Piggin
  0 siblings, 0 replies; 6+ messages in thread
From: Nicholas Piggin @ 2022-07-13  5:43 UTC (permalink / raw)
  To: Kajol Jain, linuxppc-dev, mpe; +Cc: atrajeev, maddy, disgoel, rnsastry

Excerpts from Kajol Jain's message of July 11, 2022 1:49 pm:
> Commit aabcaf6ae2a0 ("KVM: PPC: Book3S HV P9: Move host OS save/restore
> functions to built-in") added a comment in the switch_pmu_to_guest
> function indicating the possibility of moving the PMU handling code
> to the perf subsystem. But compilation of the perf subsystem code
> depends on CONFIG_PERF_EVENTS being enabled, whereas the KVM code has
> no dependency on this config.
> Remove this comment, as the switch_pmu_to_guest functionality is
> needed even if the perf subsystem is disabled.
> 
> Signed-off-by: Kajol Jain <kjain@linux.ibm.com>

Does the host PMU state need to be saved/restored if we don't have
PERF_EVENTS enabled? Guest yes, but host maybe could become a no-op?
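
A rough sketch of that idea (purely illustrative; whether CONFIG_PERF_EVENTS
is the right gate, and whether ppc_get_pmu_inuse() can ever be set without
it, is exactly the open question):

	/*
	 * Sketch only: skip the host-side PMU save when perf is not
	 * built. The body mirrors the existing host save in
	 * switch_pmu_to_guest(); only the added IS_ENABLED() check is
	 * new, and it is an assumption, not a tested change.
	 */
	if (IS_ENABLED(CONFIG_PERF_EVENTS) && ppc_get_pmu_inuse()) {
		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);

		/* ... remaining PMC/MMCR/SDAR/SIAR/SIER saves as in the patch ... */
	}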

Thanks,
Nick

> ---
>  arch/powerpc/kvm/book3s_hv_p9_perf.c | 6 ------
>  1 file changed, 6 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_p9_perf.c b/arch/powerpc/kvm/book3s_hv_p9_perf.c
> index da3135cab9ea..44d24cca3df1 100644
> --- a/arch/powerpc/kvm/book3s_hv_p9_perf.c
> +++ b/arch/powerpc/kvm/book3s_hv_p9_perf.c
> @@ -44,12 +44,6 @@ void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
>  
>  	/* Save host */
>  	if (ppc_get_pmu_inuse()) {
> -		/*
> -		 * It might be better to put PMU handling (at least for the
> -		 * host) in the perf subsystem because it knows more about what
> -		 * is being used.
> -		 */
> -
>  		/* POWER9, POWER10 do not implement HPMC or SPMC */
>  
>  		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
> -- 
> 2.27.0
> 
> 


* Re: [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms
  2022-07-13  5:41 ` [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms Nicholas Piggin
@ 2022-07-14  7:54   ` Madhavan Srinivasan
  0 siblings, 0 replies; 6+ messages in thread
From: Madhavan Srinivasan @ 2022-07-14  7:54 UTC (permalink / raw)
  To: Nicholas Piggin, Kajol Jain, linuxppc-dev, mpe
  Cc: atrajeev, disgoel, rnsastry


On 7/13/22 11:11 AM, Nicholas Piggin wrote:
> Excerpts from Kajol Jain's message of July 11, 2022 1:49 pm:
>> File book3s_hv_p9_entry.c in the powerpc/kvm folder contains functions
>> such as freeze_pmu, switch_pmu_to_guest and switch_pmu_to_host, which are
>> specific to the Performance Monitoring Unit (PMU) on power9 and later
>> platforms.
>>
>> For easier maintenance, move the PMU-related code from
>> book3s_hv_p9_entry.c to a new file called book3s_hv_p9_perf.c,
>> without any change in logic.
>> Also make the corresponding change in the Makefile so that
>> book3s_hv_p9_perf.c is included in the build.
>
>> +
>> +	if (ppc_get_pmu_inuse()) {
>> +		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
>> +		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
>> +		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
>> +		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
>> +		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
>> +		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
>> +		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
>> +		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
>> +		mtspr(SPRN_SDAR, host_os_sprs->sdar);
>> +		mtspr(SPRN_SIAR, host_os_sprs->siar);
>> +		mtspr(SPRN_SIER, host_os_sprs->sier1);
>> +
>> +		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
>> +			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
>> +			mtspr(SPRN_SIER2, host_os_sprs->sier2);
>> +			mtspr(SPRN_SIER3, host_os_sprs->sier3);
>> +		}
>> +
>> +		/* Set MMCRA then MMCR0 last */
>> +		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
>> +		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
>> +		isync();
>> +	}
>> +}
>> +EXPORT_SYMBOL_GPL(switch_pmu_to_host);
>>
> I'm still thinking these parts of the code in particular that do the
> host PMU save/restore could be handled by calls into perf subsystem.  In
> some cases it doesn't need to save SPRs because it can recreate them or
> is not using them. Maybe it's not so simple.
Yes, we looked at this. The concern for me is counter data leak.
A host application will still have read access to these SPRs
on power9 and earlier, so I would recommend saving/restoring the host
values as part of it.

Maddy
>
> Either way, I'm fine with this move to start with.
>
> Reviewed-by: Nicholas Piggin <npiggin@gmail.com>


* Re: [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms
  2022-07-11  3:49 [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms Kajol Jain
  2022-07-11  3:49 ` [PATCH 2/2] powerpc/kvm: Remove comment related to moving PMU code to perf subsystem Kajol Jain
  2022-07-13  5:41 ` [PATCH 1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms Nicholas Piggin
@ 2022-08-02 11:01 ` Michael Ellerman
  2 siblings, 0 replies; 6+ messages in thread
From: Michael Ellerman @ 2022-08-02 11:01 UTC (permalink / raw)
  To: linuxppc-dev, Kajol Jain, npiggin, mpe; +Cc: atrajeev, maddy, disgoel, rnsastry

On Mon, 11 Jul 2022 09:19:26 +0530, Kajol Jain wrote:
> File book3s_hv_p9_entry.c in the powerpc/kvm folder contains functions
> such as freeze_pmu, switch_pmu_to_guest and switch_pmu_to_host, which are
> specific to the Performance Monitoring Unit (PMU) on power9 and later
> platforms.
> 
> For easier maintenance, move the PMU-related code from
> book3s_hv_p9_entry.c to a new file called book3s_hv_p9_perf.c,
> without any change in logic.
> Also make the corresponding change in the Makefile so that
> book3s_hv_p9_perf.c is included in the build.
> 
> [...]

Applied to powerpc/topic/ppc-kvm.

[1/2] powerpc/kvm: Move pmu code in kvm folder to separate file for power9 and later platforms
      https://git.kernel.org/powerpc/c/db5360840f09eded71009a981084ab10a93a08d8
[2/2] powerpc/kvm: Remove comment related to moving PMU code to perf subsystem
      https://git.kernel.org/powerpc/c/4008d54e29531813e800580f8309133b9b14a921

cheers
