* [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
@ 2019-12-18 4:30 ` Sukadev Bhattiprolu
0 siblings, 0 replies; 10+ messages in thread
From: Sukadev Bhattiprolu @ 2019-12-18 4:30 UTC (permalink / raw)
To: Michael Ellerman, Paul Mackerras, linuxram
Cc: linuxppc-dev, andmike, kvm-ppc, bauerman
Ultravisor disables some CPU features like EBB and BHRB in the HFSCR
for secure virtual machines (SVMs). If the SVMs attempt to access
related registers, they will get a Program Interrupt.
Use macros/wrappers to skip accessing EBB and BHRB registers in secure
VMs.
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
---
---
arch/powerpc/include/asm/reg.h | 35 ++++++++++++++++++
arch/powerpc/kernel/process.c | 12 +++----
arch/powerpc/kvm/book3s_hv.c | 24 ++++++-------
arch/powerpc/kvm/book3s_hv_rmhandlers.S | 48 ++++++++++++++++++-------
arch/powerpc/kvm/book3s_hv_tm_builtin.c | 6 ++--
arch/powerpc/perf/core-book3s.c | 5 +--
arch/powerpc/xmon/xmon.c | 2 +-
7 files changed, 96 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index b3cbb1136bce..026eb20f6d13 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1379,6 +1379,41 @@ static inline void msr_check_and_clear(unsigned long bits)
__msr_check_and_clear(bits);
}
+#ifdef CONFIG_PPC_SVM
+/*
+ * Move from some "restricted" sprs.
+ * Secure VMs should not access some registers as the related features
+ * are disabled in the CPU. If an SVM is attempting to read from the given
+ * SPR, return 0. Otherwise behave like a normal mfspr.
+ */
+#define mfspr_r(rn) \
+({ \
+ unsigned long rval = 0ULL; \
+ \
+ if (!(mfmsr() & MSR_S)) \
+ asm volatile("mfspr %0," __stringify(rn) \
+ : "=r" (rval)); \
+ rval; \
+})
+
+/*
+ * Move to some "restricted" sprs.
+ * Secure VMs should not access some registers as the related features
+ * are disabled in the CPU. If an SVM is attempting to write to the given
+ * SPR, ignore the write. Otherwise behave like a normal mtspr.
+ */
+#define mtspr_r(rn, v) \
+({ \
+ if (!(mfmsr() & MSR_S)) \
+ asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory"); \
+})
+#else
+#define mfspr_r mfspr
+#define mtspr_r mtspr
+#endif
+
#ifdef __powerpc64__
#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 639ceae7da9d..9a691452ea3b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1059,9 +1059,9 @@ static inline void save_sprs(struct thread_struct *t)
t->dscr = mfspr(SPRN_DSCR);
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- t->bescr = mfspr(SPRN_BESCR);
- t->ebbhr = mfspr(SPRN_EBBHR);
- t->ebbrr = mfspr(SPRN_EBBRR);
+ t->bescr = mfspr_r(SPRN_BESCR);
+ t->ebbhr = mfspr_r(SPRN_EBBHR);
+ t->ebbrr = mfspr_r(SPRN_EBBRR);
t->fscr = mfspr(SPRN_FSCR);
@@ -1098,11 +1098,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
if (old_thread->bescr != new_thread->bescr)
- mtspr(SPRN_BESCR, new_thread->bescr);
+ mtspr_r(SPRN_BESCR, new_thread->bescr);
if (old_thread->ebbhr != new_thread->ebbhr)
- mtspr(SPRN_EBBHR, new_thread->ebbhr);
+ mtspr_r(SPRN_EBBHR, new_thread->ebbhr);
if (old_thread->ebbrr != new_thread->ebbrr)
- mtspr(SPRN_EBBRR, new_thread->ebbrr);
+ mtspr_r(SPRN_EBBRR, new_thread->ebbrr);
if (old_thread->fscr != new_thread->fscr)
mtspr(SPRN_FSCR, new_thread->fscr);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 709cf1fd4cf4..dba21b0e1d22 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3568,9 +3568,9 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_PSPB, vcpu->arch.pspb);
mtspr(SPRN_FSCR, vcpu->arch.fscr);
mtspr(SPRN_TAR, vcpu->arch.tar);
- mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
- mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
- mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ mtspr_r(SPRN_EBBHR, vcpu->arch.ebbhr);
+ mtspr_r(SPRN_EBBRR, vcpu->arch.ebbrr);
+ mtspr_r(SPRN_BESCR, vcpu->arch.bescr);
mtspr(SPRN_WORT, vcpu->arch.wort);
mtspr(SPRN_TIDR, vcpu->arch.tid);
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
@@ -3641,9 +3641,9 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.pspb = mfspr(SPRN_PSPB);
vcpu->arch.fscr = mfspr(SPRN_FSCR);
vcpu->arch.tar = mfspr(SPRN_TAR);
- vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
- vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
- vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ vcpu->arch.ebbhr = mfspr_r(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr_r(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr_r(SPRN_BESCR);
vcpu->arch.wort = mfspr(SPRN_WORT);
vcpu->arch.tid = mfspr(SPRN_TIDR);
vcpu->arch.amr = mfspr(SPRN_AMR);
@@ -4272,9 +4272,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Save userspace EBB and other register values */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- ebb_regs[0] = mfspr(SPRN_EBBHR);
- ebb_regs[1] = mfspr(SPRN_EBBRR);
- ebb_regs[2] = mfspr(SPRN_BESCR);
+ ebb_regs[0] = mfspr_r(SPRN_EBBHR);
+ ebb_regs[1] = mfspr_r(SPRN_EBBRR);
+ ebb_regs[2] = mfspr_r(SPRN_BESCR);
user_tar = mfspr(SPRN_TAR);
}
user_vrsave = mfspr(SPRN_VRSAVE);
@@ -4320,9 +4320,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Restore userspace EBB and other register values */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- mtspr(SPRN_EBBHR, ebb_regs[0]);
- mtspr(SPRN_EBBRR, ebb_regs[1]);
- mtspr(SPRN_BESCR, ebb_regs[2]);
+ mtspr_r(SPRN_EBBHR, ebb_regs[0]);
+ mtspr_r(SPRN_EBBRR, ebb_regs[1]);
+ mtspr_r(SPRN_BESCR, ebb_regs[2]);
mtspr(SPRN_TAR, user_tar);
mtspr(SPRN_FSCR, current->thread.fscr);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index faebcbb8c4db..8e305ca04dc5 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,15 +810,27 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_CIABR, r7
mtspr SPRN_TAR, r8
ld r5, VCPU_IC(r4)
- ld r8, VCPU_EBBHR(r4)
mtspr SPRN_IC, r5
- mtspr SPRN_EBBHR, r8
- ld r5, VCPU_EBBRR(r4)
- ld r6, VCPU_BESCR(r4)
+
+ /* EBB and TM are disabled for secure VMs so skip them */
+ mfmsr r8
+ andis. r8, r8, MSR_S@high
+ bne clear_ebb0
+ ld r5, VCPU_EBBHR(r4)
+ ld r6, VCPU_EBBRR(r4)
+ ld r7, VCPU_BESCR(r4)
+ b store_ebb0
+clear_ebb0:
+ li r5, 0
+ li r6, 0
+ li r7, 0
+store_ebb0:
+ mtspr SPRN_EBBHR, r5
+ mtspr SPRN_EBBRR, r6
+ mtspr SPRN_BESCR, r7
+
lwz r7, VCPU_GUEST_PID(r4)
ld r8, VCPU_WORT(r4)
- mtspr SPRN_EBBRR, r5
- mtspr SPRN_BESCR, r6
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
BEGIN_FTR_SECTION
@@ -1615,14 +1627,26 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mfspr r7, SPRN_TAR
std r5, VCPU_IC(r9)
std r7, VCPU_TAR(r9)
- mfspr r8, SPRN_EBBHR
- std r8, VCPU_EBBHR(r9)
- mfspr r5, SPRN_EBBRR
- mfspr r6, SPRN_BESCR
+
+ /* EBB and TM are disabled for secure VMs so skip them */
+ mfmsr r8
+ andis. r8, r8, MSR_S@high
+ bne clear_ebb1
+ mfspr r5, SPRN_EBBHR
+ mfspr r6, SPRN_EBBRR
+ mfspr r7, SPRN_BESCR
+ b store_ebb1
+clear_ebb1:
+ li r5, 0
+ li r6, 0
+ li r7, 0
+store_ebb1:
+ std r5, VCPU_EBBHR(r9)
+ std r6, VCPU_EBBRR(r9)
+ std r7, VCPU_BESCR(r9)
+
mfspr r7, SPRN_PID
mfspr r8, SPRN_WORT
- std r5, VCPU_EBBRR(r9)
- std r6, VCPU_BESCR(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index 217246279dfa..bc3071c0a436 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -45,18 +45,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
return 0;
- bescr = mfspr(SPRN_BESCR);
+ bescr = mfspr_r(SPRN_BESCR);
/* expect to see a S->T transition requested */
if (((bescr >> 30) & 3) != 2)
return 0;
bescr &= ~BESCR_GE;
if (instr & (1 << 11))
bescr |= BESCR_GE;
- mtspr(SPRN_BESCR, bescr);
+ mtspr_r(SPRN_BESCR, bescr);
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
vcpu->arch.shregs.msr = msr;
vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
- vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
+ vcpu->arch.regs.nip = mfspr_r(SPRN_EBBRR);
return 1;
case PPC_INST_MTMSRD:
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index ca92e01d0bd1..4e76b2251801 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -16,6 +16,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
+#include <asm/svm.h>
#include <asm/code-patching.h>
#ifdef CONFIG_PPC64
@@ -846,9 +847,9 @@ void perf_event_print_debug(void)
if (ppmu->flags & PPMU_ARCH_207S) {
pr_info("MMCR2: %016lx EBBHR: %016lx\n",
- mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
+ mfspr(SPRN_MMCR2), mfspr_r(SPRN_EBBHR));
pr_info("EBBRR: %016lx BESCR: %016lx\n",
- mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+ mfspr_r(SPRN_EBBRR), mfspr_r(SPRN_BESCR));
}
#endif
pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n",
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index d83364ebc5c5..4dae0537f85a 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1871,7 +1871,7 @@ static void dump_207_sprs(void)
printf("sdar = %.16lx sier = %.16lx pmc6 = %.8lx\n",
mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6));
printf("ebbhr = %.16lx ebbrr = %.16lx bescr = %.16lx\n",
- mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+ mfspr_r(SPRN_EBBHR), mfspr_r(SPRN_EBBRR), mfspr_r(SPRN_BESCR));
printf("iamr = %.16lx\n", mfspr(SPRN_IAMR));
if (!(msr & MSR_HV))
--
2.17.2
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
@ 2019-12-18 4:30 ` Sukadev Bhattiprolu
0 siblings, 0 replies; 10+ messages in thread
From: Sukadev Bhattiprolu @ 2019-12-18 4:30 UTC (permalink / raw)
To: Michael Ellerman, Paul Mackerras, linuxram
Cc: linuxppc-dev, andmike, kvm-ppc, bauerman
Ultravisor disables some CPU features like EBB and BHRB in the HFSCR
for secure virtual machines (SVMs). If the SVMs attempt to access
related registers, they will get a Program Interrupt.
Use macros/wrappers to skip accessing EBB and BHRB registers in secure
VMs.
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
---
---
arch/powerpc/include/asm/reg.h | 35 ++++++++++++++++++
arch/powerpc/kernel/process.c | 12 +++----
arch/powerpc/kvm/book3s_hv.c | 24 ++++++-------
arch/powerpc/kvm/book3s_hv_rmhandlers.S | 48 ++++++++++++++++++-------
arch/powerpc/kvm/book3s_hv_tm_builtin.c | 6 ++--
arch/powerpc/perf/core-book3s.c | 5 +--
arch/powerpc/xmon/xmon.c | 2 +-
7 files changed, 96 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index b3cbb1136bce..026eb20f6d13 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1379,6 +1379,41 @@ static inline void msr_check_and_clear(unsigned long bits)
__msr_check_and_clear(bits);
}
+#ifdef CONFIG_PPC_SVM
+/*
+ * Move from some "restricted" sprs.
+ * Secure VMs should not access some registers as the related features
+ * are disabled in the CPU. If an SVM is attempting to read from the given
+ * SPR, return 0. Otherwise behave like a normal mfspr.
+ */
+#define mfspr_r(rn) \
+({ \
+ unsigned long rval = 0ULL; \
+ \
+ if (!(mfmsr() & MSR_S)) \
+ asm volatile("mfspr %0," __stringify(rn) \
+ : "=r" (rval)); \
+ rval; \
+})
+
+/*
+ * Move to some "restricted" sprs.
+ * Secure VMs should not access some registers as the related features
+ * are disabled in the CPU. If an SVM is attempting to write to the given
+ * SPR, ignore the write. Otherwise behave like a normal mtspr.
+ */
+#define mtspr_r(rn, v) \
+({ \
+ if (!(mfmsr() & MSR_S)) \
+ asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory"); \
+})
+#else
+#define mfspr_r mfspr
+#define mtspr_r mtspr
+#endif
+
#ifdef __powerpc64__
#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 639ceae7da9d..9a691452ea3b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1059,9 +1059,9 @@ static inline void save_sprs(struct thread_struct *t)
t->dscr = mfspr(SPRN_DSCR);
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- t->bescr = mfspr(SPRN_BESCR);
- t->ebbhr = mfspr(SPRN_EBBHR);
- t->ebbrr = mfspr(SPRN_EBBRR);
+ t->bescr = mfspr_r(SPRN_BESCR);
+ t->ebbhr = mfspr_r(SPRN_EBBHR);
+ t->ebbrr = mfspr_r(SPRN_EBBRR);
t->fscr = mfspr(SPRN_FSCR);
@@ -1098,11 +1098,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
if (old_thread->bescr != new_thread->bescr)
- mtspr(SPRN_BESCR, new_thread->bescr);
+ mtspr_r(SPRN_BESCR, new_thread->bescr);
if (old_thread->ebbhr != new_thread->ebbhr)
- mtspr(SPRN_EBBHR, new_thread->ebbhr);
+ mtspr_r(SPRN_EBBHR, new_thread->ebbhr);
if (old_thread->ebbrr != new_thread->ebbrr)
- mtspr(SPRN_EBBRR, new_thread->ebbrr);
+ mtspr_r(SPRN_EBBRR, new_thread->ebbrr);
if (old_thread->fscr != new_thread->fscr)
mtspr(SPRN_FSCR, new_thread->fscr);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 709cf1fd4cf4..dba21b0e1d22 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3568,9 +3568,9 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_PSPB, vcpu->arch.pspb);
mtspr(SPRN_FSCR, vcpu->arch.fscr);
mtspr(SPRN_TAR, vcpu->arch.tar);
- mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
- mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
- mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ mtspr_r(SPRN_EBBHR, vcpu->arch.ebbhr);
+ mtspr_r(SPRN_EBBRR, vcpu->arch.ebbrr);
+ mtspr_r(SPRN_BESCR, vcpu->arch.bescr);
mtspr(SPRN_WORT, vcpu->arch.wort);
mtspr(SPRN_TIDR, vcpu->arch.tid);
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
@@ -3641,9 +3641,9 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.pspb = mfspr(SPRN_PSPB);
vcpu->arch.fscr = mfspr(SPRN_FSCR);
vcpu->arch.tar = mfspr(SPRN_TAR);
- vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
- vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
- vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ vcpu->arch.ebbhr = mfspr_r(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr_r(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr_r(SPRN_BESCR);
vcpu->arch.wort = mfspr(SPRN_WORT);
vcpu->arch.tid = mfspr(SPRN_TIDR);
vcpu->arch.amr = mfspr(SPRN_AMR);
@@ -4272,9 +4272,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Save userspace EBB and other register values */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- ebb_regs[0] = mfspr(SPRN_EBBHR);
- ebb_regs[1] = mfspr(SPRN_EBBRR);
- ebb_regs[2] = mfspr(SPRN_BESCR);
+ ebb_regs[0] = mfspr_r(SPRN_EBBHR);
+ ebb_regs[1] = mfspr_r(SPRN_EBBRR);
+ ebb_regs[2] = mfspr_r(SPRN_BESCR);
user_tar = mfspr(SPRN_TAR);
}
user_vrsave = mfspr(SPRN_VRSAVE);
@@ -4320,9 +4320,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Restore userspace EBB and other register values */
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- mtspr(SPRN_EBBHR, ebb_regs[0]);
- mtspr(SPRN_EBBRR, ebb_regs[1]);
- mtspr(SPRN_BESCR, ebb_regs[2]);
+ mtspr_r(SPRN_EBBHR, ebb_regs[0]);
+ mtspr_r(SPRN_EBBRR, ebb_regs[1]);
+ mtspr_r(SPRN_BESCR, ebb_regs[2]);
mtspr(SPRN_TAR, user_tar);
mtspr(SPRN_FSCR, current->thread.fscr);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index faebcbb8c4db..8e305ca04dc5 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,15 +810,27 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_CIABR, r7
mtspr SPRN_TAR, r8
ld r5, VCPU_IC(r4)
- ld r8, VCPU_EBBHR(r4)
mtspr SPRN_IC, r5
- mtspr SPRN_EBBHR, r8
- ld r5, VCPU_EBBRR(r4)
- ld r6, VCPU_BESCR(r4)
+
+ /* EBB and TM are disabled for secure VMs so skip them */
+ mfmsr r8
+ andis. r8, r8, MSR_S@high
+ bne clear_ebb0
+ ld r5, VCPU_EBBHR(r4)
+ ld r6, VCPU_EBBRR(r4)
+ ld r7, VCPU_BESCR(r4)
+ b store_ebb0
+clear_ebb0:
+ li r5, 0
+ li r6, 0
+ li r7, 0
+store_ebb0:
+ mtspr SPRN_EBBHR, r5
+ mtspr SPRN_EBBRR, r6
+ mtspr SPRN_BESCR, r7
+
lwz r7, VCPU_GUEST_PID(r4)
ld r8, VCPU_WORT(r4)
- mtspr SPRN_EBBRR, r5
- mtspr SPRN_BESCR, r6
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
BEGIN_FTR_SECTION
@@ -1615,14 +1627,26 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mfspr r7, SPRN_TAR
std r5, VCPU_IC(r9)
std r7, VCPU_TAR(r9)
- mfspr r8, SPRN_EBBHR
- std r8, VCPU_EBBHR(r9)
- mfspr r5, SPRN_EBBRR
- mfspr r6, SPRN_BESCR
+
+ /* EBB and TM are disabled for secure VMs so skip them */
+ mfmsr r8
+ andis. r8, r8, MSR_S@high
+ bne clear_ebb1
+ mfspr r5, SPRN_EBBHR
+ mfspr r6, SPRN_EBBRR
+ mfspr r7, SPRN_BESCR
+ b store_ebb1
+clear_ebb1:
+ li r5, 0
+ li r6, 0
+ li r7, 0
+store_ebb1:
+ std r5, VCPU_EBBHR(r9)
+ std r6, VCPU_EBBRR(r9)
+ std r7, VCPU_BESCR(r9)
+
mfspr r7, SPRN_PID
mfspr r8, SPRN_WORT
- std r5, VCPU_EBBRR(r9)
- std r6, VCPU_BESCR(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index 217246279dfa..bc3071c0a436 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -45,18 +45,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
return 0;
- bescr = mfspr(SPRN_BESCR);
+ bescr = mfspr_r(SPRN_BESCR);
/* expect to see a S->T transition requested */
if (((bescr >> 30) & 3) != 2)
return 0;
bescr &= ~BESCR_GE;
if (instr & (1 << 11))
bescr |= BESCR_GE;
- mtspr(SPRN_BESCR, bescr);
+ mtspr_r(SPRN_BESCR, bescr);
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
vcpu->arch.shregs.msr = msr;
vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
- vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
+ vcpu->arch.regs.nip = mfspr_r(SPRN_EBBRR);
return 1;
case PPC_INST_MTMSRD:
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index ca92e01d0bd1..4e76b2251801 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -16,6 +16,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
+#include <asm/svm.h>
#include <asm/code-patching.h>
#ifdef CONFIG_PPC64
@@ -846,9 +847,9 @@ void perf_event_print_debug(void)
if (ppmu->flags & PPMU_ARCH_207S) {
pr_info("MMCR2: %016lx EBBHR: %016lx\n",
- mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
+ mfspr(SPRN_MMCR2), mfspr_r(SPRN_EBBHR));
pr_info("EBBRR: %016lx BESCR: %016lx\n",
- mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+ mfspr_r(SPRN_EBBRR), mfspr_r(SPRN_BESCR));
}
#endif
pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n",
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index d83364ebc5c5..4dae0537f85a 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1871,7 +1871,7 @@ static void dump_207_sprs(void)
printf("sdar = %.16lx sier = %.16lx pmc6 = %.8lx\n",
mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6));
printf("ebbhr = %.16lx ebbrr = %.16lx bescr = %.16lx\n",
- mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+ mfspr_r(SPRN_EBBHR), mfspr_r(SPRN_EBBRR), mfspr_r(SPRN_BESCR));
printf("iamr = %.16lx\n", mfspr(SPRN_IAMR));
if (!(msr & MSR_HV))
--
2.17.2
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 2/2] powerpc/pseries/svm: Disable PMUs in SVMs
2019-12-18 4:30 ` Sukadev Bhattiprolu
@ 2019-12-18 4:30 ` Sukadev Bhattiprolu
-1 siblings, 0 replies; 10+ messages in thread
From: Sukadev Bhattiprolu @ 2019-12-18 4:30 UTC (permalink / raw)
To: Michael Ellerman, Paul Mackerras, linuxram
Cc: linuxppc-dev, andmike, kvm-ppc, bauerman
For now, disable hardware PMU facilities in secure virtual
machines (SVMs) to prevent any information leak between SVMs
and the (untrusted) HV.
With this, a simple 'myperf' program that uses perf_event_open()
fails for SVMs (with the corresponding fix to UV). In normal VMs and
on the bare-metal HV, the syscall and performance counters work as expected.
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
---
arch/powerpc/kernel/cpu_setup_power.S | 22 ++++++++++++++++++++++
arch/powerpc/perf/core-book3s.c | 6 ++++++
2 files changed, 28 insertions(+)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index a460298c7ddb..d5eb06e20b5a 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -206,14 +206,36 @@ __init_PMU_HV_ISA207:
blr
__init_PMU:
+#ifdef CONFIG_PPC_SVM
+ /*
+ * For now, SVMs are restricted from accessing PMU
+ * features, so skip accordingly.
+ */
+ mfmsr r5
+ rldicl r5, r5, 64-MSR_S_LG, 62
+ cmpwi r5,1
+ beq skip1
+#endif
li r5,0
mtspr SPRN_MMCRA,r5
mtspr SPRN_MMCR0,r5
mtspr SPRN_MMCR1,r5
mtspr SPRN_MMCR2,r5
+skip1:
blr
__init_PMU_ISA207:
+#ifdef CONFIG_PPC_SVM
+ /*
+ * For now, SVMs are restricted from accessing PMU
+ * features, so skip accordingly.
+ */
+ mfmsr r5
+ rldicl r5, r5, 64-MSR_S_LG, 62
+ cmpwi r5,1
+ beq skip2
+#endif
li r5,0
mtspr SPRN_MMCRS,r5
+skip2:
blr
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4e76b2251801..9e6a9f1803f6 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2275,6 +2275,12 @@ static int power_pmu_prepare_cpu(unsigned int cpu)
int register_power_pmu(struct power_pmu *pmu)
{
+ /*
+ * PMU events are not currently supported in SVMs
+ */
+ if (is_secure_guest())
+ return -ENOSYS;
+
if (ppmu)
return -EBUSY; /* something's already registered */
--
2.17.2
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH 2/2] powerpc/pseries/svm: Disable PMUs in SVMs
@ 2019-12-18 4:30 ` Sukadev Bhattiprolu
0 siblings, 0 replies; 10+ messages in thread
From: Sukadev Bhattiprolu @ 2019-12-18 4:30 UTC (permalink / raw)
To: Michael Ellerman, Paul Mackerras, linuxram
Cc: linuxppc-dev, andmike, kvm-ppc, bauerman
For now, disable hardware PMU facilities in secure virtual
machines (SVMs) to prevent any information leak between SVMs
and the (untrusted) HV.
With this, a simple 'myperf' program that uses perf_event_open()
fails for SVMs (with the corresponding fix to UV). In normal VMs and
on the bare-metal HV, the syscall and performance counters work as expected.
Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
---
arch/powerpc/kernel/cpu_setup_power.S | 22 ++++++++++++++++++++++
arch/powerpc/perf/core-book3s.c | 6 ++++++
2 files changed, 28 insertions(+)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index a460298c7ddb..d5eb06e20b5a 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -206,14 +206,36 @@ __init_PMU_HV_ISA207:
blr
__init_PMU:
+#ifdef CONFIG_PPC_SVM
+ /*
+ * For now, SVMs are restricted from accessing PMU
+ * features, so skip accordingly.
+ */
+ mfmsr r5
+ rldicl r5, r5, 64-MSR_S_LG, 62
+ cmpwi r5,1
+ beq skip1
+#endif
li r5,0
mtspr SPRN_MMCRA,r5
mtspr SPRN_MMCR0,r5
mtspr SPRN_MMCR1,r5
mtspr SPRN_MMCR2,r5
+skip1:
blr
__init_PMU_ISA207:
+#ifdef CONFIG_PPC_SVM
+ /*
+ * For now, SVMs are restricted from accessing PMU
+ * features, so skip accordingly.
+ */
+ mfmsr r5
+ rldicl r5, r5, 64-MSR_S_LG, 62
+ cmpwi r5,1
+ beq skip2
+#endif
li r5,0
mtspr SPRN_MMCRS,r5
+skip2:
blr
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4e76b2251801..9e6a9f1803f6 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2275,6 +2275,12 @@ static int power_pmu_prepare_cpu(unsigned int cpu)
int register_power_pmu(struct power_pmu *pmu)
{
+ /*
+ * PMU events are not currently supported in SVMs
+ */
+ if (is_secure_guest())
+ return -ENOSYS;
+
if (ppmu)
return -EBUSY; /* something's already registered */
--
2.17.2
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
2019-12-18 4:30 ` Sukadev Bhattiprolu
@ 2019-12-18 10:48 ` Michael Ellerman
-1 siblings, 0 replies; 10+ messages in thread
From: Michael Ellerman @ 2019-12-18 10:48 UTC (permalink / raw)
To: Sukadev Bhattiprolu, Paul Mackerras, linuxram
Cc: linuxppc-dev, andmike, kvm-ppc, bauerman
Sukadev Bhattiprolu <sukadev@linux.ibm.com> writes:
> Ultravisor disables some CPU features like EBB and BHRB in the HFSCR
> for secure virtual machines (SVMs). If the SVMs attempt to access
> related registers, they will get a Program Interrupt.
>
> Use macros/wrappers to skip accessing EBB and BHRB registers in secure
> VMs.
I continue to dislike this approach.
The result is code that at a glance looks like it's doing one thing,
ie. reading or writing an SPR, but is in fact doing nothing.
It's confusing for readers.
It also slows down all these already slow register accesses further, by
inserting an mfmsr() in front of every single one.
> diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
> index b3cbb1136bce..026eb20f6d13 100644
> --- a/arch/powerpc/include/asm/reg.h
> +++ b/arch/powerpc/include/asm/reg.h
> @@ -1379,6 +1379,41 @@ static inline void msr_check_and_clear(unsigned long bits)
> __msr_check_and_clear(bits);
> }
>
> +#ifdef CONFIG_PPC_SVM
> +/*
> + * Move from some "restricted" sprs.
> + * Secure VMs should not access some registers as the related features
> + * are disabled in the CPU. If an SVM is attempting to read from the given
> + * SPR, return 0. Otherwise behave like a normal mfspr.
> + */
> +#define mfspr_r(rn) \
> +({ \
> + unsigned long rval = 0ULL; \
> + \
> + if (!(mfmsr() & MSR_S)) \
> + asm volatile("mfspr %0," __stringify(rn) \
> + : "=r" (rval)); \
> + rval; \
> +})
> +
> +/*
> + * Move to some "restricted" sprs.
> + * Secure VMs should not access some registers as the related features
> + * are disabled in the CPU. If an SVM is attempting to write to the given
> + * SPR, ignore the write. Otherwise behave like a normal mtspr.
> + */
> +#define mtspr_r(rn, v) \
> +({ \
> + if (!(mfmsr() & MSR_S)) \
> + asm volatile("mtspr " __stringify(rn) ",%0" : \
> + : "r" ((unsigned long)(v)) \
> + : "memory"); \
> +})
> +#else
> +#define mfspr_r mfspr
> +#define mtspr_r mtspr
> +#endif
> +
> #ifdef __powerpc64__
> #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
> #define mftb() ({unsigned long rval; \
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index 639ceae7da9d..9a691452ea3b 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -1059,9 +1059,9 @@ static inline void save_sprs(struct thread_struct *t)
> t->dscr = mfspr(SPRN_DSCR);
>
> if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
> - t->bescr = mfspr(SPRN_BESCR);
> - t->ebbhr = mfspr(SPRN_EBBHR);
> - t->ebbrr = mfspr(SPRN_EBBRR);
> + t->bescr = mfspr_r(SPRN_BESCR);
> + t->ebbhr = mfspr_r(SPRN_EBBHR);
> + t->ebbrr = mfspr_r(SPRN_EBBRR);
eg. here.
This is the fast path of context switch.
That expands to:
if (!(mfmsr() & MSR_S))
asm volatile("mfspr %0, SPRN_BESCR" : "=r" (rval));
if (!(mfmsr() & MSR_S))
asm volatile("mfspr %0, SPRN_EBBHR" : "=r" (rval));
if (!(mfmsr() & MSR_S))
asm volatile("mfspr %0, SPRN_EBBRR" : "=r" (rval));
If the Ultravisor is going to disable EBB and BHRB then we need new
CPU_FTR bits for those, and the code that accesses those registers
needs to be put behind cpu_has_feature(EBB) etc.
cheers
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
@ 2019-12-18 10:48 ` Michael Ellerman
0 siblings, 0 replies; 10+ messages in thread
From: Michael Ellerman @ 2019-12-18 10:48 UTC (permalink / raw)
To: Sukadev Bhattiprolu, Paul Mackerras, linuxram
Cc: linuxppc-dev, andmike, kvm-ppc, bauerman
Sukadev Bhattiprolu <sukadev@linux.ibm.com> writes:
> Ultravisor disables some CPU features like EBB and BHRB in the HFSCR
> for secure virtual machines (SVMs). If the SVMs attempt to access
> related registers, they will get a Program Interrupt.
>
> Use macros/wrappers to skip accessing EBB and BHRB registers in secure
> VMs.
I continue to dislike this approach.
The result is code that at a glance looks like it's doing one thing,
ie. reading or writing an SPR, but is in fact doing nothing.
It's confusing for readers.
It also slows down all these already slow register accesses further, by
inserting an mfmsr() in front of every single one.
> diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
> index b3cbb1136bce..026eb20f6d13 100644
> --- a/arch/powerpc/include/asm/reg.h
> +++ b/arch/powerpc/include/asm/reg.h
> @@ -1379,6 +1379,41 @@ static inline void msr_check_and_clear(unsigned long bits)
> __msr_check_and_clear(bits);
> }
>
> +#ifdef CONFIG_PPC_SVM
> +/*
> + * Move from some "restricted" sprs.
> + * Secure VMs should not access some registers as the related features
> + * are disabled in the CPU. If an SVM is attempting to read from the given
> + * SPR, return 0. Otherwise behave like a normal mfspr.
> + */
> +#define mfspr_r(rn) \
> +({ \
> + unsigned long rval = 0ULL; \
> + \
> + if (!(mfmsr() & MSR_S)) \
> + asm volatile("mfspr %0," __stringify(rn) \
> + : "=r" (rval)); \
> + rval; \
> +})
> +
> +/*
> + * Move to some "restricted" sprs.
> + * Secure VMs should not access some registers as the related features
> + * are disabled in the CPU. If an SVM is attempting to write to the given
> + * SPR, ignore the write. Otherwise behave like a normal mtspr.
> + */
> +#define mtspr_r(rn, v) \
> +({ \
> + if (!(mfmsr() & MSR_S)) \
> + asm volatile("mtspr " __stringify(rn) ",%0" : \
> + : "r" ((unsigned long)(v)) \
> + : "memory"); \
> +})
> +#else
> +#define mfspr_r mfspr
> +#define mtspr_r mtspr
> +#endif
> +
> #ifdef __powerpc64__
> #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
> #define mftb() ({unsigned long rval; \
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index 639ceae7da9d..9a691452ea3b 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -1059,9 +1059,9 @@ static inline void save_sprs(struct thread_struct *t)
> t->dscr = mfspr(SPRN_DSCR);
>
> if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
> - t->bescr = mfspr(SPRN_BESCR);
> - t->ebbhr = mfspr(SPRN_EBBHR);
> - t->ebbrr = mfspr(SPRN_EBBRR);
> + t->bescr = mfspr_r(SPRN_BESCR);
> + t->ebbhr = mfspr_r(SPRN_EBBHR);
> + t->ebbrr = mfspr_r(SPRN_EBBRR);
eg. here.
This is the fast path of context switch.
That expands to:
if (!(mfmsr() & MSR_S))
asm volatile("mfspr %0, SPRN_BESCR" : "=r" (rval));
if (!(mfmsr() & MSR_S))
asm volatile("mfspr %0, SPRN_EBBHR" : "=r" (rval));
if (!(mfmsr() & MSR_S))
asm volatile("mfspr %0, SPRN_EBBRR" : "=r" (rval));
If the Ultravisor is going to disable EBB and BHRB then we need new
CPU_FTR bits for those, and the code that accesses those registers
needs to be put behind cpu_has_feature(EBB) etc.
cheers
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
2019-12-18 10:48 ` Michael Ellerman
@ 2019-12-18 23:57 ` Sukadev Bhattiprolu
-1 siblings, 0 replies; 10+ messages in thread
From: Sukadev Bhattiprolu @ 2019-12-18 23:57 UTC (permalink / raw)
To: Michael Ellerman
Cc: andmike, linuxram, kvm-ppc, linuxppc-dev, Sukadev Bhattiprolu, bauerman
Michael Ellerman [mpe@ellerman.id.au] wrote:
>
> eg. here.
>
> This is the fast path of context switch.
>
> That expands to:
>
> if (!(mfmsr() & MSR_S))
> asm volatile("mfspr %0, SPRN_BESCR" : "=r" (rval));
> if (!(mfmsr() & MSR_S))
> asm volatile("mfspr %0, SPRN_EBBHR" : "=r" (rval));
> if (!(mfmsr() & MSR_S))
> asm volatile("mfspr %0, SPRN_EBBRR" : "=r" (rval));
>
Yes, should have optimized this at least :-)
>
> If the Ultravisor is going to disable EBB and BHRB then we need new
> CPU_FTR bits for those, and the code that accesses those registers
> needs to be put behind cpu_has_feature(EBB) etc.
Will try the cpu_has_feature(). Would it be ok to use a single feature
bit, like UV or make it per-register group as that could need more
feature bits?
Thanks,
Sukadev
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
@ 2019-12-18 23:57 ` Sukadev Bhattiprolu
0 siblings, 0 replies; 10+ messages in thread
From: Sukadev Bhattiprolu @ 2019-12-18 23:57 UTC (permalink / raw)
To: Michael Ellerman
Cc: andmike, linuxram, kvm-ppc, linuxppc-dev, Sukadev Bhattiprolu, bauerman
Michael Ellerman [mpe@ellerman.id.au] wrote:
>
> eg. here.
>
> This is the fast path of context switch.
>
> That expands to:
>
> if (!(mfmsr() & MSR_S))
> asm volatile("mfspr %0, SPRN_BESCR" : "=r" (rval));
> if (!(mfmsr() & MSR_S))
> asm volatile("mfspr %0, SPRN_EBBHR" : "=r" (rval));
> if (!(mfmsr() & MSR_S))
> asm volatile("mfspr %0, SPRN_EBBRR" : "=r" (rval));
>
Yes, should have optimized this at least :-)
>
> If the Ultravisor is going to disable EBB and BHRB then we need new
> CPU_FTR bits for those, and the code that accesses those registers
> needs to be put behind cpu_has_feature(EBB) etc.
Will try the cpu_has_feature(). Would it be ok to use a single feature
bit, like UV, or should we make it per-register group, which could need
more feature bits?
Thanks,
Sukadev
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
2019-12-18 23:57 ` Sukadev Bhattiprolu
@ 2019-12-19 10:59 ` Michael Ellerman
0 siblings, 0 replies; 10+ messages in thread
From: Michael Ellerman @ 2019-12-19 10:59 UTC (permalink / raw)
To: Sukadev Bhattiprolu
Cc: andmike, linuxram, kvm-ppc, linuxppc-dev, Sukadev Bhattiprolu, bauerman
Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com> writes:
> Michael Ellerman [mpe@ellerman.id.au] wrote:
>>
>> eg. here.
>>
>> This is the fast path of context switch.
>>
>> That expands to:
>>
>> if (!(mfmsr() & MSR_S))
>> asm volatile("mfspr %0, SPRN_BESCR" : "=r" (rval));
>> if (!(mfmsr() & MSR_S))
>> asm volatile("mfspr %0, SPRN_EBBHR" : "=r" (rval));
>> if (!(mfmsr() & MSR_S))
>> asm volatile("mfspr %0, SPRN_EBBRR" : "=r" (rval));
>>
>
> Yes, should have optimized this at least :-)
>>
>> If the Ultravisor is going to disable EBB and BHRB then we need new
>> CPU_FTR bits for those, and the code that accesses those registers
>> needs to be put behind cpu_has_feature(EBB) etc.
>
> Will try the cpu_has_feature(). Would it be ok to use a single feature
> bit, like UV, or should we make it per-register group, which could need
> more feature bits?
We already have a number of places using is_secure_guest():
arch/powerpc/include/asm/mem_encrypt.h: return is_secure_guest();
arch/powerpc/include/asm/mem_encrypt.h: return is_secure_guest();
arch/powerpc/include/asm/svm.h:#define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
arch/powerpc/kernel/machine_kexec_64.c: if (is_secure_guest() && !(image->preserve_context ||
arch/powerpc/kernel/paca.c: if (is_secure_guest())
arch/powerpc/kernel/sysfs.c: return sprintf(buf, "%u\n", is_secure_guest());
arch/powerpc/platforms/pseries/iommu.c: if (!is_secure_guest())
arch/powerpc/platforms/pseries/smp.c: if (cpu_has_feature(CPU_FTR_DBELL) && !is_secure_guest())
arch/powerpc/platforms/pseries/svm.c: if (!is_secure_guest())
Which could all (or mostly) be converted to use a cpu_has_feature(CPU_FTR_SVM).
So yeah I guess it makes sense to do that, create a CPU_FTR_SVM and set
it early in boot based on MSR_S.
You could argue it's a firmware feature, so should be FW_FEATURE_SVM,
but we don't use jump_labels for firmware features so they're not as
nice for hot-path code like register switching. Also the distinction
between CPU and firmware features is a bit arbitrary.
cheers
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs
@ 2019-12-19 10:59 ` Michael Ellerman
0 siblings, 0 replies; 10+ messages in thread
From: Michael Ellerman @ 2019-12-19 10:59 UTC (permalink / raw)
To: Sukadev Bhattiprolu
Cc: andmike, linuxram, kvm-ppc, linuxppc-dev, Sukadev Bhattiprolu, bauerman
Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com> writes:
> Michael Ellerman [mpe@ellerman.id.au] wrote:
>>
>> eg. here.
>>
>> This is the fast path of context switch.
>>
>> That expands to:
>>
>> if (!(mfmsr() & MSR_S))
>> asm volatile("mfspr %0, SPRN_BESCR" : "=r" (rval));
>> if (!(mfmsr() & MSR_S))
>> asm volatile("mfspr %0, SPRN_EBBHR" : "=r" (rval));
>> if (!(mfmsr() & MSR_S))
>> asm volatile("mfspr %0, SPRN_EBBRR" : "=r" (rval));
>>
>
> Yes, should have optimized this at least :-)
>>
>> If the Ultravisor is going to disable EBB and BHRB then we need new
>> CPU_FTR bits for those, and the code that accesses those registers
>> needs to be put behind cpu_has_feature(EBB) etc.
>
> Will try the cpu_has_feature(). Would it be ok to use a single feature
> bit, like UV or make it per-register group as that could need more
> feature bits?
We already have a number of places using is_secure_guest():
arch/powerpc/include/asm/mem_encrypt.h: return is_secure_guest();
arch/powerpc/include/asm/mem_encrypt.h: return is_secure_guest();
arch/powerpc/include/asm/svm.h:#define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
arch/powerpc/kernel/machine_kexec_64.c: if (is_secure_guest() && !(image->preserve_context ||
arch/powerpc/kernel/paca.c: if (is_secure_guest())
arch/powerpc/kernel/sysfs.c: return sprintf(buf, "%u\n", is_secure_guest());
arch/powerpc/platforms/pseries/iommu.c: if (!is_secure_guest())
arch/powerpc/platforms/pseries/smp.c: if (cpu_has_feature(CPU_FTR_DBELL) && !is_secure_guest())
arch/powerpc/platforms/pseries/svm.c: if (!is_secure_guest())
Which could all (or mostly) be converted to use a cpu_has_feature(CPU_FTR_SVM).
So yeah I guess it makes sense to do that, create a CPU_FTR_SVM and set
it early in boot based on MSR_S.
You could argue it's a firmware feature, so should be FW_FEATURE_SVM,
but we don't use jump_labels for firmware features so they're not as
nice for hot-path code like register switching. Also the distinction
between CPU and firmware features is a bit arbitrary.
cheers
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2019-12-19 11:03 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-12-18 4:30 [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs Sukadev Bhattiprolu
2019-12-18 4:30 ` Sukadev Bhattiprolu
2019-12-18 4:30 ` [PATCH 2/2] powerpc/pseries/svm: Disable PMUs in SVMs Sukadev Bhattiprolu
2019-12-18 4:30 ` Sukadev Bhattiprolu
2019-12-18 10:48 ` [PATCH 1/2] powerpc/pseries/svm: Don't access some SPRs Michael Ellerman
2019-12-18 10:48 ` Michael Ellerman
2019-12-18 23:57 ` Sukadev Bhattiprolu
2019-12-18 23:57 ` Sukadev Bhattiprolu
2019-12-19 10:59 ` Michael Ellerman
2019-12-19 10:59 ` Michael Ellerman
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.