All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers
@ 2013-07-11 10:51 ` Paul Mackerras
  0 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2013-07-11 10:51 UTC (permalink / raw)
  To: Alexander Graf; +Cc: kvm-ppc, kvm

Currently HV-style KVM does not save and restore the SIAR and SDAR
registers in the PMU (performance monitor unit) on guest entry and
exit.  The result is that performance monitoring tools in the guest
could get false information about where a program was executing and
what data it was accessing at the time of a performance monitor
interrupt.  This fixes it by saving and restoring these registers
along with the other PMU registers on guest entry/exit.

This also provides a way for userspace to access these values for a
vcpu via the one_reg interface.  There is a gap between the values for
MMCRA and SIAR in order to leave room for two more MMCR registers
that exist on POWER8.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
This is against Alex Graf's kvm-ppc-queue branch.

 arch/powerpc/include/asm/kvm_host.h     |    2 ++
 arch/powerpc/include/uapi/asm/kvm.h     |    2 ++
 arch/powerpc/kernel/asm-offsets.c       |    2 ++
 arch/powerpc/kvm/book3s_hv.c            |   12 ++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |    8 ++++++++
 5 files changed, 26 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3328353..91b833d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -498,6 +498,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e..3cf47c8 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -429,6 +429,8 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6f16ffa..ad66058 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -505,6 +505,8 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2b95c45..ee2352a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -750,6 +750,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SIAR:
+		*val = get_reg_val(id, vcpu->arch.siar);
+		break;
+	case KVM_REG_PPC_SDAR:
+		*val = get_reg_val(id, vcpu->arch.sdar);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -834,6 +840,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SIAR:
+		vcpu->arch.siar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SDAR:
+		vcpu->arch.sdar = set_reg_val(id, *val);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 60dce5b..bfb4b0a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -196,8 +196,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, VCPU_MMCR(r4)
 	ld	r5, VCPU_MMCR + 8(r4)
 	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
 	mtspr	SPRN_MMCR1, r5
 	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -1122,9 +1126,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
 	b	22f
 21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r4, SPRN_PMC2
 	mfspr	r5, SPRN_PMC3
-- 
1.7.10.4

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers
@ 2013-07-11 10:51 ` Paul Mackerras
  0 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2013-07-11 10:51 UTC (permalink / raw)
  To: Alexander Graf; +Cc: kvm-ppc, kvm

Currently HV-style KVM does not save and restore the SIAR and SDAR
registers in the PMU (performance monitor unit) on guest entry and
exit.  The result is that performance monitoring tools in the guest
could get false information about where a program was executing and
what data it was accessing at the time of a performance monitor
interrupt.  This fixes it by saving and restoring these registers
along with the other PMU registers on guest entry/exit.

This also provides a way for userspace to access these values for a
vcpu via the one_reg interface.  There is a gap between the values for
MMCRA and SIAR in order to leave room for two more MMCR registers
that exist on POWER8.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
This is against Alex Graf's kvm-ppc-queue branch.

 arch/powerpc/include/asm/kvm_host.h     |    2 ++
 arch/powerpc/include/uapi/asm/kvm.h     |    2 ++
 arch/powerpc/kernel/asm-offsets.c       |    2 ++
 arch/powerpc/kvm/book3s_hv.c            |   12 ++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |    8 ++++++++
 5 files changed, 26 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3328353..91b833d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -498,6 +498,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e..3cf47c8 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -429,6 +429,8 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6f16ffa..ad66058 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -505,6 +505,8 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2b95c45..ee2352a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -750,6 +750,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SIAR:
+		*val = get_reg_val(id, vcpu->arch.siar);
+		break;
+	case KVM_REG_PPC_SDAR:
+		*val = get_reg_val(id, vcpu->arch.sdar);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -834,6 +840,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SIAR:
+		vcpu->arch.siar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SDAR:
+		vcpu->arch.sdar = set_reg_val(id, *val);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 60dce5b..bfb4b0a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -196,8 +196,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, VCPU_MMCR(r4)
 	ld	r5, VCPU_MMCR + 8(r4)
 	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
 	mtspr	SPRN_MMCR1, r5
 	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -1122,9 +1126,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
 	b	22f
 21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r4, SPRN_PMC2
 	mfspr	r5, SPRN_PMC3
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers
  2013-07-11 10:51 ` Paul Mackerras
@ 2013-07-11 11:06   ` Alexander Graf
  -1 siblings, 0 replies; 14+ messages in thread
From: Alexander Graf @ 2013-07-11 11:06 UTC (permalink / raw)
  To: Paul Mackerras; +Cc: kvm-ppc, kvm


On 11.07.2013, at 12:51, Paul Mackerras wrote:

> Currently HV-style KVM does not save and restore the SIAR and SDAR
> registers in the PMU (performance monitor unit) on guest entry and
> exit.  The result is that performance monitoring tools in the guest
> could get false information about where a program was executing and
> what data it was accessing at the time of a performance monitor
> interrupt.  This fixes it by saving and restoring these registers
> along with the other PMU registers on guest entry/exit.
> 
> This also provides a way for userspace to access these values for a
> vcpu via the one_reg interface.  There is a gap between the values for
> MMCRA and SIAR in order to leave room for two more MMCR registers
> that exist on POWER8.

Can we add the ONE_REG defines for those right away in a preceding patch? No need for the implementation yet if that requires additional work that needs to go through Ben.

> 
> Signed-off-by: Paul Mackerras <paulus@samba.org>
> ---
> This is against Alex Graf's kvm-ppc-queue branch.
> 
> arch/powerpc/include/asm/kvm_host.h     |    2 ++
> arch/powerpc/include/uapi/asm/kvm.h     |    2 ++
> arch/powerpc/kernel/asm-offsets.c       |    2 ++
> arch/powerpc/kvm/book3s_hv.c            |   12 ++++++++++++
> arch/powerpc/kvm/book3s_hv_rmhandlers.S |    8 ++++++++
> 5 files changed, 26 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 3328353..91b833d 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -498,6 +498,8 @@ struct kvm_vcpu_arch {
> 
> 	u64 mmcr[3];
> 	u32 pmc[8];
> +	u64 siar;
> +	u64 sdar;
> 
> #ifdef CONFIG_KVM_EXIT_TIMING
> 	struct mutex exit_timing_lock;
> diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
> index 0fb1a6e..3cf47c8 100644
> --- a/arch/powerpc/include/uapi/asm/kvm.h
> +++ b/arch/powerpc/include/uapi/asm/kvm.h
> @@ -429,6 +429,8 @@ struct kvm_get_htab_header {
> #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
> #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
> #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
> +#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
> +#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)

These are missing entries in Documentation/virtual/kvm/api.txt :).


Otherwise looks good.

Alex

> 
> #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
> #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index 6f16ffa..ad66058 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -505,6 +505,8 @@ int main(void)
> 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
> 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
> 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
> +	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
> +	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
> 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
> 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
> 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 2b95c45..ee2352a 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -750,6 +750,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
> 		i = id - KVM_REG_PPC_PMC1;
> 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
> 		break;
> +	case KVM_REG_PPC_SIAR:
> +		*val = get_reg_val(id, vcpu->arch.siar);
> +		break;
> +	case KVM_REG_PPC_SDAR:
> +		*val = get_reg_val(id, vcpu->arch.sdar);
> +		break;
> #ifdef CONFIG_VSX
> 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
> 		if (cpu_has_feature(CPU_FTR_VSX)) {
> @@ -834,6 +840,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
> 		i = id - KVM_REG_PPC_PMC1;
> 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
> 		break;
> +	case KVM_REG_PPC_SIAR:
> +		vcpu->arch.siar = set_reg_val(id, *val);
> +		break;
> +	case KVM_REG_PPC_SDAR:
> +		vcpu->arch.sdar = set_reg_val(id, *val);
> +		break;
> #ifdef CONFIG_VSX
> 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
> 		if (cpu_has_feature(CPU_FTR_VSX)) {
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index 60dce5b..bfb4b0a 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -196,8 +196,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
> 	ld	r3, VCPU_MMCR(r4)
> 	ld	r5, VCPU_MMCR + 8(r4)
> 	ld	r6, VCPU_MMCR + 16(r4)
> +	ld	r7, VCPU_SIAR(r4)
> +	ld	r8, VCPU_SDAR(r4)
> 	mtspr	SPRN_MMCR1, r5
> 	mtspr	SPRN_MMCRA, r6
> +	mtspr	SPRN_SIAR, r7
> +	mtspr	SPRN_SDAR, r8
> 	mtspr	SPRN_MMCR0, r3
> 	isync
> 
> @@ -1122,9 +1126,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
> 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
> 	b	22f
> 21:	mfspr	r5, SPRN_MMCR1
> +	mfspr	r7, SPRN_SIAR
> +	mfspr	r8, SPRN_SDAR
> 	std	r4, VCPU_MMCR(r9)
> 	std	r5, VCPU_MMCR + 8(r9)
> 	std	r6, VCPU_MMCR + 16(r9)
> +	std	r7, VCPU_SIAR(r9)
> +	std	r8, VCPU_SDAR(r9)
> 	mfspr	r3, SPRN_PMC1
> 	mfspr	r4, SPRN_PMC2
> 	mfspr	r5, SPRN_PMC3
> -- 
> 1.7.10.4
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers
@ 2013-07-11 11:06   ` Alexander Graf
  0 siblings, 0 replies; 14+ messages in thread
From: Alexander Graf @ 2013-07-11 11:06 UTC (permalink / raw)
  To: Paul Mackerras; +Cc: kvm-ppc, kvm


On 11.07.2013, at 12:51, Paul Mackerras wrote:

> Currently HV-style KVM does not save and restore the SIAR and SDAR
> registers in the PMU (performance monitor unit) on guest entry and
> exit.  The result is that performance monitoring tools in the guest
> could get false information about where a program was executing and
> what data it was accessing at the time of a performance monitor
> interrupt.  This fixes it by saving and restoring these registers
> along with the other PMU registers on guest entry/exit.
> 
> This also provides a way for userspace to access these values for a
> vcpu via the one_reg interface.  There is a gap between the values for
> MMCRA and SIAR in order to leave room for two more MMCR registers
> that exist on POWER8.

Can we add the ONE_REG defines for those right away in a preceding patch? No need for the implementation yet if that requires additional work that needs to go through Ben.

> 
> Signed-off-by: Paul Mackerras <paulus@samba.org>
> ---
> This is against Alex Graf's kvm-ppc-queue branch.
> 
> arch/powerpc/include/asm/kvm_host.h     |    2 ++
> arch/powerpc/include/uapi/asm/kvm.h     |    2 ++
> arch/powerpc/kernel/asm-offsets.c       |    2 ++
> arch/powerpc/kvm/book3s_hv.c            |   12 ++++++++++++
> arch/powerpc/kvm/book3s_hv_rmhandlers.S |    8 ++++++++
> 5 files changed, 26 insertions(+)
> 
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 3328353..91b833d 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -498,6 +498,8 @@ struct kvm_vcpu_arch {
> 
> 	u64 mmcr[3];
> 	u32 pmc[8];
> +	u64 siar;
> +	u64 sdar;
> 
> #ifdef CONFIG_KVM_EXIT_TIMING
> 	struct mutex exit_timing_lock;
> diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
> index 0fb1a6e..3cf47c8 100644
> --- a/arch/powerpc/include/uapi/asm/kvm.h
> +++ b/arch/powerpc/include/uapi/asm/kvm.h
> @@ -429,6 +429,8 @@ struct kvm_get_htab_header {
> #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
> #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
> #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
> +#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
> +#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)

These are missing entries in Documentation/virtual/kvm/api.txt :).


Otherwise looks good.

Alex

> 
> #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
> #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index 6f16ffa..ad66058 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -505,6 +505,8 @@ int main(void)
> 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
> 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
> 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
> +	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
> +	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
> 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
> 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
> 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 2b95c45..ee2352a 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -750,6 +750,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
> 		i = id - KVM_REG_PPC_PMC1;
> 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
> 		break;
> +	case KVM_REG_PPC_SIAR:
> +		*val = get_reg_val(id, vcpu->arch.siar);
> +		break;
> +	case KVM_REG_PPC_SDAR:
> +		*val = get_reg_val(id, vcpu->arch.sdar);
> +		break;
> #ifdef CONFIG_VSX
> 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
> 		if (cpu_has_feature(CPU_FTR_VSX)) {
> @@ -834,6 +840,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
> 		i = id - KVM_REG_PPC_PMC1;
> 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
> 		break;
> +	case KVM_REG_PPC_SIAR:
> +		vcpu->arch.siar = set_reg_val(id, *val);
> +		break;
> +	case KVM_REG_PPC_SDAR:
> +		vcpu->arch.sdar = set_reg_val(id, *val);
> +		break;
> #ifdef CONFIG_VSX
> 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
> 		if (cpu_has_feature(CPU_FTR_VSX)) {
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index 60dce5b..bfb4b0a 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -196,8 +196,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
> 	ld	r3, VCPU_MMCR(r4)
> 	ld	r5, VCPU_MMCR + 8(r4)
> 	ld	r6, VCPU_MMCR + 16(r4)
> +	ld	r7, VCPU_SIAR(r4)
> +	ld	r8, VCPU_SDAR(r4)
> 	mtspr	SPRN_MMCR1, r5
> 	mtspr	SPRN_MMCRA, r6
> +	mtspr	SPRN_SIAR, r7
> +	mtspr	SPRN_SDAR, r8
> 	mtspr	SPRN_MMCR0, r3
> 	isync
> 
> @@ -1122,9 +1126,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
> 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
> 	b	22f
> 21:	mfspr	r5, SPRN_MMCR1
> +	mfspr	r7, SPRN_SIAR
> +	mfspr	r8, SPRN_SDAR
> 	std	r4, VCPU_MMCR(r9)
> 	std	r5, VCPU_MMCR + 8(r9)
> 	std	r6, VCPU_MMCR + 16(r9)
> +	std	r7, VCPU_SIAR(r9)
> +	std	r8, VCPU_SDAR(r9)
> 	mfspr	r3, SPRN_PMC1
> 	mfspr	r4, SPRN_PMC2
> 	mfspr	r5, SPRN_PMC3
> -- 
> 1.7.10.4
> 


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state
  2013-07-11 10:51 ` Paul Mackerras
@ 2016-11-07  4:09 ` Paul Mackerras
  -1 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2016-11-07  4:09 UTC (permalink / raw)
  To: kvm, kvm-ppc

When switching from/to a guest that has a transaction in progress,
we need to save/restore the checkpointed register state.  Although
XER is part of the CPU state that gets checkpointed, the code that
does this saving and restoring doesn't save/restore XER.

This fixes it by saving and restoring the XER.  To allow userspace
to read/write the checkpointed XER value, we also add a new ONE_REG
specifier.

The visible effect of this bug is that the guest may see its XER
value being corrupted when it uses transactions.

Fixes: e4e38121507a ("KVM: PPC: Book3S HV: Add transactional memory support")
Fixes: 0a8eccefcb34 ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
Cc: stable@vger.kernel.org # v3.15+
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 Documentation/virtual/kvm/api.txt       | 1 +
 arch/powerpc/include/asm/kvm_host.h     | 1 +
 arch/powerpc/include/uapi/asm/kvm.h     | 1 +
 arch/powerpc/kernel/asm-offsets.c       | 1 +
 arch/powerpc/kvm/book3s_hv.c            | 6 ++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 ++++
 6 files changed, 14 insertions(+)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 739db9a..a7596e9 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2039,6 +2039,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR           | 32
   PPC   | KVM_REG_PPC_TM_DSCR           | 64
   PPC   | KVM_REG_PPC_TM_TAR            | 64
+  PPC   | KVM_REG_PPC_TM_XER            | 64
         |                               |
   MIPS  | KVM_REG_MIPS_R0               | 64
           ...
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 28350a2..5e12e19 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -546,6 +546,7 @@ struct kvm_vcpu_arch {
 	u64 tfiar;
 
 	u32 cr_tm;
+	u64 xer_tm;
 	u64 lr_tm;
 	u64 ctr_tm;
 	u64 amr_tm;
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35..0fb1326 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -596,6 +596,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index caec7bf..c833d88 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -569,6 +569,7 @@ int main(void)
 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3686471..094deb6 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1288,6 +1288,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		*val = get_reg_val(id, vcpu->arch.cr_tm);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		*val = get_reg_val(id, vcpu->arch.xer_tm);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		*val = get_reg_val(id, vcpu->arch.lr_tm);
 		break;
@@ -1498,6 +1501,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		vcpu->arch.cr_tm = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		vcpu->arch.xer_tm = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		vcpu->arch.lr_tm = set_reg_val(id, *val);
 		break;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c3c1d1b..6f81adb 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2600,11 +2600,13 @@ kvmppc_save_tm:
 	mfctr	r7
 	mfspr	r8, SPRN_AMR
 	mfspr	r10, SPRN_TAR
+	mfxer	r11
 	std	r5, VCPU_LR_TM(r9)
 	stw	r6, VCPU_CR_TM(r9)
 	std	r7, VCPU_CTR_TM(r9)
 	std	r8, VCPU_AMR_TM(r9)
 	std	r10, VCPU_TAR_TM(r9)
+	std	r11, VCPU_XER_TM(r9)
 
 	/* Restore r12 as trap number. */
 	lwz	r12, VCPU_TRAP(r9)
@@ -2697,11 +2699,13 @@ kvmppc_restore_tm:
 	ld	r7, VCPU_CTR_TM(r4)
 	ld	r8, VCPU_AMR_TM(r4)
 	ld	r9, VCPU_TAR_TM(r4)
+	ld	r10, VCPU_XER_TM(r4)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
 	mtspr	SPRN_AMR, r8
 	mtspr	SPRN_TAR, r9
+	mtxer	r10
 
 	/*
 	 * Load up PPR and DSCR values but don't put them in the actual SPRs
-- 
2.10.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state
@ 2016-11-07  4:09 ` Paul Mackerras
  0 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2016-11-07  4:09 UTC (permalink / raw)
  To: kvm, kvm-ppc

When switching from/to a guest that has a transaction in progress,
we need to save/restore the checkpointed register state.  Although
XER is part of the CPU state that gets checkpointed, the code that
does this saving and restoring doesn't save/restore XER.

This fixes it by saving and restoring the XER.  To allow userspace
to read/write the checkpointed XER value, we also add a new ONE_REG
specifier.

The visible effect of this bug is that the guest may see its XER
value being corrupted when it uses transactions.

Fixes: e4e38121507a ("KVM: PPC: Book3S HV: Add transactional memory support")
Fixes: 0a8eccefcb34 ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
Cc: stable@vger.kernel.org # v3.15+
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
 Documentation/virtual/kvm/api.txt       | 1 +
 arch/powerpc/include/asm/kvm_host.h     | 1 +
 arch/powerpc/include/uapi/asm/kvm.h     | 1 +
 arch/powerpc/kernel/asm-offsets.c       | 1 +
 arch/powerpc/kvm/book3s_hv.c            | 6 ++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 ++++
 6 files changed, 14 insertions(+)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 739db9a..a7596e9 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2039,6 +2039,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR           | 32
   PPC   | KVM_REG_PPC_TM_DSCR           | 64
   PPC   | KVM_REG_PPC_TM_TAR            | 64
+  PPC   | KVM_REG_PPC_TM_XER            | 64
         |                               |
   MIPS  | KVM_REG_MIPS_R0               | 64
           ...
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 28350a2..5e12e19 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -546,6 +546,7 @@ struct kvm_vcpu_arch {
 	u64 tfiar;
 
 	u32 cr_tm;
+	u64 xer_tm;
 	u64 lr_tm;
 	u64 ctr_tm;
 	u64 amr_tm;
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35..0fb1326 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -596,6 +596,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index caec7bf..c833d88 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -569,6 +569,7 @@ int main(void)
 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3686471..094deb6 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1288,6 +1288,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		*val = get_reg_val(id, vcpu->arch.cr_tm);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		*val = get_reg_val(id, vcpu->arch.xer_tm);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		*val = get_reg_val(id, vcpu->arch.lr_tm);
 		break;
@@ -1498,6 +1501,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		vcpu->arch.cr_tm = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		vcpu->arch.xer_tm = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		vcpu->arch.lr_tm = set_reg_val(id, *val);
 		break;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c3c1d1b..6f81adb 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2600,11 +2600,13 @@ kvmppc_save_tm:
 	mfctr	r7
 	mfspr	r8, SPRN_AMR
 	mfspr	r10, SPRN_TAR
+	mfxer	r11
 	std	r5, VCPU_LR_TM(r9)
 	stw	r6, VCPU_CR_TM(r9)
 	std	r7, VCPU_CTR_TM(r9)
 	std	r8, VCPU_AMR_TM(r9)
 	std	r10, VCPU_TAR_TM(r9)
+	std	r11, VCPU_XER_TM(r9)
 
 	/* Restore r12 as trap number. */
 	lwz	r12, VCPU_TRAP(r9)
@@ -2697,11 +2699,13 @@ kvmppc_restore_tm:
 	ld	r7, VCPU_CTR_TM(r4)
 	ld	r8, VCPU_AMR_TM(r4)
 	ld	r9, VCPU_TAR_TM(r4)
+	ld	r10, VCPU_XER_TM(r4)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
 	mtspr	SPRN_AMR, r8
 	mtspr	SPRN_TAR, r9
+	mtxer	r10
 
 	/*
 	 * Load up PPR and DSCR values but don't put them in the actual SPRs
-- 
2.10.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state
  2016-11-07  4:09 ` Paul Mackerras
@ 2016-11-07  7:38   ` Thomas Huth
  -1 siblings, 0 replies; 14+ messages in thread
From: Thomas Huth @ 2016-11-07  7:38 UTC (permalink / raw)
  To: Paul Mackerras, kvm, kvm-ppc

On 07.11.2016 05:09, Paul Mackerras wrote:
> When switching from/to a guest that has a transaction in progress,
> we need to save/restore the checkpointed register state.  Although
> XER is part of the CPU state that gets checkpointed, the code that
> does this saving and restoring doesn't save/restore XER.
> 
> This fixes it by saving and restoring the XER.  To allow userspace
> to read/write the checkpointed XER value, we also add a new ONE_REG
> specifier.
> 
> The visible effect of this bug is that the guest may see its XER
> value being corrupted when it uses transactions.
> 
> Fixes: e4e38121507a ("KVM: PPC: Book3S HV: Add transactional memory support")
> Fixes: 0a8eccefcb34 ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
> Cc: stable@vger.kernel.org # v3.15+
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
> ---
>  Documentation/virtual/kvm/api.txt       | 1 +
>  arch/powerpc/include/asm/kvm_host.h     | 1 +
>  arch/powerpc/include/uapi/asm/kvm.h     | 1 +
>  arch/powerpc/kernel/asm-offsets.c       | 1 +
>  arch/powerpc/kvm/book3s_hv.c            | 6 ++++++
>  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 ++++
>  6 files changed, 14 insertions(+)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 739db9a..a7596e9 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -2039,6 +2039,7 @@ registers, find a list below:
>    PPC   | KVM_REG_PPC_TM_VSCR           | 32
>    PPC   | KVM_REG_PPC_TM_DSCR           | 64
>    PPC   | KVM_REG_PPC_TM_TAR            | 64
> +  PPC   | KVM_REG_PPC_TM_XER            | 64
>          |                               |
>    MIPS  | KVM_REG_MIPS_R0               | 64
>            ...
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 28350a2..5e12e19 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -546,6 +546,7 @@ struct kvm_vcpu_arch {
>  	u64 tfiar;
>  
>  	u32 cr_tm;
> +	u64 xer_tm;
>  	u64 lr_tm;
>  	u64 ctr_tm;
>  	u64 amr_tm;
> diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
> index c93cf35..0fb1326 100644
> --- a/arch/powerpc/include/uapi/asm/kvm.h
> +++ b/arch/powerpc/include/uapi/asm/kvm.h
> @@ -596,6 +596,7 @@ struct kvm_get_htab_header {
>  #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
>  #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
>  #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
> +#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
>  
>  /* PPC64 eXternal Interrupt Controller Specification */
>  #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index caec7bf..c833d88 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -569,6 +569,7 @@ int main(void)
>  	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
>  	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
>  	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
> +	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
>  	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
>  	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
>  	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 3686471..094deb6 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -1288,6 +1288,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
>  	case KVM_REG_PPC_TM_CR:
>  		*val = get_reg_val(id, vcpu->arch.cr_tm);
>  		break;
> +	case KVM_REG_PPC_TM_XER:
> +		*val = get_reg_val(id, vcpu->arch.xer_tm);
> +		break;
>  	case KVM_REG_PPC_TM_LR:
>  		*val = get_reg_val(id, vcpu->arch.lr_tm);
>  		break;
> @@ -1498,6 +1501,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
>  	case KVM_REG_PPC_TM_CR:
>  		vcpu->arch.cr_tm = set_reg_val(id, *val);
>  		break;
> +	case KVM_REG_PPC_TM_XER:
> +		vcpu->arch.xer_tm = set_reg_val(id, *val);
> +		break;
>  	case KVM_REG_PPC_TM_LR:
>  		vcpu->arch.lr_tm = set_reg_val(id, *val);
>  		break;
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index c3c1d1b..6f81adb 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -2600,11 +2600,13 @@ kvmppc_save_tm:
>  	mfctr	r7
>  	mfspr	r8, SPRN_AMR
>  	mfspr	r10, SPRN_TAR
> +	mfxer	r11
>  	std	r5, VCPU_LR_TM(r9)
>  	stw	r6, VCPU_CR_TM(r9)
>  	std	r7, VCPU_CTR_TM(r9)
>  	std	r8, VCPU_AMR_TM(r9)
>  	std	r10, VCPU_TAR_TM(r9)
> +	std	r11, VCPU_XER_TM(r9)
>  
>  	/* Restore r12 as trap number. */
>  	lwz	r12, VCPU_TRAP(r9)
> @@ -2697,11 +2699,13 @@ kvmppc_restore_tm:
>  	ld	r7, VCPU_CTR_TM(r4)
>  	ld	r8, VCPU_AMR_TM(r4)
>  	ld	r9, VCPU_TAR_TM(r4)
> +	ld	r10, VCPU_XER_TM(r4)
>  	mtlr	r5
>  	mtcr	r6
>  	mtctr	r7
>  	mtspr	SPRN_AMR, r8
>  	mtspr	SPRN_TAR, r9
> +	mtxer	r10
>  
>  	/*
>  	 * Load up PPR and DSCR values but don't put them in the actual SPRs
> 

Reviewed-by: Thomas Huth <thuth@redhat.com>


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state
@ 2016-11-07  7:38   ` Thomas Huth
  0 siblings, 0 replies; 14+ messages in thread
From: Thomas Huth @ 2016-11-07  7:38 UTC (permalink / raw)
  To: Paul Mackerras, kvm, kvm-ppc

On 07.11.2016 05:09, Paul Mackerras wrote:
> When switching from/to a guest that has a transaction in progress,
> we need to save/restore the checkpointed register state.  Although
> XER is part of the CPU state that gets checkpointed, the code that
> does this saving and restoring doesn't save/restore XER.
> 
> This fixes it by saving and restoring the XER.  To allow userspace
> to read/write the checkpointed XER value, we also add a new ONE_REG
> specifier.
> 
> The visible effect of this bug is that the guest may see its XER
> value being corrupted when it uses transactions.
> 
> Fixes: e4e38121507a ("KVM: PPC: Book3S HV: Add transactional memory support")
> Fixes: 0a8eccefcb34 ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
> Cc: stable@vger.kernel.org # v3.15+
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
> ---
>  Documentation/virtual/kvm/api.txt       | 1 +
>  arch/powerpc/include/asm/kvm_host.h     | 1 +
>  arch/powerpc/include/uapi/asm/kvm.h     | 1 +
>  arch/powerpc/kernel/asm-offsets.c       | 1 +
>  arch/powerpc/kvm/book3s_hv.c            | 6 ++++++
>  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 ++++
>  6 files changed, 14 insertions(+)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 739db9a..a7596e9 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -2039,6 +2039,7 @@ registers, find a list below:
>    PPC   | KVM_REG_PPC_TM_VSCR           | 32
>    PPC   | KVM_REG_PPC_TM_DSCR           | 64
>    PPC   | KVM_REG_PPC_TM_TAR            | 64
> +  PPC   | KVM_REG_PPC_TM_XER            | 64
>          |                               |
>    MIPS  | KVM_REG_MIPS_R0               | 64
>            ...
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index 28350a2..5e12e19 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -546,6 +546,7 @@ struct kvm_vcpu_arch {
>  	u64 tfiar;
>  
>  	u32 cr_tm;
> +	u64 xer_tm;
>  	u64 lr_tm;
>  	u64 ctr_tm;
>  	u64 amr_tm;
> diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
> index c93cf35..0fb1326 100644
> --- a/arch/powerpc/include/uapi/asm/kvm.h
> +++ b/arch/powerpc/include/uapi/asm/kvm.h
> @@ -596,6 +596,7 @@ struct kvm_get_htab_header {
>  #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
>  #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
>  #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
> +#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
>  
>  /* PPC64 eXternal Interrupt Controller Specification */
>  #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
> diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
> index caec7bf..c833d88 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -569,6 +569,7 @@ int main(void)
>  	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
>  	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
>  	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
> +	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
>  	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
>  	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
>  	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 3686471..094deb6 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -1288,6 +1288,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
>  	case KVM_REG_PPC_TM_CR:
>  		*val = get_reg_val(id, vcpu->arch.cr_tm);
>  		break;
> +	case KVM_REG_PPC_TM_XER:
> +		*val = get_reg_val(id, vcpu->arch.xer_tm);
> +		break;
>  	case KVM_REG_PPC_TM_LR:
>  		*val = get_reg_val(id, vcpu->arch.lr_tm);
>  		break;
> @@ -1498,6 +1501,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
>  	case KVM_REG_PPC_TM_CR:
>  		vcpu->arch.cr_tm = set_reg_val(id, *val);
>  		break;
> +	case KVM_REG_PPC_TM_XER:
> +		vcpu->arch.xer_tm = set_reg_val(id, *val);
> +		break;
>  	case KVM_REG_PPC_TM_LR:
>  		vcpu->arch.lr_tm = set_reg_val(id, *val);
>  		break;
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index c3c1d1b..6f81adb 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -2600,11 +2600,13 @@ kvmppc_save_tm:
>  	mfctr	r7
>  	mfspr	r8, SPRN_AMR
>  	mfspr	r10, SPRN_TAR
> +	mfxer	r11
>  	std	r5, VCPU_LR_TM(r9)
>  	stw	r6, VCPU_CR_TM(r9)
>  	std	r7, VCPU_CTR_TM(r9)
>  	std	r8, VCPU_AMR_TM(r9)
>  	std	r10, VCPU_TAR_TM(r9)
> +	std	r11, VCPU_XER_TM(r9)
>  
>  	/* Restore r12 as trap number. */
>  	lwz	r12, VCPU_TRAP(r9)
> @@ -2697,11 +2699,13 @@ kvmppc_restore_tm:
>  	ld	r7, VCPU_CTR_TM(r4)
>  	ld	r8, VCPU_AMR_TM(r4)
>  	ld	r9, VCPU_TAR_TM(r4)
> +	ld	r10, VCPU_XER_TM(r4)
>  	mtlr	r5
>  	mtcr	r6
>  	mtctr	r7
>  	mtspr	SPRN_AMR, r8
>  	mtspr	SPRN_TAR, r9
> +	mtxer	r10
>  
>  	/*
>  	 * Load up PPR and DSCR values but don't put them in the actual SPRs
> 

Reviewed-by: Thomas Huth <thuth@redhat.com>


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state
  2016-11-07  4:09 ` Paul Mackerras
@ 2016-11-21  5:07   ` Paul Mackerras
  -1 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2016-11-21  5:07 UTC (permalink / raw)
  To: kvm, kvm-ppc

On Mon, Nov 07, 2016 at 03:09:58PM +1100, Paul Mackerras wrote:
> When switching from/to a guest that has a transaction in progress,
> we need to save/restore the checkpointed register state.  Although
> XER is part of the CPU state that gets checkpointed, the code that
> does this saving and restoring doesn't save/restore XER.
> 
> This fixes it by saving and restoring the XER.  To allow userspace
> to read/write the checkpointed XER value, we also add a new ONE_REG
> specifier.
> 
> The visible effect of this bug is that the guest may see its XER
> value being corrupted when it uses transactions.
> 
> Fixes: e4e38121507a ("KVM: PPC: Book3S HV: Add transactional memory support")
> Fixes: 0a8eccefcb34 ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
> Cc: stable@vger.kernel.org # v3.15+
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>

Applied to kvm-ppc-next.

Paul.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state
@ 2016-11-21  5:07   ` Paul Mackerras
  0 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2016-11-21  5:07 UTC (permalink / raw)
  To: kvm, kvm-ppc

On Mon, Nov 07, 2016 at 03:09:58PM +1100, Paul Mackerras wrote:
> When switching from/to a guest that has a transaction in progress,
> we need to save/restore the checkpointed register state.  Although
> XER is part of the CPU state that gets checkpointed, the code that
> does this saving and restoring doesn't save/restore XER.
> 
> This fixes it by saving and restoring the XER.  To allow userspace
> to read/write the checkpointed XER value, we also add a new ONE_REG
> specifier.
> 
> The visible effect of this bug is that the guest may see its XER
> value being corrupted when it uses transactions.
> 
> Fixes: e4e38121507a ("KVM: PPC: Book3S HV: Add transactional memory support")
> Fixes: 0a8eccefcb34 ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
> Cc: stable@vger.kernel.org # v3.15+
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>

Applied to kvm-ppc-next.

Paul.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH] KVM: PPC: Book3S HV: Save/restore vrsave register in kvmhv_p9_guest_entry()
  2013-07-11 10:51 ` Paul Mackerras
@ 2019-04-30  0:41 ` Suraj Jitindar Singh
  -1 siblings, 0 replies; 14+ messages in thread
From: Suraj Jitindar Singh @ 2019-04-30  0:41 UTC (permalink / raw)
  To: kvm-ppc; +Cc: paulus, kvm, Suraj Jitindar Singh

On POWER9 and later processors where the host can schedule vcpus on a
per thread basis, there is a streamlined entry path used when the guest
is radix. This entry path saves/restores the fp and vr state in
kvmhv_p9_guest_entry() by calling store_[fp/vr]_state() and
load_[fp/vr]_state(). This is the same as the old entry path however the
old entry path also saved/restored the VRSAVE register, which isn't done
in the new entry path.

This means that the vrsave register is now volatile across guest exit,
which is an incorrect change in behaviour.

Fix this by saving/restoring the vrsave register in kvmhv_p9_guest_entry().
This restores the old, correct, behaviour.

Fixes: 95a6432ce9038 ("KVM: PPC: Book3S HV: Streamlined guest entry/exit path on P9 for radix guests")

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 06964350b97a..700e125c08ce 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3511,6 +3511,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #ifdef CONFIG_ALTIVEC
 	load_vr_state(&vcpu->arch.vr);
 #endif
+	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
 	mtspr(SPRN_DSCR, vcpu->arch.dscr);
 	mtspr(SPRN_IAMR, vcpu->arch.iamr);
@@ -3602,6 +3603,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #ifdef CONFIG_ALTIVEC
 	store_vr_state(&vcpu->arch.vr);
 #endif
+	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 
 	if (cpu_has_feature(CPU_FTR_TM) ||
 	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
-- 
2.13.6


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH] KVM: PPC: Book3S HV: Save/restore vrsave register in kvmhv_p9_guest_entry()
@ 2019-04-30  0:41 ` Suraj Jitindar Singh
  0 siblings, 0 replies; 14+ messages in thread
From: Suraj Jitindar Singh @ 2019-04-30  0:41 UTC (permalink / raw)
  To: kvm-ppc; +Cc: paulus, kvm, Suraj Jitindar Singh

On POWER9 and later processors where the host can schedule vcpus on a
per thread basis, there is a streamlined entry path used when the guest
is radix. This entry path saves/restores the fp and vr state in
kvmhv_p9_guest_entry() by calling store_[fp/vr]_state() and
load_[fp/vr]_state(). This is the same as the old entry path however the
old entry path also saved/restored the VRSAVE register, which isn't done
in the new entry path.

This means that the vrsave register is now volatile across guest exit,
which is an incorrect change in behaviour.

Fix this by saving/restoring the vrsave register in kvmhv_p9_guest_entry().
This restores the old, correct, behaviour.

Fixes: 95a6432ce9038 ("KVM: PPC: Book3S HV: Streamlined guest entry/exit path on P9 for radix guests")

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 06964350b97a..700e125c08ce 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3511,6 +3511,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #ifdef CONFIG_ALTIVEC
 	load_vr_state(&vcpu->arch.vr);
 #endif
+	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
 	mtspr(SPRN_DSCR, vcpu->arch.dscr);
 	mtspr(SPRN_IAMR, vcpu->arch.iamr);
@@ -3602,6 +3603,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #ifdef CONFIG_ALTIVEC
 	store_vr_state(&vcpu->arch.vr);
 #endif
+	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 
 	if (cpu_has_feature(CPU_FTR_TM) ||
 	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
-- 
2.13.6

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore vrsave register in kvmhv_p9_guest_entry()
  2019-04-30  0:41 ` Suraj Jitindar Singh
@ 2019-04-30 10:08   ` Paul Mackerras
  -1 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2019-04-30 10:08 UTC (permalink / raw)
  To: Suraj Jitindar Singh; +Cc: kvm-ppc, kvm

On Tue, Apr 30, 2019 at 10:41:23AM +1000, Suraj Jitindar Singh wrote:
> On POWER9 and later processors where the host can schedule vcpus on a
> per thread basis, there is a streamlined entry path used when the guest
> is radix. This entry path saves/restores the fp and vr state in
> kvmhv_p9_guest_entry() by calling store_[fp/vr]_state() and
> load_[fp/vr]_state(). This is the same as the old entry path however the
> old entry path also saved/restored the VRSAVE register, which isn't done
> in the new entry path.
> 
> This means that the vrsave register is now volatile across guest exit,
> which is an incorrect change in behaviour.
> 
> Fix this by saving/restoring the vrsave register in kvmhv_p9_guest_entry().
> This restores the old, correct, behaviour.
> 
> Fixes: 95a6432ce9038 ("KVM: PPC: Book3S HV: Streamlined guest entry/exit path on P9 for radix guests")
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>

Thanks, patch applied to my kvm-ppc-next tree.

Paul.

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH] KVM: PPC: Book3S HV: Save/restore vrsave register in kvmhv_p9_guest_entry()
@ 2019-04-30 10:08   ` Paul Mackerras
  0 siblings, 0 replies; 14+ messages in thread
From: Paul Mackerras @ 2019-04-30 10:08 UTC (permalink / raw)
  To: Suraj Jitindar Singh; +Cc: kvm-ppc, kvm

On Tue, Apr 30, 2019 at 10:41:23AM +1000, Suraj Jitindar Singh wrote:
> On POWER9 and later processors where the host can schedule vcpus on a
> per thread basis, there is a streamlined entry path used when the guest
> is radix. This entry path saves/restores the fp and vr state in
> kvmhv_p9_guest_entry() by calling store_[fp/vr]_state() and
> load_[fp/vr]_state(). This is the same as the old entry path however the
> old entry path also saved/restored the VRSAVE register, which isn't done
> in the new entry path.
> 
> This means that the vrsave register is now volatile across guest exit,
> which is an incorrect change in behaviour.
> 
> Fix this by saving/restoring the vrsave register in kvmhv_p9_guest_entry().
> This restores the old, correct, behaviour.
> 
> Fixes: 95a6432ce9038 ("KVM: PPC: Book3S HV: Streamlined guest entry/exit path on P9 for radix guests")
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>

Thanks, patch applied to my kvm-ppc-next tree.

Paul.

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2019-04-30 10:12 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-04-30  0:41 [PATCH] KVM: PPC: Book3S HV: Save/restore vrsave register in kvmhv_p9_guest_entry() Suraj Jitindar Singh
2019-04-30  0:41 ` Suraj Jitindar Singh
2019-04-30 10:08 ` Paul Mackerras
2019-04-30 10:08   ` Paul Mackerras
  -- strict thread matches above, loose matches on Subject: below --
2016-11-07  4:09 [PATCH] KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state Paul Mackerras
2016-11-07  4:09 ` Paul Mackerras
2016-11-07  7:38 ` Thomas Huth
2016-11-07  7:38   ` Thomas Huth
2016-11-21  5:07 ` Paul Mackerras
2016-11-21  5:07   ` Paul Mackerras
2013-07-11 10:51 [PATCH] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers Paul Mackerras
2013-07-11 10:51 ` Paul Mackerras
2013-07-11 11:06 ` Alexander Graf
2013-07-11 11:06   ` Alexander Graf

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.