* [PATCH 0/3] Some fixes for PPC HV-style KVM
From: Paul Mackerras @ 2013-08-26 10:15 UTC
  To: Alexander Graf; +Cc: kvm-ppc, kvm

Here are 3 patches that add two PMU (performance monitor unit)
registers to the set being context-switched on guest entry and exit,
and implement a per-guest timebase offset that is needed when we
migrate a guest from one host to another that has a different timebase
origin.  The first patch just adds some one_reg register definitions
for extra PMU registers, including some that exist on POWER8.  These
new registers aren't yet handled by the kernel code, but their
definitions are included here so as to reserve the numbers.

These patches are against Alex Graf's kvm-ppc-queue branch.

Paul.

* [PATCH 1/3] KVM: PPC: Book3S HV: Add one_reg definitions for more PMU registers
From: Paul Mackerras @ 2013-08-26 10:16 UTC
  To: Alexander Graf; +Cc: kvm-ppc, kvm

This adds one_reg register numbers for two performance monitor registers
that exist on POWER7 and later processors (SIAR and SDAR) and three that
will be introduced on POWER8 (MMCR2, MMCRS and SIER).

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 Documentation/virtual/kvm/api.txt   | 5 +++++
 arch/powerpc/include/uapi/asm/kvm.h | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 66dd2aa..8b4d984 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1765,6 +1765,11 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_MMCR0     | 64
   PPC   | KVM_REG_PPC_MMCR1     | 64
   PPC   | KVM_REG_PPC_MMCRA     | 64
+  PPC   | KVM_REG_PPC_MMCR2     | 64
+  PPC   | KVM_REG_PPC_MMCRS     | 64
+  PPC   | KVM_REG_PPC_SIAR      | 64
+  PPC   | KVM_REG_PPC_SDAR      | 64
+  PPC   | KVM_REG_PPC_SIER      | 64
   PPC   | KVM_REG_PPC_PMC1      | 32
   PPC   | KVM_REG_PPC_PMC2      | 32
   PPC   | KVM_REG_PPC_PMC3      | 32
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e..fb0a8a9 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -429,6 +429,11 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 #define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 #define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
 
 #define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 #define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
-- 
1.8.4.rc3

* [PATCH 2/3] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers
From: Paul Mackerras @ 2013-08-26 10:16 UTC
  To: Alexander Graf; +Cc: kvm-ppc, kvm

Currently we are not saving and restoring the SIAR and SDAR registers in
the PMU (performance monitor unit) on guest entry and exit.  The result
is that performance monitoring tools in the guest could get false
information about where a program was executing and what data it was
accessing at the time of a performance monitor interrupt.  This fixes
it by saving and restoring these registers along with the other PMU
registers on guest entry/exit.

This also provides a way for userspace to access these values for a
vcpu via the one_reg interface.
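
For illustration only, not part of the patch: a minimal sketch of how
userspace could read or write one of these registers through the one_reg
interface, assuming an open vcpu file descriptor vcpu_fd.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_siar(int vcpu_fd, uint64_t *siar)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_SIAR,
		.addr = (uint64_t)(uintptr_t)siar,
	};

	/* Copies the vcpu's saved SIAR value into *siar. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_siar(int vcpu_fd, uint64_t siar)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_SIAR,
		.addr = (uint64_t)(uintptr_t)&siar,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

KVM_REG_PPC_SDAR would be handled the same way.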

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/kvm_host.h     |  2 ++
 arch/powerpc/kernel/asm-offsets.c       |  2 ++
 arch/powerpc/kvm/book3s_hv.c            | 12 ++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 ++++++++++++
 4 files changed, 28 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3328353..91b833d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -498,6 +498,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a67c76e..822b6ba 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -506,6 +506,8 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2b95c45..9df824f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -771,6 +771,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		}
 		break;
 #endif /* CONFIG_VSX */
+	case KVM_REG_PPC_SIAR:
+		*val = get_reg_val(id, vcpu->arch.siar);
+		break;
+	case KVM_REG_PPC_SDAR:
+		*val = get_reg_val(id, vcpu->arch.sdar);
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		spin_lock(&vcpu->arch.vpa_update_lock);
 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -855,6 +861,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		}
 		break;
 #endif /* CONFIG_VSX */
+	case KVM_REG_PPC_SIAR:
+		vcpu->arch.siar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SDAR:
+		vcpu->arch.sdar = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		addr = set_reg_val(id, *val);
 		r = -EINVAL;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 60dce5b..2e1dd6c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -198,6 +198,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r6, VCPU_MMCR + 16(r4)
 	mtspr	SPRN_MMCR1, r5
 	mtspr	SPRN_MMCRA, r6
+BEGIN_FTR_SECTION
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -1125,6 +1131,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
+BEGIN_FTR_SECTION
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r4, SPRN_PMC2
 	mfspr	r5, SPRN_PMC3
-- 
1.8.4.rc3


* [PATCH 3/3] KVM: PPC: Book3S HV: Implement timebase offset for guests
From: Paul Mackerras @ 2013-08-26 11:18 UTC
  To: Alexander Graf; +Cc: kvm-ppc, kvm

This allows guests to have a different timebase origin from the host.
This is needed for migration, where a guest can migrate from one host
to another and the two hosts might have a different timebase origin.
However, the timebase seen by the guest must not go backwards, and
should go forwards only by a small amount corresponding to the time
taken for the migration.

Therefore this provides a new per-vcpu value accessed via the one_reg
interface using the new KVM_REG_PPC_TB_OFFSET identifier.  This value
defaults to 0 and is not modified by KVM.  On entering the guest, this
value is added onto the timebase, and on exiting the guest, it is
subtracted from the timebase.

This is only supported for recent POWER hardware which has the TBU40
(timebase upper 40 bits) register.  Writing to the TBU40 register only
alters the upper 40 bits of the timebase, leaving the lower 24 bits
unchanged.  This provides a way to modify the timebase for guest
migration without disturbing the synchronization of the timebase
registers across CPU cores.  This means that userspace must supply
a value for the offset that has zeroes in the lower 24 bits.  If the
lower 24 bits are non-zero, they are ignored and taken as zeroes.
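
As an illustration (not part of the patch), the mask below is the same one
the kernel applies when userspace sets KVM_REG_PPC_TB_OFFSET, so usable
offsets are effectively multiples of 2^24 timebase ticks, roughly 32.8 ms
at the usual 512 MHz timebase frequency:

	#include <stdint.h>

	/* Hypothetical userspace helper: align an offset to the TBU40
	 * granularity, mirroring the kernel's own masking. */
	static inline uint64_t tb_offset_align(uint64_t offset)
	{
		return offset & ~0xffffffULL;	/* clear the low 24 bits */
	}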

Timebase values stored in KVM structures (struct kvm_vcpu, struct
kvmppc_vcore, etc.) are stored as host timebase values.  The timebase
values in the dispatch trace log need to be guest timebase values,
however, since that is read directly by the guest.  This moves the
setting of vcpu->arch.dec_expires on guest exit to a point after we
have restored the host timebase so that vcpu->arch.dec_expires is a
host timebase value.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 Documentation/virtual/kvm/api.txt       |  1 +
 arch/powerpc/include/asm/kvm_host.h     |  2 ++
 arch/powerpc/include/asm/reg.h          |  1 +
 arch/powerpc/include/uapi/asm/kvm.h     |  3 ++
 arch/powerpc/kernel/asm-offsets.c       |  1 +
 arch/powerpc/kvm/book3s_hv.c            |  8 +++++-
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 50 +++++++++++++++++++++++++++------
 7 files changed, 56 insertions(+), 10 deletions(-)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 8b4d984..88f4653 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1815,6 +1815,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TLB3PS	| 32
   PPC   | KVM_REG_PPC_EPTCFG	| 32
   PPC   | KVM_REG_PPC_ICP_STATE | 64
+  PPC   | KVM_REG_PPC_TB_OFFSET	| 64
 
 ARM registers are mapped using the lower 32 bits.  The upper 16 of that
 is the register group type, or coprocessor number:
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 91b833d..702d88b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -607,6 +607,8 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
+
+	u64 tb_offset;		/* guest timebase - host timebase */
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 4a9e408..72f8798 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -243,6 +243,7 @@
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
 #define SPRN_TBWL	0x11C	/* Time Base Lower Register (super, R/W) */
 #define SPRN_TBWU	0x11D	/* Time Base Upper Register (super, R/W) */
+#define SPRN_TBU40	0x11E	/* Timebase upper 40 bits (hyper, R/W) */
 #define SPRN_SPURR	0x134	/* Scaled PURR */
 #define SPRN_HSPRG0	0x130	/* Hypervisor Scratch 0 */
 #define SPRN_HSPRG1	0x131	/* Hypervisor Scratch 1 */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index fb0a8a9..9935321 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -504,6 +504,9 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TLB3PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
 #define KVM_REG_PPC_EPTCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
 
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 822b6ba..62acafd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -488,6 +488,7 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+	DEFINE(VCPU_TB_OFFSET, offsetof(struct kvm_vcpu, arch.tb_offset));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9df824f..2f4c624 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -489,7 +489,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	memset(dt, 0, sizeof(struct dtl_entry));
 	dt->dispatch_reason = 7;
 	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
-	dt->timebase = now;
+	dt->timebase = now + vcpu->arch.tb_offset;
 	dt->enqueue_to_dispatch_time = stolen;
 	dt->srr0 = kvmppc_get_pc(vcpu);
 	dt->srr1 = vcpu->arch.shregs.msr;
@@ -794,6 +794,9 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		val->vpaval.length = vcpu->arch.dtl.len;
 		spin_unlock(&vcpu->arch.vpa_update_lock);
 		break;
+	case KVM_REG_PPC_TB_OFFSET:
+		*val = get_reg_val(id, vcpu->arch.tb_offset);
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -893,6 +896,9 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		len -= len % sizeof(struct dtl_entry);
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
+	case KVM_REG_PPC_TB_OFFSET:
+		vcpu->arch.tb_offset = set_reg_val(id, *val) & ~0xffffffULL;
+		break;
 	default:
 		r = -EINVAL;
 		break;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2e1dd6c..d7d4d41 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -345,7 +345,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	bdnz	28b
 	ptesync
 
-22:	li	r0,1
+	/* Add timebase offset onto timebase */
+22:	ld	r8,VCPU_TB_OFFSET(r4)
+	cmpdi	r8,0
+	beq	37f
+	mftb	r6		/* current host timebase */
+	add	r8,r8,r6
+	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
+	mftb	r7		/* check if lower 24 bits overflowed */
+	clrldi	r6,r6,40
+	clrldi	r7,r7,40
+	cmpld	r7,r6
+	bge	37f
+	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
+	mtspr	SPRN_TBU40,r8
+
+37:	li	r0,1
 	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
 	b	10f
 
@@ -776,13 +791,6 @@ ext_stash_for_host:
 ext_interrupt_to_host:
 
 guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
-	/* Save DEC */
-	mfspr	r5,SPRN_DEC
-	mftb	r6
-	extsw	r5,r5
-	add	r5,r5,r6
-	std	r5,VCPU_DEC_EXPIRES(r9)
-
 	/* Save more register state  */
 	mfdar	r6
 	mfdsisr	r7
@@ -952,7 +960,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
 	mtspr	SPRN_LPID,r7
 	isync
-	li	r0,0
+
+	/* Subtract timebase offset from timebase */
+	ld	r8,VCPU_TB_OFFSET(r9)
+	cmpdi	r8,0
+	beq	17f
+	mftb	r6			/* current host timebase */
+	subf	r8,r8,r6
+	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
+	mftb	r7			/* check if lower 24 bits overflowed */
+	clrldi	r6,r6,40
+	clrldi	r7,r7,40
+	cmpld	r7,r6
+	bge	17f
+	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
+	mtspr	SPRN_TBU40,r8
+
+	/* Signal secondary CPUs to continue */
+17:	li	r0,0
 	stb	r0,VCORE_IN_GUEST(r5)
 	lis	r8,0x7fff		/* MAX_INT@h */
 	mtspr	SPRN_HDEC,r8
@@ -1046,6 +1071,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 1:	addi	r8,r8,16
 	.endr
 
+	/* Save DEC */
+	mfspr	r5,SPRN_DEC
+	mftb	r6
+	extsw	r5,r5
+	add	r5,r5,r6
+	std	r5,VCPU_DEC_EXPIRES(r9)
+
 	/* Save and reset AMR and UAMOR before turning on the MMU */
 BEGIN_FTR_SECTION
 	mfspr	r5,SPRN_AMR
-- 
1.8.4.rc3


* [PATCH v2 2/3] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other PMU registers
From: Paul Mackerras @ 2013-08-28  5:42 UTC
  To: Alexander Graf; +Cc: kvm-ppc, kvm

Currently we are not saving and restoring the SIAR and SDAR registers in
the PMU (performance monitor unit) on guest entry and exit.  The result
is that performance monitoring tools in the guest could get false
information about where a program was executing and what data it was
accessing at the time of a performance monitor interrupt.  This fixes
it by saving and restoring these registers along with the other PMU
registers on guest entry/exit.

This also provides a way for userspace to access these values for a
vcpu via the one_reg interface.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
v2: PPC970 also has SIAR and SDAR, so move the save/restore code out
of the POWER7 feature sections.  The other patches in this series are
unchanged.

 arch/powerpc/include/asm/kvm_host.h     |  2 ++
 arch/powerpc/kernel/asm-offsets.c       |  2 ++
 arch/powerpc/kvm/book3s_hv.c            | 12 ++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |  8 ++++++++
 4 files changed, 24 insertions(+)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3328353..91b833d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -498,6 +498,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a67c76e..822b6ba 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -506,6 +506,8 @@ int main(void)
 	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
 	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
 	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
+	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
 	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
 	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
 	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 1cbcb4c..25c2e39 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -749,6 +749,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
 		break;
+	case KVM_REG_PPC_SIAR:
+		*val = get_reg_val(id, vcpu->arch.siar);
+		break;
+	case KVM_REG_PPC_SDAR:
+		*val = get_reg_val(id, vcpu->arch.sdar);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -833,6 +839,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 		i = id - KVM_REG_PPC_PMC1;
 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_SIAR:
+		vcpu->arch.siar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_SDAR:
+		vcpu->arch.sdar = set_reg_val(id, *val);
+		break;
 #ifdef CONFIG_VSX
 	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 		if (cpu_has_feature(CPU_FTR_VSX)) {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 60dce5b..bfb4b0a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -196,8 +196,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, VCPU_MMCR(r4)
 	ld	r5, VCPU_MMCR + 8(r4)
 	ld	r6, VCPU_MMCR + 16(r4)
+	ld	r7, VCPU_SIAR(r4)
+	ld	r8, VCPU_SDAR(r4)
 	mtspr	SPRN_MMCR1, r5
 	mtspr	SPRN_MMCRA, r6
+	mtspr	SPRN_SIAR, r7
+	mtspr	SPRN_SDAR, r8
 	mtspr	SPRN_MMCR0, r3
 	isync
 
@@ -1122,9 +1126,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
 	b	22f
 21:	mfspr	r5, SPRN_MMCR1
+	mfspr	r7, SPRN_SIAR
+	mfspr	r8, SPRN_SDAR
 	std	r4, VCPU_MMCR(r9)
 	std	r5, VCPU_MMCR + 8(r9)
 	std	r6, VCPU_MMCR + 16(r9)
+	std	r7, VCPU_SIAR(r9)
+	std	r8, VCPU_SDAR(r9)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r4, SPRN_PMC2
 	mfspr	r5, SPRN_PMC3
-- 
1.8.4.rc3


* Re: [PATCH 3/3] KVM: PPC: Book3S HV: Implement real mode H_PAGE_INIT handler
From: Alexey Kardashevskiy @ 2019-03-19  6:53 UTC
  To: kvm-ppc



On 19/03/2019 15:04, Suraj Jitindar Singh wrote:
> Implement a real mode handler for the H_CALL H_PAGE_INIT which can be
> used to zero or copy a guest page. The page is defined to be 4k and must
> be 4k aligned.
> 
> The in-kernel real mode handler halves the time to handle this H_CALL
> compared to handling it in userspace for a hash guest.
> 
> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> ---
>  arch/powerpc/include/asm/kvm_ppc.h      |   2 +
>  arch/powerpc/kvm/book3s_hv_rm_mmu.c     | 144 ++++++++++++++++++++++++++++++++
>  arch/powerpc/kvm/book3s_hv_rmhandlers.S |   2 +-
>  3 files changed, 147 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
> index 8e8bb1299a0e..f34f290463aa 100644
> --- a/arch/powerpc/include/asm/kvm_ppc.h
> +++ b/arch/powerpc/include/asm/kvm_ppc.h
> @@ -653,6 +653,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
>                          unsigned long pte_index);
>  long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
>                          unsigned long pte_index);
> +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
> +			   unsigned long dest, unsigned long src);
>  long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
>                            unsigned long slb_v, unsigned int status, bool data);
>  unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
> diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> index 3b3791ed74a6..26cfe1480ff5 100644
> --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> @@ -867,6 +867,150 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
>  	return ret;
>  }
>  
> +static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
> +			  int writing, unsigned long *hpa,
> +			  struct kvm_memory_slot **memslot_p)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvm_memory_slot *memslot;
> +	unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
> +	unsigned int shift;
> +	pte_t *ptep, pte;
> +
> +	/* Find the memslot for this address */
> +	gfn = gpa >> PAGE_SHIFT;
> +	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
> +	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
> +		return H_PARAMETER;
> +
> +	/* Translate to host virtual address */
> +	hva = __gfn_to_hva_memslot(memslot, gfn);
> +
> +	/* Try to find the host pte for that virtual address */
> +	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
> +	if (!ptep)
> +		return H_TOO_HARD;
> +	pte = kvmppc_read_update_linux_pte(ptep, writing);
> +	if (!pte_present(pte))
> +		return H_TOO_HARD;
> +
> +	/* Convert to a physical address */
> +	if (shift)
> +		psize = 1UL << shift;
> +	pa = pte_pfn(pte) << PAGE_SHIFT;
> +	pa |= hva & (psize - 1);
> +	pa |= gpa & ~PAGE_MASK;
> +
> +	if (hpa)
> +		*hpa = pa;


hpa is always not null.


> +	if (memslot_p)
> +		*memslot_p = memslot;

memslot_p!=NULL only when writing=1, you can safely drop the writing
parameter.


> +
> +	return H_SUCCESS;
> +}
> +
> +static int kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, unsigned long dest)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvm_memory_slot *memslot;
> +	unsigned long pa;
> +	unsigned long mmu_seq;

Nit: the two lines above can be one.

> +	int i, ret = H_SUCCESS;
> +
> +	/* Used later to detect if we might have been invalidated */
> +	mmu_seq = kvm->mmu_notifier_seq;
> +	smp_rmb();
> +
> +	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
> +	if (ret != H_SUCCESS)
> +		return ret;
> +
> +	/* Check if we've been invalidated */
> +	spin_lock(&kvm->mmu_lock);
> +	if (mmu_notifier_retry(kvm, mmu_seq)) {
> +		ret = H_TOO_HARD;
> +		goto out_unlock;
> +	}
> +
> +	/* Zero the page */
> +	for (i = 0; i < 4096; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
> +		dcbz((void *)pa);
> +	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
> +
> +out_unlock:
> +	spin_unlock(&kvm->mmu_lock);
> +	return ret;
> +}
> +
> +static int kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, unsigned long dest,
> +				      unsigned long src)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	struct kvm_memory_slot *dest_memslot;
> +	unsigned long dest_pa, src_pa;
> +	unsigned long mmu_seq;
> +	int ret = H_SUCCESS;
> +
> +	/* Used later to detect if we might have been invalidated */
> +	mmu_seq = kvm->mmu_notifier_seq;
> +	smp_rmb();
> +
> +	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
> +	if (ret != H_SUCCESS)
> +		return ret;
> +	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
> +	if (ret != H_SUCCESS)
> +		return ret;
> +
> +	/* Check if we've been invalidated */
> +	spin_lock(&kvm->mmu_lock);

I am no expert on spin_lock but from my memory in real mode it should be
at least raw_spin_lock as CONFIG_DEBUG_SPINLOCK (when defined) can break
things horribly.


> +	if (mmu_notifier_retry(kvm, mmu_seq)) {
> +		ret = H_TOO_HARD;
> +		goto out_unlock;
> +	}
> +
> +	/* Copy the page */
> +	memcpy((void *)dest_pa, (void *)src_pa, 4096);


s/4096/SZ_4K/

> +
> +	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
> +
> +out_unlock:
> +	spin_unlock(&kvm->mmu_lock);
> +	return ret;
> +}
> +
> +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
> +			   unsigned long dest, unsigned long src)
> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	u64 pg_sz = 1UL << 12;  /* 4K page size */
> +	u64 pg_mask = pg_sz - 1;

Same comment about SZ_4K as in 2/3.


> +	int ret = H_SUCCESS;


Usually H_SUCCESS/... codes are long and EINVAL/... are int. Same for
2/3. The rule is not used 100% of time but nevertheless.


> +
> +	/* Don't handle radix mode here, go up to the virtual mode handler */
> +	if (kvm_is_radix(kvm))
> +		return H_TOO_HARD;
> +
> +	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
> +	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
> +		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
> +		return H_PARAMETER;
> +
> +	/* dest (and src if copy_page flag set) must be page aligned */
> +	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
> +		return H_PARAMETER;
> +
> +	/* zero and/or copy the page as determined by the flags */
> +	if (flags & H_ZERO_PAGE)
> +		ret = kvmppc_do_h_page_init_zero(vcpu, dest);

"else" here?

> +	if (flags & H_COPY_PAGE)
> +		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);


else if (src)
	return H_PARAMETER;

> +
> +	/* We can ignore the other flags */
> +
> +	return ret;
> +}
> +
>  void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
>  			unsigned long pte_index)
>  {
> diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> index 9b8d50a7cbaf..5927497e7bbf 100644
> --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> @@ -2268,7 +2268,7 @@ hcall_real_table:
>  	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
>  	.long	0		/* 0x24 - H_SET_SPRG0 */
>  	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
> -	.long	0		/* 0x2c */
> +	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
>  	.long	0		/* 0x30 */
>  	.long	0		/* 0x34 */
>  	.long	0		/* 0x38 */
> 

-- 
Alexey
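
A hypothetical sketch of the flag handling with the restructuring suggested
above (the code eventually merged may differ):

	/* Tail of kvmppc_rm_h_page_init(): copy takes precedence, zeroing is
	 * the alternative, and a src argument without H_COPY_PAGE is an error. */
	if (flags & H_COPY_PAGE)
		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
	else if (src)
		ret = H_PARAMETER;
	else if (flags & H_ZERO_PAGE)
		ret = kvmppc_do_h_page_init_zero(vcpu, dest);

	return ret;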


* Re: [PATCH 3/3] KVM: PPC: Book3S HV: Implement real mode H_PAGE_INIT handler
From: Suraj Jitindar Singh @ 2019-03-22  5:15 UTC
  To: kvm-ppc

On Tue, 2019-03-19 at 17:53 +1100, Alexey Kardashevskiy wrote:
> 
> On 19/03/2019 15:04, Suraj Jitindar Singh wrote:
> > Implement a real mode handler for the H_CALL H_PAGE_INIT which can
> > be
> > used to zero or copy a guest page. The page is defined to be 4k and
> > must
> > be 4k aligned.
> > 
> > The in-kernel real mode handler halves the time to handle this
> > H_CALL
> > compared to handling it in userspace for a hash guest.
> > 
> > Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
> > ---
> >  arch/powerpc/include/asm/kvm_ppc.h      |   2 +
> >  arch/powerpc/kvm/book3s_hv_rm_mmu.c     | 144
> > ++++++++++++++++++++++++++++++++
> >  arch/powerpc/kvm/book3s_hv_rmhandlers.S |   2 +-
> >  3 files changed, 147 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/powerpc/include/asm/kvm_ppc.h
> > b/arch/powerpc/include/asm/kvm_ppc.h
> > index 8e8bb1299a0e..f34f290463aa 100644
> > --- a/arch/powerpc/include/asm/kvm_ppc.h
> > +++ b/arch/powerpc/include/asm/kvm_ppc.h
> > @@ -653,6 +653,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu,
> > unsigned long flags,
> >                          unsigned long pte_index);
> >  long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long
> > flags,
> >                          unsigned long pte_index);
> > +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long
> > flags,
> > +			   unsigned long dest, unsigned long src);
> >  long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long
> > addr,
> >                            unsigned long slb_v, unsigned int
> > status, bool data);
> >  unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
> > diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> > b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> > index 3b3791ed74a6..26cfe1480ff5 100644
> > --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> > +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
> > @@ -867,6 +867,150 @@ long kvmppc_h_clear_mod(struct kvm_vcpu
> > *vcpu, unsigned long flags,
> >  	return ret;
> >  }
> >  
> > +static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long
> > gpa,
> > +			  int writing, unsigned long *hpa,
> > +			  struct kvm_memory_slot **memslot_p)
> > +{
> > +	struct kvm *kvm = vcpu->kvm;
> > +	struct kvm_memory_slot *memslot;
> > +	unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
> > +	unsigned int shift;
> > +	pte_t *ptep, pte;
> > +
> > +	/* Find the memslot for this address */
> > +	gfn = gpa >> PAGE_SHIFT;
> > +	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
> > +	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
> > +		return H_PARAMETER;
> > +
> > +	/* Translate to host virtual address */
> > +	hva = __gfn_to_hva_memslot(memslot, gfn);
> > +
> > +	/* Try to find the host pte for that virtual address */
> > +	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL,
> > &shift);
> > +	if (!ptep)
> > +		return H_TOO_HARD;
> > +	pte = kvmppc_read_update_linux_pte(ptep, writing);
> > +	if (!pte_present(pte))
> > +		return H_TOO_HARD;
> > +
> > +	/* Convert to a physical address */
> > +	if (shift)
> > +		psize = 1UL << shift;
> > +	pa = pte_pfn(pte) << PAGE_SHIFT;
> > +	pa |= hva & (psize - 1);
> > +	pa |= gpa & ~PAGE_MASK;
> > +
> > +	if (hpa)
> > +		*hpa = pa;
> 
> 
> hpa is always not null.

For now that is the case. I feel like it's better to check in case
someone reuses the function in future.
> 
> 
> > +	if (memslot_p)
> > +		*memslot_p = memslot;
> 
> memslot_p!=NULL only when writing=1, you can safely drop the writing
> parameter.

As above.

> 
> 
> > +
> > +	return H_SUCCESS;
> > +}
> > +
> > +static int kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
> > unsigned long dest)
> > +{
> > +	struct kvm *kvm = vcpu->kvm;
> > +	struct kvm_memory_slot *memslot;
> > +	unsigned long pa;
> > +	unsigned long mmu_seq;
> 
> Nit: the two lines above can be one.

True :)

> 
> > +	int i, ret = H_SUCCESS;
> > +
> > +	/* Used later to detect if we might have been invalidated
> > */
> > +	mmu_seq = kvm->mmu_notifier_seq;
> > +	smp_rmb();
> > +
> > +	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
> > +	if (ret != H_SUCCESS)
> > +		return ret;
> > +
> > +	/* Check if we've been invalidated */
> > +	spin_lock(&kvm->mmu_lock);
> > +	if (mmu_notifier_retry(kvm, mmu_seq)) {
> > +		ret = H_TOO_HARD;
> > +		goto out_unlock;
> > +	}
> > +
> > +	/* Zero the page */
> > +	for (i = 0; i < 4096; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
> > +		dcbz((void *)pa);
> > +	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT,
> > PAGE_SIZE);
> > +
> > +out_unlock:
> > +	spin_unlock(&kvm->mmu_lock);
> > +	return ret;
> > +}
> > +
> > +static int kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
> > unsigned long dest,
> > +				      unsigned long src)
> > +{
> > +	struct kvm *kvm = vcpu->kvm;
> > +	struct kvm_memory_slot *dest_memslot;
> > +	unsigned long dest_pa, src_pa;
> > +	unsigned long mmu_seq;
> > +	int ret = H_SUCCESS;
> > +
> > +	/* Used later to detect if we might have been invalidated
> > */
> > +	mmu_seq = kvm->mmu_notifier_seq;
> > +	smp_rmb();
> > +
> > +	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa,
> > &dest_memslot);
> > +	if (ret != H_SUCCESS)
> > +		return ret;
> > +	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
> > +	if (ret != H_SUCCESS)
> > +		return ret;
> > +
> > +	/* Check if we've been invalidated */
> > +	spin_lock(&kvm->mmu_lock);
> 
> I am no expert on spin_lock but from my memory in real mode it should
> be
> at least raw_spin_lock as CONFIG_DEBUG_SPINLOCK (when defined) can
> break
> things horribly.

I am also no expert. I'll take your word for it.

> 
> 
> > +	if (mmu_notifier_retry(kvm, mmu_seq)) {
> > +		ret = H_TOO_HARD;
> > +		goto out_unlock;
> > +	}
> > +
> > +	/* Copy the page */
> > +	memcpy((void *)dest_pa, (void *)src_pa, 4096);
> 
> 
> s/4096/SZ_4K/

yep

> 
> > +
> > +	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT,
> > PAGE_SIZE);
> > +
> > +out_unlock:
> > +	spin_unlock(&kvm->mmu_lock);
> > +	return ret;
> > +}
> > +
> > +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long
> > flags,
> > +			   unsigned long dest, unsigned long src)
> > +{
> > +	struct kvm *kvm = vcpu->kvm;
> > +	u64 pg_sz = 1UL << 12;  /* 4K page size */
> > +	u64 pg_mask = pg_sz - 1;
> 
> Same comment about SZ_4K as in 2/3.

yep

> 
> 
> > +	int ret = H_SUCCESS;
> 
> 
> Usually H_SUCCESS/... codes are long and EINVAL/... are int. Same for
> 2/3. The rule is not used 100% of time but nevertheless.

Ok, will do

> 
> 
> > +
> > +	/* Don't handle radix mode here, go up to the virtual mode
> > handler */
> > +	if (kvm_is_radix(kvm))
> > +		return H_TOO_HARD;
> > +
> > +	/* Check for invalid flags (H_PAGE_SET_LOANED covers all
> > CMO flags) */
> > +	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
> > +		      H_ZERO_PAGE | H_COPY_PAGE |
> > H_PAGE_SET_LOANED))
> > +		return H_PARAMETER;
> > +
> > +	/* dest (and src if copy_page flag set) must be page
> > aligned */
> > +	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src &
> > pg_mask)))
> > +		return H_PARAMETER;
> > +
> > +	/* zero and/or copy the page as determined by the flags */
> > +	if (flags & H_ZERO_PAGE)
> > +		ret = kvmppc_do_h_page_init_zero(vcpu, dest);
> 
> "else" here?

yeah and I'll flip the order of zero and copy

> 
> > +	if (flags & H_COPY_PAGE)
> > +		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
> 
> 
> else if (src)
> 	return H_PARAMETER;
> 
> > +
> > +	/* We can ignore the other flags */
> > +
> > +	return ret;
> > +}
> > +
> >  void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
> >  			unsigned long pte_index)
> >  {
> > diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > index 9b8d50a7cbaf..5927497e7bbf 100644
> > --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
> > @@ -2268,7 +2268,7 @@ hcall_real_table:
> >  	.long	DOTSYM(kvmppc_rm_h_put_tce) -
> > hcall_real_table
> >  	.long	0		/* 0x24 - H_SET_SPRG0 */
> >  	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
> > -	.long	0		/* 0x2c */
> > +	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
> >  	.long	0		/* 0x30 */
> >  	.long	0		/* 0x34 */
> >  	.long	0		/* 0x38 */
> > 
> 
> 

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 3/3] KVM: PPC: Book3S HV: Implement real mode H_PAGE_INIT handler
  2013-08-26 11:18   ` Paul Mackerras
                     ` (2 preceding siblings ...)
  (?)
@ 2019-03-25  4:50   ` Alexey Kardashevskiy
  -1 siblings, 0 replies; 14+ messages in thread
From: Alexey Kardashevskiy @ 2019-03-25  4:50 UTC (permalink / raw)
  To: kvm-ppc



On 22/03/2019 16:15, Suraj Jitindar Singh wrote:
> On Tue, 2019-03-19 at 17:53 +1100, Alexey Kardashevskiy wrote:
>>
>> On 19/03/2019 15:04, Suraj Jitindar Singh wrote:
>>> Implement a real mode handler for the H_CALL H_PAGE_INIT which can
>>> be used to zero or copy a guest page. The page is defined to be 4k
>>> and must be 4k aligned.
>>>
>>> The in-kernel real mode handler halves the time to handle this
>>> H_CALL compared to handling it in userspace for a hash guest.
>>>
>>> Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
>>> ---
>>>  arch/powerpc/include/asm/kvm_ppc.h      |   2 +
>>>  arch/powerpc/kvm/book3s_hv_rm_mmu.c     | 144 ++++++++++++++++++++++++++++++++
>>>  arch/powerpc/kvm/book3s_hv_rmhandlers.S |   2 +-
>>>  3 files changed, 147 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
>>> index 8e8bb1299a0e..f34f290463aa 100644
>>> --- a/arch/powerpc/include/asm/kvm_ppc.h
>>> +++ b/arch/powerpc/include/asm/kvm_ppc.h
>>> @@ -653,6 +653,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
>>>                          unsigned long pte_index);
>>>  long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
>>>                          unsigned long pte_index);
>>> +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
>>> +			   unsigned long dest, unsigned long src);
>>>  long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
>>>                            unsigned long slb_v, unsigned int status, bool data);
>>>  unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
>>> diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
>>> index 3b3791ed74a6..26cfe1480ff5 100644
>>> --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
>>> +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
>>> @@ -867,6 +867,150 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
>>>  	return ret;
>>>  }
>>>  
>>> +static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
>>> +			  int writing, unsigned long *hpa,
>>> +			  struct kvm_memory_slot **memslot_p)
>>> +{
>>> +	struct kvm *kvm = vcpu->kvm;
>>> +	struct kvm_memory_slot *memslot;
>>> +	unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
>>> +	unsigned int shift;
>>> +	pte_t *ptep, pte;
>>> +
>>> +	/* Find the memslot for this address */
>>> +	gfn = gpa >> PAGE_SHIFT;
>>> +	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
>>> +	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
>>> +		return H_PARAMETER;
>>> +
>>> +	/* Translate to host virtual address */
>>> +	hva = __gfn_to_hva_memslot(memslot, gfn);
>>> +
>>> +	/* Try to find the host pte for that virtual address */
>>> +	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
>>> +	if (!ptep)
>>> +		return H_TOO_HARD;
>>> +	pte = kvmppc_read_update_linux_pte(ptep, writing);
>>> +	if (!pte_present(pte))
>>> +		return H_TOO_HARD;
>>> +
>>> +	/* Convert to a physical address */
>>> +	if (shift)
>>> +		psize = 1UL << shift;
>>> +	pa = pte_pfn(pte) << PAGE_SHIFT;
>>> +	pa |= hva & (psize - 1);
>>> +	pa |= gpa & ~PAGE_MASK;
>>> +
>>> +	if (hpa)
>>> +		*hpa = pa;
>>
>>
>> hpa is always not null.
> 
> For now that is the case. I feel like it's better to check in case
> someone reuses the function in the future.


Hard to imagine such a case, though. kvmppc_rm_ua_to_hpa() has never been
reused, for example.


>>
>>
>>> +	if (memslot_p)
>>> +		*memslot_p = memslot;
>>
>> memslot_p!=NULL only when writing=1, you can safely drop the writing
>> parameter.
> 
> As above.

I still suggest adding more parameters only when you need them.
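
i.e. roughly the following (a sketch of the suggested simplification, not
the posted code):

static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
			  unsigned long *hpa,
			  struct kvm_memory_slot **memslot_p)
{
	/* only the write (dest) path needs the memslot back */
	int writing = (memslot_p != NULL);
	...
	pte = kvmppc_read_update_linux_pte(ptep, writing);
	...
	*hpa = pa;		/* both callers pass a valid pointer */
	if (memslot_p)
		*memslot_p = memslot;
	return H_SUCCESS;
}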



-- 
Alexey

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH 3/3] KVM: PPC: Book3S HV: Implement real mode H_PAGE_INIT handler
@ 2019-03-19  4:04 Suraj Jitindar Singh
  0 siblings, 0 replies; 14+ messages in thread
From: Suraj Jitindar Singh @ 2019-03-19  4:04 UTC (permalink / raw)
  To: kvm-ppc

Implement a real mode handler for the H_CALL H_PAGE_INIT which can be
used to zero or copy a guest page. The page is defined to be 4k and must
be 4k aligned.

The in-kernel real mode handler halves the time to handle this H_CALL
compared to handling it in userspace for a hash guest.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_ppc.h      |   2 +
 arch/powerpc/kvm/book3s_hv_rm_mmu.c     | 144 ++++++++++++++++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |   2 +-
 3 files changed, 147 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8e8bb1299a0e..f34f290463aa 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -653,6 +653,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                         unsigned long pte_index);
 long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                         unsigned long pte_index);
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			   unsigned long dest, unsigned long src);
 long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           unsigned long slb_v, unsigned int status, bool data);
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 3b3791ed74a6..26cfe1480ff5 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -867,6 +867,150 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 	return ret;
 }
 
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
+			  int writing, unsigned long *hpa,
+			  struct kvm_memory_slot **memslot_p)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *memslot;
+	unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
+	unsigned int shift;
+	pte_t *ptep, pte;
+
+	/* Find the memslot for this address */
+	gfn = gpa >> PAGE_SHIFT;
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+		return H_PARAMETER;
+
+	/* Translate to host virtual address */
+	hva = __gfn_to_hva_memslot(memslot, gfn);
+
+	/* Try to find the host pte for that virtual address */
+	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	if (!ptep)
+		return H_TOO_HARD;
+	pte = kvmppc_read_update_linux_pte(ptep, writing);
+	if (!pte_present(pte))
+		return H_TOO_HARD;
+
+	/* Convert to a physical address */
+	if (shift)
+		psize = 1UL << shift;
+	pa = pte_pfn(pte) << PAGE_SHIFT;
+	pa |= hva & (psize - 1);
+	pa |= gpa & ~PAGE_MASK;
+
+	if (hpa)
+		*hpa = pa;
+	if (memslot_p)
+		*memslot_p = memslot;
+
+	return H_SUCCESS;
+}
+
+static int kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, unsigned long dest)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *memslot;
+	unsigned long pa;
+	unsigned long mmu_seq;
+	int i, ret = H_SUCCESS;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	/* Check if we've been invalidated */
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = H_TOO_HARD;
+		goto out_unlock;
+	}
+
+	/* Zero the page */
+	for (i = 0; i < 4096; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
+		dcbz((void *)pa);
+	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	spin_unlock(&kvm->mmu_lock);
+	return ret;
+}
+
+static int kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, unsigned long dest,
+				      unsigned long src)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *dest_memslot;
+	unsigned long dest_pa, src_pa;
+	unsigned long mmu_seq;
+	int ret = H_SUCCESS;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
+	if (ret != H_SUCCESS)
+		return ret;
+	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	/* Check if we've been invalidated */
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = H_TOO_HARD;
+		goto out_unlock;
+	}
+
+	/* Copy the page */
+	memcpy((void *)dest_pa, (void *)src_pa, 4096);
+
+	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	spin_unlock(&kvm->mmu_lock);
+	return ret;
+}
+
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			   unsigned long dest, unsigned long src)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 pg_sz = 1UL << 12;  /* 4K page size */
+	u64 pg_mask = pg_sz - 1;
+	int ret = H_SUCCESS;
+
+	/* Don't handle radix mode here, go up to the virtual mode handler */
+	if (kvm_is_radix(kvm))
+		return H_TOO_HARD;
+
+	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+		return H_PARAMETER;
+
+	/* dest (and src if copy_page flag set) must be page aligned */
+	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+		return H_PARAMETER;
+
+	/* zero and/or copy the page as determined by the flags */
+	if (flags & H_ZERO_PAGE)
+		ret = kvmppc_do_h_page_init_zero(vcpu, dest);
+	if (flags & H_COPY_PAGE)
+		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
+
+	/* We can ignore the other flags */
+
+	return ret;
+}
+
 void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
 			unsigned long pte_index)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 9b8d50a7cbaf..5927497e7bbf 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2268,7 +2268,7 @@ hcall_real_table:
 	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
 	.long	0		/* 0x24 - H_SET_SPRG0 */
 	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
-	.long	0		/* 0x2c */
+	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
 	.long	0		/* 0x30 */
 	.long	0		/* 0x34 */
 	.long	0		/* 0x38 */
-- 
2.13.6
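
For context, a guest that wants to use this hcall would issue something
along these lines (illustrative only; plpar_hcall_norets() and the
H_PAGE_INIT/H_ZERO_PAGE/H_COPY_PAGE definitions come from asm/hvcall.h,
and dest_ra/src_ra are 4k-aligned guest real addresses):

	#include <asm/hvcall.h>

	/* zero the 4k page at guest real address dest_ra */
	static long h_zero_page(unsigned long dest_ra)
	{
		return plpar_hcall_norets(H_PAGE_INIT, H_ZERO_PAGE, dest_ra, 0);
	}

	/* copy the 4k page at src_ra to the page at dest_ra */
	static long h_copy_page(unsigned long dest_ra, unsigned long src_ra)
	{
		return plpar_hcall_norets(H_PAGE_INIT, H_COPY_PAGE, dest_ra, src_ra);
	}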

^ permalink raw reply related	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2019-03-25  4:50 UTC | newest]

Thread overview: 14+ messages
2013-08-26 10:15 [PATCH 0/3] Some fixes for PPC HV-style KVM Paul Mackerras
2013-08-26 10:15 ` Paul Mackerras
2013-08-26 10:16 ` [PATCH 1/3] KVM: PPC: Book3S HV: Add one_reg definitions for more PMU registers Paul Mackerras
2013-08-26 10:16   ` Paul Mackerras
2013-08-26 10:16 ` [PATCH 2/3] KVM: PPC: Book3S HV: Save/restore SIAR and SDAR along with other " Paul Mackerras
2013-08-26 10:16   ` Paul Mackerras
2013-08-28  5:42   ` [PATCH v2 " Paul Mackerras
2013-08-28  5:42     ` Paul Mackerras
2013-08-26 11:18 ` [PATCH 3/3] KVM: PPC: Book3S HV: Implement timebase offset for guests Paul Mackerras
2013-08-26 11:18   ` Paul Mackerras
2019-03-19  6:53   ` [PATCH 3/3] KVM: PPC: Book3S HV: Implement real mode H_PAGE_INIT handler Alexey Kardashevskiy
2019-03-22  5:15   ` Suraj Jitindar Singh
2019-03-25  4:50   ` Alexey Kardashevskiy
2019-03-19  4:04 Suraj Jitindar Singh
