From: Paul Mackerras <paulus@samba.org>
To: Alexander Graf <agraf@suse.de>
Cc: kvm-ppc@vger.kernel.org, kvm@vger.kernel.org
Subject: [PATCH 03/12] KVM: PPC: Book3S HV: Provide a way for userspace to get/set per-vCPU areas
Date: Tue, 28 Aug 2012 22:33:12 +1000	[thread overview]
Message-ID: <20120828123312.GD7258@bloggs.ozlabs.ibm.com> (raw)
In-Reply-To: <20120828123043.GA7258@bloggs.ozlabs.ibm.com>

The PAPR paravirtualization interface lets guests register three
different types of per-vCPU buffer areas in their memory for
communication with the hypervisor.  These are called virtual processor
areas (VPAs).
Currently the hypercalls to register and unregister VPAs are handled
by KVM in the kernel, and userspace has no way to know about or save
and restore these registrations across a migration.

This adds get and set ioctls to allow userspace to see what addresses
have been registered, and to register or unregister them.  This will
be needed for guest hibernation and migration, and is also needed so
that userspace can unregister them on reset (otherwise we corrupt
guest memory after reboot by writing to the VPAs registered by the
previous kernel).  We also add a capability to indicate that the
ioctls are supported.

This also fixes a bug where we were calling init_vpa unconditionally,
leading to an oops when unregistering the VPA.

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
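For illustration only (not part of the patch): a userspace VMM could
drive the new interfaces roughly as sketched below.  The sys_fd and
vcpu_fd descriptors, the helper names and the error handling are
placeholders; the capability, the ioctl numbers and the struct layout
are the ones added by this patch.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns nonzero if the kernel supports the VPA get/set ioctls. */
static int have_vpa_ioctls(int sys_fd)		/* fd of /dev/kvm */
{
	return ioctl(sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_VPA) > 0;
}

/* Save one vCPU's VPA registrations, e.g. on the migration source. */
static int save_vpa(int vcpu_fd, struct kvm_ppc_vpa *vpa)
{
	return ioctl(vcpu_fd, KVM_PPC_GET_VPA_INFO, vpa);
}

/* Re-register the saved areas, e.g. on the migration destination. */
static int restore_vpa(int vcpu_fd, struct kvm_ppc_vpa *vpa)
{
	return ioctl(vcpu_fd, KVM_PPC_SET_VPA_INFO, vpa);
}

/* Unregister all three areas; zero addresses mean "not registered". */
static int clear_vpa(int vcpu_fd)
{
	struct kvm_ppc_vpa vpa = { 0 };

	return ioctl(vcpu_fd, KVM_PPC_SET_VPA_INFO, &vpa);
}

Calling clear_vpa() on each vCPU at system reset is what avoids the
guest-memory corruption described above; save_vpa()/restore_vpa() cover
the migration and hibernation cases.
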
 arch/powerpc/include/asm/kvm_ppc.h |    2 ++
 arch/powerpc/kvm/book3s_hv.c       |   54 +++++++++++++++++++++++++++++++++++-
 arch/powerpc/kvm/powerpc.c         |   24 ++++++++++++++++
 include/linux/kvm.h                |   12 ++++++++
 4 files changed, 91 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 41a00ea..4e0bc22 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -201,6 +201,8 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
+int kvm_vcpu_get_vpa_info(struct kvm_vcpu *vcpu, struct kvm_ppc_vpa *vpa);
+int kvm_vcpu_set_vpa_info(struct kvm_vcpu *vcpu, struct kvm_ppc_vpa *vpa);
 
 void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 817837d..53d950b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -143,6 +143,57 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
 	vpa->yield_count = 1;
 }
 
+int kvm_vcpu_get_vpa_info(struct kvm_vcpu *vcpu, struct kvm_ppc_vpa *vpa)
+{
+	spin_lock(&vcpu->arch.vpa_update_lock);
+	vpa->vpa_addr = vcpu->arch.vpa.next_gpa;
+	vpa->slb_shadow_addr = vcpu->arch.slb_shadow.next_gpa;
+	vpa->slb_shadow_size = vcpu->arch.slb_shadow.len;
+	vpa->dtl_addr = vcpu->arch.dtl.next_gpa;
+	vpa->dtl_size = vcpu->arch.dtl.len;
+	spin_unlock(&vcpu->arch.vpa_update_lock);
+	return 0;
+}
+
+static inline void set_vpa(struct kvmppc_vpa *v, unsigned long addr,
+			   unsigned long len)
+{
+	if (v->next_gpa != addr || v->len != len) {
+		v->next_gpa = addr;
+		v->len = addr ? len : 0;
+		v->update_pending = 1;
+	}
+}
+
+int kvm_vcpu_set_vpa_info(struct kvm_vcpu *vcpu, struct kvm_ppc_vpa *vpa)
+{
+	/* check that addresses are cacheline aligned */
+	if ((vpa->vpa_addr & (L1_CACHE_BYTES - 1)) ||
+	    (vpa->slb_shadow_addr & (L1_CACHE_BYTES - 1)) ||
+	    (vpa->dtl_addr & (L1_CACHE_BYTES - 1)))
+		return -EINVAL;
+
+	/* DTL must be at least 1 entry long, if being set */
+	if (vpa->dtl_addr) {
+		if (vpa->dtl_size < sizeof(struct dtl_entry))
+			return -EINVAL;
+		vpa->dtl_size -= vpa->dtl_size % sizeof(struct dtl_entry);
+	}
+
+	/* DTL and SLB shadow require VPA */
+	if (!vpa->vpa_addr && (vpa->slb_shadow_addr || vpa->dtl_addr))
+		return -EINVAL;
+
+	spin_lock(&vcpu->arch.vpa_update_lock);
+	set_vpa(&vcpu->arch.vpa, vpa->vpa_addr, sizeof(struct lppaca));
+	set_vpa(&vcpu->arch.slb_shadow, vpa->slb_shadow_addr,
+		vpa->slb_shadow_size);
+	set_vpa(&vcpu->arch.dtl, vpa->dtl_addr, vpa->dtl_size);
+	spin_unlock(&vcpu->arch.vpa_update_lock);
+
+	return 0;
+}
+
 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
 struct reg_vpa {
 	u32 dummy;
@@ -321,7 +372,8 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
 		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
-		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
+		if (vcpu->arch.vpa.pinned_addr)
+			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
 		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 10e4dfb..72764ba 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -341,6 +341,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 		if (cpu_has_feature(CPU_FTR_ARCH_201))
 			r = 2;
 		break;
+	case KVM_CAP_PPC_VPA:
+		r = 1;
+		break;
 #endif
 	case KVM_CAP_SYNC_MMU:
 #ifdef CONFIG_KVM_BOOK3S_64_HV
@@ -821,6 +824,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 #endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	case KVM_PPC_GET_VPA_INFO: {
+		struct kvm_ppc_vpa vpa;
+		r = kvm_vcpu_get_vpa_info(vcpu, &vpa);
+		if (r)
+			break;
+		r = -EFAULT;
+		if (copy_to_user(argp, &vpa, sizeof(vpa)))
+			break;
+		r = 0;
+		break;
+	}
+	case KVM_PPC_SET_VPA_INFO: {
+		struct kvm_ppc_vpa vpa;
+		r = -EFAULT;
+		if (copy_from_user(&vpa, argp, sizeof(vpa)))
+			break;
+		r = kvm_vcpu_set_vpa_info(vcpu, &vpa);
+		break;
+	}
+#endif
 	default:
 		r = -EINVAL;
 	}
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index f4f5be8..4a8bb4f 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -622,6 +622,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_S390_COW 79
 #define KVM_CAP_PPC_ALLOC_HTAB 80
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 81
+#define KVM_CAP_PPC_VPA 83
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -959,4 +960,15 @@ struct kvm_assigned_msix_entry {
 	__u16 padding[3];
 };
 
+struct kvm_ppc_vpa {
+	__u64 vpa_addr;
+	__u64 slb_shadow_addr;
+	__u64 dtl_addr;
+	__u32 slb_shadow_size;
+	__u32 dtl_size;
+};
+
+#define KVM_PPC_GET_VPA_INFO	_IOR(KVMIO, 0xae, struct kvm_ppc_vpa)
+#define KVM_PPC_SET_VPA_INFO	_IOW(KVMIO, 0xaf, struct kvm_ppc_vpa)
+
 #endif /* __LINUX_KVM_H */
-- 
1.7.10.rc3.219.g53414


Thread overview:

2012-08-28 12:30 [PATCH 0/12] Sundry fixes for Book3S HV Paul Mackerras
2012-08-28 12:32 ` [PATCH 01/12] KVM: PPC: Move kvm->arch.slot_phys into memslot.arch Paul Mackerras
2012-08-28 12:32 ` [PATCH 02/12] KVM: PPC: Book3S HV: Take the SRCU read lock before looking up memslots Paul Mackerras
2012-08-28 12:33 ` [PATCH 03/12] KVM: PPC: Book3S HV: Provide a way for userspace to get/set per-vCPU areas Paul Mackerras [this message]
2012-08-28 12:34 ` [PATCH 04/12] KVM: PPC: Book3S HV: Allow KVM guests to stop secondary threads coming online Paul Mackerras
2012-08-29  4:51   ` [PATCH v2 04/12] KVM: PPC: Book3S HV: Allow KVM guests to stop secondary threads coming online Paul Mackerras
2012-08-28 12:34 ` [PATCH 05/12] KVM: PPC: Fix updates of vcpu->cpu on HV KVM Paul Mackerras
2012-08-28 12:35 ` [PATCH 06/12] KVM: PPC: Book3S HV: Remove bogus update of thread IDs in HV KVM Paul Mackerras
2012-08-28 12:36 ` [PATCH 07/12] KVM: PPC: Book3S HV: Fix some races in starting secondary threads Paul Mackerras
2012-08-28 12:37 ` [PATCH 08/12] KVM: PPC: Book3s HV: Don't access runnable threads list without vcore lock Paul Mackerras
2012-08-28 12:38 ` [PATCH 09/12] KVM: PPC: Book3S HV: Fixes for late-joining threads Paul Mackerras
2012-08-28 12:39 ` [PATCH 10/12] KVM: PPC: Book3S HV: Run virtual core whenever any vcpus in it can run Paul Mackerras
2012-08-28 12:40 ` [PATCH 11/12] KVM: PPC: Book3S HV: Fix accounting of stolen time Paul Mackerras
2012-08-28 12:40 ` [PATCH 12/12] KVM: PPC: Book3S HV: Fix calculation of guest phys address for MMIO emulation Paul Mackerras
