From: Paul Mackerras <paulus@samba.org>
To: Alexander Graf <agraf@suse.de>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: kvm-ppc@vger.kernel.org, kvm@vger.kernel.org
Subject: [PATCH 10/23] KVM: PPC: Book3S PR: Make HPT accesses and updates SMP-safe
Date: Tue, 6 Aug 2013 14:21:05 +1000	[thread overview]
Message-ID: <20130806042105.GP19254@iris.ozlabs.ibm.com> (raw)
In-Reply-To: <20130806041259.GF19254@iris.ozlabs.ibm.com>

This adds a per-VM mutex to provide mutual exclusion between vcpus
for accesses to and updates of the guest hashed page table (HPT).
This also makes the code use single-byte writes to the HPT entry
when updating the reference (R) and change (C) bits.  The reason
for doing this, rather than writing back the whole HPTE, is that on
non-PAPR virtual machines, the guest OS might be writing to the HPTE
concurrently, and writing back the whole HPTE might conflict with
that.  Also, real hardware does single-byte writes to update R and C.
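
To see why a one-byte store is safe, consider the layout: in the
big-endian second doubleword of an HPTE, HPTE_R_R (0x100) falls
entirely within byte 6 (bits 15:8) and HPTE_R_C (0x80) within byte 7
(bits 7:0), so storing just that byte cannot clobber a concurrent
guest update to the rest of the entry.  A minimal sketch of the idea
(hpte_set_referenced is a hypothetical name, not part of the patch):

	/*
	 * Illustration only: set R with a single-byte store, the way
	 * real hardware updates it.  Assumes <linux/uaccess.h> and
	 * the HPTE_R_* definitions from asm/mmu-hash64.h.
	 */
	static int hpte_set_referenced(u64 __user *rp, u64 r)
	{
		char __user *addr = (char __user *) rp;

		r |= HPTE_R_R;
		/* byte 6 of a big-endian u64 holds bits 15:8 */
		return put_user(r >> 8, addr + 6);
	}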

The new mutex is taken in kvmppc_mmu_book3s_64_xlate() when reading
the HPT and updating R and/or C, and in the PAPR HPT update hcalls
(H_ENTER, H_REMOVE, etc.).  Having the mutex means that we don't need
to use a hypervisor lock bit in the HPT update hcalls, and we don't
need to be careful about the order in which the bytes of the HPTE are
updated by those hcalls.
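
The resulting pattern in each hcall handler is the same; condensed
here from the diff below (validation and tlbie details elided):

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
	/* ... check validity, update the HPTE, tlbie if needed ... */
	copy_to_user((void __user *)pteg, pte, sizeof(pte));
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);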

The other change here is to make emulated TLB invalidations (tlbie)
effective across all vcpus.  To do this we call kvmppc_mmu_pte_vflush
for all vcpus in kvmppc_mmu_book3s_64_tlbie().

For 32-bit, this makes the setting of the accessed and dirty bits use
single-byte writes, and makes tlbie invalidate shadow HPTEs for all
vcpus.

With this, PR KVM can successfully run SMP guests.
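
As an illustration (not part of the patch; the kernel and initrd
paths are placeholders), an SMP pseries guest can then be booted
under PR KVM with something like:

	qemu-system-ppc64 -machine pseries,accel=kvm -smp 4 -m 1024 \
		-kernel vmlinux -initrd initrd.img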

Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/kvm_host.h |  3 +++
 arch/powerpc/kvm/book3s_32_mmu.c    | 36 ++++++++++++++++++++++--------------
 arch/powerpc/kvm/book3s_64_mmu.c    | 33 +++++++++++++++++++++++----------
 arch/powerpc/kvm/book3s_pr.c        |  1 +
 arch/powerpc/kvm/book3s_pr_papr.c   | 33 +++++++++++++++++++++++----------
 5 files changed, 72 insertions(+), 34 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2d3c770..c37207f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -259,6 +259,9 @@ struct kvm_arch {
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_KVM_BOOK3S_PR
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index af04553..856af98 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -271,19 +271,22 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Update PTE C and A bits, so the guest's swapper knows we used the
 	   page */
 	if (found) {
-		u32 oldpte = pteg[i+1];
-
-		if (pte->may_read)
-			pteg[i+1] |= PTEG_FLAG_ACCESSED;
-		if (pte->may_write)
-			pteg[i+1] |= PTEG_FLAG_DIRTY;
-		else
-			dprintk_pte("KVM: Mapping read-only page!\n");
-
-		/* Write back into the PTEG */
-		if (pteg[i+1] != oldpte)
-			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-
+		u32 pte_r = pteg[i+1];
+		char __user *addr = (char __user *) &pteg[i+1];
+
+		/*
+		 * Use single-byte writes to update the HPTE, to
+		 * conform to what real hardware does.
+		 */
+		if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
+			pte_r |= PTEG_FLAG_ACCESSED;
+			put_user(pte_r >> 8, addr + 2);
+		}
+		if (pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
+			/* XXX should only set this for stores */
+			pte_r |= PTEG_FLAG_DIRTY;
+			put_user(pte_r, addr + 3);
+		}
 		return 0;
 	}
 
@@ -348,7 +351,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
+	int i;
+	struct kvm_vcpu *v;
+
+	/* flush this VA on all cpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
 }
 
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 563fbf7..26a57ca 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -257,6 +257,8 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;
 
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+
 do_second:
 	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
 	if (kvm_is_error_hva(ptegp))
@@ -332,30 +334,37 @@ do_second:
 
 	/* Update PTE R and C bits, so the guest's swapper knows we used the
 	 * page */
-	if (gpte->may_read) {
-		/* Set the accessed flag */
+	if (gpte->may_read && !(r & HPTE_R_R)) {
+		/*
+		 * Set the accessed flag.
+		 * We have to write this back with a single byte write
+		 * because another vcpu may be accessing this on
+		 * non-PAPR platforms such as mac99, and this is
+		 * what real hardware does.
+		 */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_R;
+		put_user(r >> 8, addr + 6);
 	}
-	if (data && gpte->may_write) {
+	if (data && gpte->may_write && !(r & HPTE_R_C)) {
 		/* Set the dirty flag -- XXX even if not writing */
+		/* Use a single byte write */
+		char __user *addr = (char __user *) &pteg[i+1];
 		r |= HPTE_R_C;
+		put_user(r, addr + 7);
 	}
 
-	/* Write back into the PTEG */
-	if (pteg[i+1] != r) {
-		pteg[i+1] = r;
-		copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
-	}
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 
 	if (!gpte->may_read)
 		return -EPERM;
 	return 0;
 
 no_page_found:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 	return -ENOENT;
 
 no_seg_found:
-
 	dprintk("KVM MMU: Trigger segment fault\n");
 	return -EINVAL;
 }
@@ -520,6 +529,8 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
 				       bool large)
 {
 	u64 mask = 0xFFFFFFFFFULL;
+	long i;
+	struct kvm_vcpu *v;
 
 	dprintk("KVM MMU: tlbie(0x%lx)\n", va);
 
@@ -542,7 +553,9 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
 		if (large)
 			mask = 0xFFFFFF000ULL;
 	}
-	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
+	/* flush this VA on all vcpus */
+	kvm_for_each_vcpu(i, v, vcpu->kvm)
+		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
 }
 
 #ifdef CONFIG_PPC_64K_PAGES
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 4d39820..f61d10d 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1401,6 +1401,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
 #endif
+	mutex_init(&kvm->arch.hpt_mutex);
 
 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
 		spin_lock(&kvm_global_user_count_lock);
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 38f1899..5efa97b 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -48,6 +48,7 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
 	pte_index &= ~7UL;
 	pteg_addr = get_pteg_addr(vcpu, pte_index);
 
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
 	hpte = pteg;
 
@@ -74,6 +75,7 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
 	ret = H_SUCCESS;
 
  done:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 	kvmppc_set_gpr(vcpu, 3, ret);
 
 	return EMULATE_DONE;
@@ -86,26 +88,31 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
 	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
 	unsigned long v = 0, pteg, rb;
 	unsigned long pte[2];
+	long int ret;
 
 	pteg = get_pteg_addr(vcpu, pte_index);
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
 
+	ret = H_NOT_FOUND;
 	if ((pte[0] & HPTE_V_VALID) == 0 ||
 	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
-	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
-		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
-		return EMULATE_DONE;
-	}
+	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
+		goto done;
 
 	copy_to_user((void __user *)pteg, &v, sizeof(v));
 
 	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 
-	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+	ret = H_SUCCESS;
 	kvmppc_set_gpr(vcpu, 4, pte[0]);
 	kvmppc_set_gpr(vcpu, 5, pte[1]);
 
+ done:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+	kvmppc_set_gpr(vcpu, 3, ret);
+
 	return EMULATE_DONE;
 }
 
@@ -133,6 +140,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
 	int paramnr = 4;
 	int ret = H_SUCCESS;
 
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
 		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
 		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
@@ -181,6 +189,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
 		}
 		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
 	}
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
 	kvmppc_set_gpr(vcpu, 3, ret);
 
 	return EMULATE_DONE;
@@ -193,15 +202,16 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
 	unsigned long rb, pteg, r, v;
 	unsigned long pte[2];
+	long int ret;
 
 	pteg = get_pteg_addr(vcpu, pte_index);
+	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
 	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
 
+	ret = H_NOT_FOUND;
 	if ((pte[0] & HPTE_V_VALID) == 0 ||
-	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
-		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
-		return EMULATE_DONE;
-	}
+	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
+		goto done;
 
 	v = pte[0];
 	r = pte[1];
@@ -216,8 +226,11 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 	rb = compute_tlbie_rb(v, r, pte_index);
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 	copy_to_user((void __user *)pteg, pte, sizeof(pte));
+	ret = H_SUCCESS;
 
-	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ done:
+	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
+	kvmppc_set_gpr(vcpu, 3, ret);
 
 	return EMULATE_DONE;
 }
-- 
1.8.3.1
