* [PATCH 0/3] KVM: change gfn->hva cache to use per-VCPU memslots
@ 2017-02-15 22:00 Paolo Bonzini
  2017-02-15 22:00 ` [PATCH 1/3] KVM: only retrieve memslots once when initializing cache Paolo Bonzini
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-02-15 22:00 UTC (permalink / raw)
  To: linux-kernel, kvm

This comes from the dirty page list work, but it can also be useful for
VMCS12 handling in nested VMX.
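
For readers new to the gfn->hva cache, here is a minimal usage sketch of the
existing API that this series converts to a per-vCPU variant (illustrative
only, not part of the series; the function and variable names other than the
kvm_* calls are made up, and the prototypes match the pre-series ones quoted
from include/linux/kvm_host.h in patch 3):

static int touch_guest_u32(struct kvm *kvm, gpa_t gpa,
			   struct gfn_to_hva_cache *ghc)
{
	u32 val = 0;
	int r;

	/* Resolve gpa -> hva once and remember the memslot generation. */
	if (kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(val)))
		return -EINVAL;

	/* Cached accesses reuse the hva while the generation still matches;
	 * on a mismatch the cache is re-initialized internally. */
	r = kvm_write_guest_cached(kvm, ghc, &val, sizeof(val));
	if (r)
		return r;
	return kvm_read_guest_cached(kvm, ghc, &val, sizeof(val));
}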

Paolo

Cao, Lei (1):
  KVM: Support vCPU-based gfn->hva cache

Paolo Bonzini (2):
  KVM: only retrieve memslots once when initializing cache
  KVM: use separate generations for each address space

 arch/x86/kvm/lapic.c     | 22 +++++++-------
 arch/x86/kvm/x86.c       | 41 +++++++++++++-------------
 include/linux/kvm_host.h | 16 +++++------
 virt/kvm/kvm_main.c      | 75 +++++++++++++++++++++++++++++-------------------
 4 files changed, 83 insertions(+), 71 deletions(-)

-- 
1.8.3.1

* [PATCH 1/3] KVM: only retrieve memslots once when initializing cache
  2017-02-15 22:00 [PATCH 0/3] KVM: change gfn->hva cache to use per-VCPU memslots Paolo Bonzini
@ 2017-02-15 22:00 ` Paolo Bonzini
  2017-02-16 16:31   ` Radim Krčmář
  2017-02-15 22:00 ` [PATCH 2/3] KVM: use separate generations for each address space Paolo Bonzini
  2017-02-15 22:00 ` [PATCH 3/3] KVM: Support vCPU-based gfn->hva cache Paolo Bonzini
  2 siblings, 1 reply; 9+ messages in thread
From: Paolo Bonzini @ 2017-02-15 22:00 UTC (permalink / raw)
  To: linux-kernel, kvm

This will make it a bit simpler to handle multiple address spaces
in gfn_to_hva_cache.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 virt/kvm/kvm_main.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 482612b4e496..e21bac7ed5d3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1937,10 +1937,10 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
 
-int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa, unsigned long len)
+static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
+				       struct gfn_to_hva_cache *ghc,
+				       gpa_t gpa, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int offset = offset_in_page(gpa);
 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
@@ -1950,7 +1950,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	ghc->gpa = gpa;
 	ghc->generation = slots->generation;
 	ghc->len = len;
-	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+	ghc->memslot = __gfn_to_memslot(slots, start_gfn);
 	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
 	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
 		ghc->hva += offset;
@@ -1960,7 +1960,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 		 * verify that the entire region is valid here.
 		 */
 		while (start_gfn <= end_gfn) {
-			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+			ghc->memslot = __gfn_to_memslot(slots, start_gfn);
 			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
 						   &nr_pages_avail);
 			if (kvm_is_error_hva(ghc->hva))
@@ -1972,6 +1972,13 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	}
 	return 0;
 }
+
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa, unsigned long len)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
+}
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
@@ -1984,7 +1991,7 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	BUG_ON(len + offset > ghc->len);
 
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
 		return kvm_write_guest(kvm, gpa, data, len);
@@ -2017,7 +2024,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	BUG_ON(len > ghc->len);
 
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
 		return kvm_read_guest(kvm, ghc->gpa, data, len);
-- 
1.8.3.1

* [PATCH 2/3] KVM: use separate generations for each address space
  2017-02-15 22:00 [PATCH 0/3] KVM: change gfn->hva cache to use per-VCPU memslots Paolo Bonzini
  2017-02-15 22:00 ` [PATCH 1/3] KVM: only retrieve memslots once when initializing cache Paolo Bonzini
@ 2017-02-15 22:00 ` Paolo Bonzini
  2017-02-16 17:04   ` Radim Krčmář
  2017-02-17  0:29   ` Bandan Das
  2017-02-15 22:00 ` [PATCH 3/3] KVM: Support vCPU-based gfn->hva cache Paolo Bonzini
  2 siblings, 2 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-02-15 22:00 UTC (permalink / raw)
  To: linux-kernel, kvm

This will make it easier to support multiple address spaces in
kvm_gfn_to_hva_cache_init.  Instead of having to check the address
space id, we can keep on checking just the generation number.
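
A standalone sketch of the arithmetic this relies on (illustrative only, not
part of the patch): install_new_memslots() already bumps the generation by 1
to mark an update in progress, and the hunk below then adds
KVM_ADDRESS_SPACE_NUM * 2 - 1, so each address space stays in its own residue
class modulo KVM_ADDRESS_SPACE_NUM * 2 and a stale cache generation from one
address space can never match another's.  The -150 debug offset used at VM
creation is ignored here for clarity.

#include <assert.h>

#define KVM_ADDRESS_SPACE_NUM 2

static unsigned long bump_generation(unsigned long gen)
{
	gen += 1;				/* update in progress (odd) */
	gen += KVM_ADDRESS_SPACE_NUM * 2 - 1;	/* update finished (even)   */
	return gen;
}

int main(void)
{
	unsigned long gen0 = 0 * 2;	/* address space 0: 0, 4, 8, ...  */
	unsigned long gen1 = 1 * 2;	/* address space 1: 2, 6, 10, ... */
	int i;

	for (i = 0; i < 1000; i++) {
		assert(gen0 % (KVM_ADDRESS_SPACE_NUM * 2) == 0);
		assert(gen1 % (KVM_ADDRESS_SPACE_NUM * 2) == 2);
		assert(gen0 != gen1);	/* generations never collide */
		gen0 = bump_generation(gen0);
		gen1 = bump_generation(gen1);
	}
	return 0;
}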

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 virt/kvm/kvm_main.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e21bac7ed5d3..a83c186cefc1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -506,11 +506,6 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
 	if (!slots)
 		return NULL;
 
-	/*
-	 * Init kvm generation close to the maximum to easily test the
-	 * code of handling generation number wrap-around.
-	 */
-	slots->generation = -150;
 	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 		slots->id_to_index[i] = slots->memslots[i].id = i;
 
@@ -641,9 +636,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
 
 	r = -ENOMEM;
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		kvm->memslots[i] = kvm_alloc_memslots();
-		if (!kvm->memslots[i])
+		struct kvm_memslots *slots = kvm_alloc_memslots();
+		if (!slots)
 			goto out_err_no_srcu;
+		/*
+		 * Generations must be different for each address space.
+		 * Init kvm generation close to the maximum to easily test the
+		 * code of handling generation number wrap-around.
+		 */
+		slots->generation = i * 2 - 150;
+		rcu_assign_pointer(kvm->memslots[i], slots);
 	}
 
 	if (init_srcu_struct(&kvm->srcu))
@@ -870,8 +872,14 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 	 * Increment the new memslot generation a second time. This prevents
 	 * vm exits that race with memslot updates from caching a memslot
 	 * generation that will (potentially) be valid forever.
+	 *
+	 * Generations must be unique even across address spaces.  We do not need
+	 * a global counter for that, instead the generation space is evenly split
+	 * across address spaces.  For example, with two address spaces, address
+	 * space 0 will use generations 0, 4, 8, ... while address space 1 will
+	 * use generations 2, 6, 10, 14, ...
 	 */
-	slots->generation++;
+	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
 
 	kvm_arch_memslots_updated(kvm, slots);
 
-- 
1.8.3.1

* [PATCH 3/3] KVM: Support vCPU-based gfn->hva cache
  2017-02-15 22:00 [PATCH 0/3] KVM: change gfn->hva cache to use per-VCPU memslots Paolo Bonzini
  2017-02-15 22:00 ` [PATCH 1/3] KVM: only retrieve memslots once when initializing cache Paolo Bonzini
  2017-02-15 22:00 ` [PATCH 2/3] KVM: use separate generations for each address space Paolo Bonzini
@ 2017-02-15 22:00 ` Paolo Bonzini
  2017-02-16 17:07   ` Radim Krčmář
  2 siblings, 1 reply; 9+ messages in thread
From: Paolo Bonzini @ 2017-02-15 22:00 UTC (permalink / raw)
  To: linux-kernel, kvm; +Cc: Cao, Lei, Lei Cao

From: "Cao, Lei" <Lei.Cao@stratus.com>

Provide versions of struct gfn_to_hva_cache functions that
take vcpu as a parameter instead of struct kvm.  The existing functions
are not needed anymore, so delete them.  This allows dirty pages to
be logged in the vcpu dirty ring, instead of the global dirty ring,
for ring-based dirty memory tracking.

Signed-off-by: Lei Cao <lei.cao@stratus.com>
Message-Id: <CY1PR08MB19929BD2AC47A291FD680E83F04F0@CY1PR08MB1992.namprd08.prod.outlook.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/lapic.c     | 22 ++++++++++------------
 arch/x86/kvm/x86.c       | 41 ++++++++++++++++++++---------------------
 include/linux/kvm_host.h | 16 ++++++++--------
 virt/kvm/kvm_main.c      | 34 +++++++++++++++++-----------------
 4 files changed, 55 insertions(+), 58 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9fa5b8164961..acf959441778 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -529,16 +529,14 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 
 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 {
-
-	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
-				      sizeof(val));
+	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
+					   sizeof(val));
 }
 
 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
 {
-
-	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
-				      sizeof(*val));
+	return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
+					  sizeof(*val));
 }
 
 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
@@ -2287,8 +2285,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
 		return;
 
-	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
-				  sizeof(u32)))
+	if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+				       sizeof(u32)))
 		return;
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
@@ -2340,14 +2338,14 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 		max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
-				sizeof(u32));
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+				    sizeof(u32));
 }
 
 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
 	if (vapic_addr) {
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
 					&vcpu->arch.apic->vapic_cache,
 					vapic_addr, sizeof(u32)))
 			return -EINVAL;
@@ -2441,7 +2439,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	vcpu->arch.pv_eoi.msr_val = data;
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
-	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+	return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
 					 addr, sizeof(u8));
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2a0974383ffe..8d3047c8cce7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1811,7 +1811,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct pvclock_vcpu_time_info guest_hv_clock;
 
-	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+	if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
 		&guest_hv_clock, sizeof(guest_hv_clock))))
 		return;
 
@@ -1832,9 +1832,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
-	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-				&vcpu->hv_clock,
-				sizeof(vcpu->hv_clock.version));
+	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+				    &vcpu->hv_clock,
+				    sizeof(vcpu->hv_clock.version));
 
 	smp_wmb();
 
@@ -1848,16 +1848,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 
 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-				&vcpu->hv_clock,
-				sizeof(vcpu->hv_clock));
+	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+				    &vcpu->hv_clock,
+				    sizeof(vcpu->hv_clock));
 
 	smp_wmb();
 
 	vcpu->hv_clock.version++;
-	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
-				&vcpu->hv_clock,
-				sizeof(vcpu->hv_clock.version));
+	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
+				    &vcpu->hv_clock,
+				    sizeof(vcpu->hv_clock.version));
 }
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
@@ -2090,7 +2090,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 		return 0;
 	}
 
-	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+	if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
 					sizeof(u32)))
 		return 1;
 
@@ -2109,7 +2109,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
 		return;
 
@@ -2120,7 +2120,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
@@ -2129,14 +2129,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 
 	smp_wmb();
 
 	vcpu->arch.st.steal.version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
@@ -2241,7 +2241,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
 		     &vcpu->arch.pv_time, data & ~1ULL,
 		     sizeof(struct pvclock_vcpu_time_info)))
 			vcpu->arch.pv_time_enabled = false;
@@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (data & KVM_STEAL_RESERVED_MASK)
 			return 1;
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
 						data & KVM_STEAL_VALID_BITS,
 						sizeof(struct kvm_steal_time)))
 			return 1;
@@ -2876,7 +2876,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.st.steal.preempted = 1;
 
-	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+	kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
 			&vcpu->arch.st.steal.preempted,
 			offsetof(struct kvm_steal_time, preempted),
 			sizeof(vcpu->arch.st.steal.preempted));
@@ -8537,9 +8537,8 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 
 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 {
-
-	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
-				      sizeof(val));
+	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
+					   sizeof(val));
 }
 
 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cda457bcedc1..17fa466cd5f4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -641,18 +641,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 			  unsigned long len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
-int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, unsigned long len);
+int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+			       void *data, unsigned long len);
 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, unsigned long len);
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, int offset, unsigned long len);
-int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa, unsigned long len);
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+				void *data, unsigned long len);
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+				       void *data, int offset, unsigned long len);
+int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
+				   gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a83c186cefc1..263a80513ad9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1981,18 +1981,18 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
 	return 0;
 }
 
-int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
 
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, int offset, unsigned long len)
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+				       void *data, int offset, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
 	int r;
 	gpa_t gpa = ghc->gpa + offset;
 
@@ -2002,7 +2002,7 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
-		return kvm_write_guest(kvm, gpa, data, len);
+		return kvm_vcpu_write_guest(vcpu, gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -2014,19 +2014,19 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
 
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, unsigned long len)
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+			       void *data, unsigned long len)
 {
-	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
+	return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
 
-int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			   void *data, unsigned long len)
+int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+			       void *data, unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
 	int r;
 
 	BUG_ON(len > ghc->len);
@@ -2035,7 +2035,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
 	if (unlikely(!ghc->memslot))
-		return kvm_read_guest(kvm, ghc->gpa, data, len);
+		return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -2046,7 +2046,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
-- 
1.8.3.1

* Re: [PATCH 1/3] KVM: only retrieve memslots once when initializing cache
  2017-02-15 22:00 ` [PATCH 1/3] KVM: only retrieve memslots once when initializing cache Paolo Bonzini
@ 2017-02-16 16:31   ` Radim Krčmář
  0 siblings, 0 replies; 9+ messages in thread
From: Radim Krčmář @ 2017-02-16 16:31 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm

2017-02-15 23:00+0100, Paolo Bonzini:
> This will make it a bit simpler to handle multiple address spaces
> in gfn_to_hva_cache.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>

>  virt/kvm/kvm_main.c | 21 ++++++++++++++-------
>  1 file changed, 14 insertions(+), 7 deletions(-)
> 
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 482612b4e496..e21bac7ed5d3 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1937,10 +1937,10 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
>  }
>  EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
>  
> -int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			      gpa_t gpa, unsigned long len)
> +static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
> +				       struct gfn_to_hva_cache *ghc,
> +				       gpa_t gpa, unsigned long len)
>  {
> -	struct kvm_memslots *slots = kvm_memslots(kvm);
>  	int offset = offset_in_page(gpa);
>  	gfn_t start_gfn = gpa >> PAGE_SHIFT;
>  	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
> @@ -1950,7 +1950,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  	ghc->gpa = gpa;
>  	ghc->generation = slots->generation;
>  	ghc->len = len;
> -	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
> +	ghc->memslot = __gfn_to_memslot(slots, start_gfn);
>  	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
>  	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
>  		ghc->hva += offset;
> @@ -1960,7 +1960,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  		 * verify that the entire region is valid here.
>  		 */
>  		while (start_gfn <= end_gfn) {
> -			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
> +			ghc->memslot = __gfn_to_memslot(slots, start_gfn);
>  			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
>  						   &nr_pages_avail);
>  			if (kvm_is_error_hva(ghc->hva))
> @@ -1972,6 +1972,13 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  	}
>  	return 0;
>  }
> +
> +int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> +			      gpa_t gpa, unsigned long len)
> +{
> +	struct kvm_memslots *slots = kvm_memslots(kvm);
> +	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
> +}
>  EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
>  
>  int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> @@ -1984,7 +1991,7 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  	BUG_ON(len + offset > ghc->len);
>  
>  	if (slots->generation != ghc->generation)
> -		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
> +		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
>  
>  	if (unlikely(!ghc->memslot))
>  		return kvm_write_guest(kvm, gpa, data, len);
> @@ -2017,7 +2024,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  	BUG_ON(len > ghc->len);
>  
>  	if (slots->generation != ghc->generation)
> -		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
> +		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
>  
>  	if (unlikely(!ghc->memslot))
>  		return kvm_read_guest(kvm, ghc->gpa, data, len);
> -- 
> 1.8.3.1
> 
> 

* Re: [PATCH 2/3] KVM: use separate generations for each address space
  2017-02-15 22:00 ` [PATCH 2/3] KVM: use separate generations for each address space Paolo Bonzini
@ 2017-02-16 17:04   ` Radim Krčmář
  2017-02-17  0:29   ` Bandan Das
  1 sibling, 0 replies; 9+ messages in thread
From: Radim Krčmář @ 2017-02-16 17:04 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm

2017-02-15 23:00+0100, Paolo Bonzini:
> This will make it easier to support multiple address spaces in
> kvm_gfn_to_hva_cache_init.  Instead of having to check the address
> space id, we can keep on checking just the generation number.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>

>  virt/kvm/kvm_main.c | 24 ++++++++++++++++--------
>  1 file changed, 16 insertions(+), 8 deletions(-)
> 
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index e21bac7ed5d3..a83c186cefc1 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -506,11 +506,6 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
>  	if (!slots)
>  		return NULL;
>  
> -	/*
> -	 * Init kvm generation close to the maximum to easily test the
> -	 * code of handling generation number wrap-around.
> -	 */
> -	slots->generation = -150;
>  	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
>  		slots->id_to_index[i] = slots->memslots[i].id = i;
>  
> @@ -641,9 +636,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
>  
>  	r = -ENOMEM;
>  	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
> -		kvm->memslots[i] = kvm_alloc_memslots();
> -		if (!kvm->memslots[i])
> +		struct kvm_memslots *slots = kvm_alloc_memslots();
> +		if (!slots)
>  			goto out_err_no_srcu;
> +		/*
> +		 * Generations must be different for each address space.
> +		 * Init kvm generation close to the maximum to easily test the
> +		 * code of handling generation number wrap-around.
> +		 */
> +		slots->generation = i * 2 - 150;
> +		rcu_assign_pointer(kvm->memslots[i], slots);
>  	}
>  
>  	if (init_srcu_struct(&kvm->srcu))
> @@ -870,8 +872,14 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
>  	 * Increment the new memslot generation a second time. This prevents
>  	 * vm exits that race with memslot updates from caching a memslot
>  	 * generation that will (potentially) be valid forever.
> +	 *
> +	 * Generations must be unique even across address spaces.  We do not need
> +	 * a global counter for that, instead the generation space is evenly split
> +	 * across address spaces.  For example, with two address spaces, address
> +	 * space 0 will use generations 0, 4, 8, ... while address space 1 will
> +	 * use generations 2, 6, 10, 14, ...
>  	 */
> -	slots->generation++;
> +	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
>  
>  	kvm_arch_memslots_updated(kvm, slots);
>  
> -- 
> 1.8.3.1
> 
> 

* Re: [PATCH 3/3] KVM: Support vCPU-based gfn->hva cache
  2017-02-15 22:00 ` [PATCH 3/3] KVM: Support vCPU-based gfn->hva cache Paolo Bonzini
@ 2017-02-16 17:07   ` Radim Krčmář
  0 siblings, 0 replies; 9+ messages in thread
From: Radim Krčmář @ 2017-02-16 17:07 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm, Cao, Lei

2017-02-15 23:00+0100, Paolo Bonzini:
> From: "Cao, Lei" <Lei.Cao@stratus.com>
> 
> Provide versions of struct gfn_to_hva_cache functions that
> take vcpu as a parameter instead of struct kvm.  The existing functions
> are not needed anymore, so delete them.  This allows dirty pages to
> be logged in the vcpu dirty ring, instead of the global dirty ring,
> for ring-based dirty memory tracking.
> 
> Signed-off-by: Lei Cao <lei.cao@stratus.com>
> Message-Id: <CY1PR08MB19929BD2AC47A291FD680E83F04F0@CY1PR08MB1992.namprd08.prod.outlook.com>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>

>  arch/x86/kvm/lapic.c     | 22 ++++++++++------------
>  arch/x86/kvm/x86.c       | 41 ++++++++++++++++++++---------------------
>  include/linux/kvm_host.h | 16 ++++++++--------
>  virt/kvm/kvm_main.c      | 34 +++++++++++++++++-----------------
>  4 files changed, 55 insertions(+), 58 deletions(-)
> 
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 9fa5b8164961..acf959441778 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -529,16 +529,14 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
>  
>  static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
>  {
> -
> -	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
> -				      sizeof(val));
> +	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
> +					   sizeof(val));
>  }
>  
>  static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
>  {
> -
> -	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
> -				      sizeof(*val));
> +	return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
> +					  sizeof(*val));
>  }
>  
>  static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
> @@ -2287,8 +2285,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
>  	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
>  		return;
>  
> -	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
> -				  sizeof(u32)))
> +	if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
> +				       sizeof(u32)))
>  		return;
>  
>  	apic_set_tpr(vcpu->arch.apic, data & 0xff);
> @@ -2340,14 +2338,14 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
>  		max_isr = 0;
>  	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
>  
> -	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
> -				sizeof(u32));
> +	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
> +				    sizeof(u32));
>  }
>  
>  int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
>  {
>  	if (vapic_addr) {
> -		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
> +		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
>  					&vcpu->arch.apic->vapic_cache,
>  					vapic_addr, sizeof(u32)))
>  			return -EINVAL;
> @@ -2441,7 +2439,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
>  	vcpu->arch.pv_eoi.msr_val = data;
>  	if (!pv_eoi_enabled(vcpu))
>  		return 0;
> -	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
> +	return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
>  					 addr, sizeof(u8));
>  }
>  
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2a0974383ffe..8d3047c8cce7 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1811,7 +1811,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
>  	struct kvm_vcpu_arch *vcpu = &v->arch;
>  	struct pvclock_vcpu_time_info guest_hv_clock;
>  
> -	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
> +	if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
>  		&guest_hv_clock, sizeof(guest_hv_clock))))
>  		return;
>  
> @@ -1832,9 +1832,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
>  	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
>  
>  	vcpu->hv_clock.version = guest_hv_clock.version + 1;
> -	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> -				&vcpu->hv_clock,
> -				sizeof(vcpu->hv_clock.version));
> +	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> +				    &vcpu->hv_clock,
> +				    sizeof(vcpu->hv_clock.version));
>  
>  	smp_wmb();
>  
> @@ -1848,16 +1848,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
>  
>  	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
>  
> -	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> -				&vcpu->hv_clock,
> -				sizeof(vcpu->hv_clock));
> +	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> +				    &vcpu->hv_clock,
> +				    sizeof(vcpu->hv_clock));
>  
>  	smp_wmb();
>  
>  	vcpu->hv_clock.version++;
> -	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> -				&vcpu->hv_clock,
> -				sizeof(vcpu->hv_clock.version));
> +	kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> +				    &vcpu->hv_clock,
> +				    sizeof(vcpu->hv_clock.version));
>  }
>  
>  static int kvm_guest_time_update(struct kvm_vcpu *v)
> @@ -2090,7 +2090,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
>  		return 0;
>  	}
>  
> -	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
> +	if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
>  					sizeof(u32)))
>  		return 1;
>  
> @@ -2109,7 +2109,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
>  		return;
>  
> -	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +	if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
>  		return;
>  
> @@ -2120,7 +2120,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  
>  	vcpu->arch.st.steal.version += 1;
>  
> -	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  
>  	smp_wmb();
> @@ -2129,14 +2129,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  		vcpu->arch.st.last_steal;
>  	vcpu->arch.st.last_steal = current->sched_info.run_delay;
>  
> -	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  
>  	smp_wmb();
>  
>  	vcpu->arch.st.steal.version += 1;
>  
> -	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +	kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
>  		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  }
>  
> @@ -2241,7 +2241,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		if (!(data & 1))
>  			break;
>  
> -		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
> +		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
>  		     &vcpu->arch.pv_time, data & ~1ULL,
>  		     sizeof(struct pvclock_vcpu_time_info)))
>  			vcpu->arch.pv_time_enabled = false;
> @@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		if (data & KVM_STEAL_RESERVED_MASK)
>  			return 1;
>  
> -		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
> +		if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
>  						data & KVM_STEAL_VALID_BITS,
>  						sizeof(struct kvm_steal_time)))
>  			return 1;
> @@ -2876,7 +2876,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
>  
>  	vcpu->arch.st.steal.preempted = 1;
>  
> -	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +	kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
>  			&vcpu->arch.st.steal.preempted,
>  			offsetof(struct kvm_steal_time, preempted),
>  			sizeof(vcpu->arch.st.steal.preempted));
> @@ -8537,9 +8537,8 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
>  
>  static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
>  {
> -
> -	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
> -				      sizeof(val));
> +	return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
> +					   sizeof(val));
>  }
>  
>  void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index cda457bcedc1..17fa466cd5f4 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -641,18 +641,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
>  int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
>  			  unsigned long len);
>  int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
> -int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			   void *data, unsigned long len);
> +int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +			       void *data, unsigned long len);
>  int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
>  			 int offset, int len);
>  int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
>  		    unsigned long len);
> -int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			   void *data, unsigned long len);
> -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			   void *data, int offset, unsigned long len);
> -int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			      gpa_t gpa, unsigned long len);
> +int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> +				void *data, unsigned long len);
> +int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> +				       void *data, int offset, unsigned long len);
> +int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> +				   gpa_t gpa, unsigned long len);
>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
>  int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
>  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a83c186cefc1..263a80513ad9 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1981,18 +1981,18 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
>  	return 0;
>  }
>  
> -int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> +int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
>  			      gpa_t gpa, unsigned long len)
>  {
> -	struct kvm_memslots *slots = kvm_memslots(kvm);
> +	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
>  	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
>  }
> -EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
>  
> -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			   void *data, int offset, unsigned long len)
> +int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +				       void *data, int offset, unsigned long len)
>  {
> -	struct kvm_memslots *slots = kvm_memslots(kvm);
> +	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
>  	int r;
>  	gpa_t gpa = ghc->gpa + offset;
>  
> @@ -2002,7 +2002,7 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
>  
>  	if (unlikely(!ghc->memslot))
> -		return kvm_write_guest(kvm, gpa, data, len);
> +		return kvm_vcpu_write_guest(vcpu, gpa, data, len);
>  
>  	if (kvm_is_error_hva(ghc->hva))
>  		return -EFAULT;
> @@ -2014,19 +2014,19 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  
>  	return 0;
>  }
> -EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
>  
> -int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			   void *data, unsigned long len)
> +int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +			       void *data, unsigned long len)
>  {
> -	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
> +	return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
>  }
> -EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
>  
> -int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -			   void *data, unsigned long len)
> +int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +			       void *data, unsigned long len)
>  {
> -	struct kvm_memslots *slots = kvm_memslots(kvm);
> +	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
>  	int r;
>  
>  	BUG_ON(len > ghc->len);
> @@ -2035,7 +2035,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
>  
>  	if (unlikely(!ghc->memslot))
> -		return kvm_read_guest(kvm, ghc->gpa, data, len);
> +		return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
>  
>  	if (kvm_is_error_hva(ghc->hva))
>  		return -EFAULT;
> @@ -2046,7 +2046,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>  
>  	return 0;
>  }
> -EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
>  
>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
>  {
> -- 
> 1.8.3.1
> 

* Re: [PATCH 2/3] KVM: use separate generations for each address space
  2017-02-15 22:00 ` [PATCH 2/3] KVM: use separate generations for each address space Paolo Bonzini
  2017-02-16 17:04   ` Radim Krčmář
@ 2017-02-17  0:29   ` Bandan Das
  2017-02-17  8:28     ` Paolo Bonzini
  1 sibling, 1 reply; 9+ messages in thread
From: Bandan Das @ 2017-02-17  0:29 UTC (permalink / raw)
  To: Paolo Bonzini; +Cc: linux-kernel, kvm

Paolo Bonzini <pbonzini@redhat.com> writes:

> This will make it easier to support multiple address spaces in
> kvm_gfn_to_hva_cache_init.  Instead of having to check the address
> space id, we can keep on checking just the generation number.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  virt/kvm/kvm_main.c | 24 ++++++++++++++++--------
>  1 file changed, 16 insertions(+), 8 deletions(-)
>
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index e21bac7ed5d3..a83c186cefc1 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -506,11 +506,6 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
>  	if (!slots)
>  		return NULL;
>  
> -	/*
> -	 * Init kvm generation close to the maximum to easily test the
> -	 * code of handling generation number wrap-around.
> -	 */
> -	slots->generation = -150;
>  	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
>  		slots->id_to_index[i] = slots->memslots[i].id = i;
>  
> @@ -641,9 +636,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
>  
>  	r = -ENOMEM;
>  	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
> -		kvm->memslots[i] = kvm_alloc_memslots();
> -		if (!kvm->memslots[i])
> +		struct kvm_memslots *slots = kvm_alloc_memslots();
> +		if (!slots)
>  			goto out_err_no_srcu;
> +		/*
> +		 * Generations must be different for each address space.
> +		 * Init kvm generation close to the maximum to easily test the
> +		 * code of handling generation number wrap-around.
> +		 */
> +		slots->generation = i * 2 - 150;
> +		rcu_assign_pointer(kvm->memslots[i], slots);
>  	}

I can't seem to understand why rcu_assign_pointer wasn't used before.
kvm->memslots[i] was an RCU-protected pointer even before this change,
right?

>  	if (init_srcu_struct(&kvm->srcu))
> @@ -870,8 +872,14 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
>  	 * Increment the new memslot generation a second time. This prevents
>  	 * vm exits that race with memslot updates from caching a memslot
>  	 * generation that will (potentially) be valid forever.
> +	 *
> +	 * Generations must be unique even across address spaces.  We do not need
> +	 * a global counter for that, instead the generation space is evenly split
> +	 * across address spaces.  For example, with two address spaces, address
> +	 * space 0 will use generations 0, 4, 8, ... while address space 1 will
> +	 * use generations 2, 6, 10, 14, ...
>  	 */
> -	slots->generation++;
> +	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
>  
>  	kvm_arch_memslots_updated(kvm, slots);

* Re: [PATCH 2/3] KVM: use separate generations for each address space
  2017-02-17  0:29   ` Bandan Das
@ 2017-02-17  8:28     ` Paolo Bonzini
  0 siblings, 0 replies; 9+ messages in thread
From: Paolo Bonzini @ 2017-02-17  8:28 UTC (permalink / raw)
  To: Bandan Das; +Cc: linux-kernel, kvm

> > +		/*
> > +		 * Generations must be different for each address space.
> > +		 * Init kvm generation close to the maximum to easily test the
> > +		 * code of handling generation number wrap-around.
> > +		 */
> > +		slots->generation = i * 2 - 150;
> > +		rcu_assign_pointer(kvm->memslots[i], slots);
> >  	}
> 
> I can't seem to understand why rcu_assign_pointer wasn't used before.
> kvm->memslots[i] was a rcu protected pointer even before this change,
> right ?

Actually, a better match is RCU_INIT_POINTER.  Here there is no concurrent
reader because we're just initializing the struct kvm.  There is something
else providing synchronization between this writer and the "first" RCU
read-side.  It could be signaling a condition variable, creating a thread,
or releasing a mutex; all three of them have release semantics, which
means they imply a smp_wmb just like rcu_assign_pointer does.
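
An illustrative fragment of the two publication styles (kernel context
assumed; the struct and function names here are made up and not from the
patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int a;
};

struct foo __rcu *global_foo;

/* Publishing while readers may already be running: rcu_assign_pointer()
 * has release semantics, so a reader that sees the new pointer also sees
 * f->a == 1. */
void publish_live(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return;
	f->a = 1;
	rcu_assign_pointer(global_foo, f);
}

/* Publishing during initialization, before the object is reachable by any
 * reader: a plain store is enough, because whatever later hands the object
 * to other threads (mutex release, thread creation, ...) already provides
 * the release ordering. */
void publish_at_init(struct foo *f)
{
	RCU_INIT_POINTER(global_foo, f);
}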

Paolo


> >  	if (init_srcu_struct(&kvm->srcu))
> > @@ -870,8 +872,14 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
> >  	 * Increment the new memslot generation a second time. This prevents
> >  	 * vm exits that race with memslot updates from caching a memslot
> >  	 * generation that will (potentially) be valid forever.
> > +	 *
> > +	 * Generations must be unique even across address spaces.  We do not need
> > +	 * a global counter for that, instead the generation space is evenly split
> > +	 * across address spaces.  For example, with two address spaces, address
> > +	 * space 0 will use generations 0, 4, 8, ... while address space 1 will
> > +	 * use generations 2, 6, 10, 14, ...
> >  	 */
> > -	slots->generation++;
> > +	slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
> >  
> >  	kvm_arch_memslots_updated(kvm, slots);
> 
