* [PATCH v3 3/5] KVM: Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached()
[not found] <201702031949.v13Jncv8032022@dev1.sn.stratus.com>
@ 2017-02-03 20:04 ` Cao, Lei
2017-02-04 4:53 ` Paolo Bonzini
0 siblings, 1 reply; 2+ messages in thread
From: Cao, Lei @ 2017-02-03 20:04 UTC (permalink / raw)
To: Paolo Bonzini, Radim Krčmář, kvm
Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached() to
take vcpu as a parameter instead of kvm. Rename the two functions to
kvm_vcpu_write_*. This is to allow dirty pages to be logged in the vcpu
dirty ring, instead of the global dirty ring, for ring-based dirty
memory tracking.
Signed-off-by: Lei Cao <lei.cao@stratus.com>
---
arch/x86/kvm/lapic.c | 4 ++--
arch/x86/kvm/x86.c | 16 ++++++++--------
include/linux/kvm_host.h | 4 ++--
virt/kvm/kvm_main.c | 16 ++++++++--------
4 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2f6ef51..6275369 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -501,7 +501,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
- return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
+ return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
sizeof(val));
}
@@ -2273,7 +2273,7 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
sizeof(u32));
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1889f62..378fc98 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1793,7 +1793,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
vcpu->hv_clock.version = guest_hv_clock.version + 1;
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock.version));
@@ -1809,14 +1809,14 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
smp_wmb();
vcpu->hv_clock.version++;
- kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock.version));
}
@@ -2081,7 +2081,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
vcpu->arch.st.steal.version += 1;
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
smp_wmb();
@@ -2090,14 +2090,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
smp_wmb();
vcpu->arch.st.steal.version += 1;
- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}
@@ -2835,7 +2835,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
vcpu->arch.st.steal.preempted = 1;
- kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
&vcpu->arch.st.steal.preempted,
offsetof(struct kvm_steal_time, preempted),
sizeof(vcpu->arch.st.steal.preempted));
@@ -8422,7 +8422,7 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
- return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+ return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
sizeof(val));
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 33d9974..65561bf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -648,9 +648,9 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 016be4d..417c0ff 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1974,20 +1974,20 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
-int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
void *data, int offset, unsigned long len)
{
- struct kvm_memslots *slots = kvm_memslots(kvm);
+ struct kvm_memslots *slots = kvm_memslots(v->kvm);
int r;
gpa_t gpa = ghc->gpa + offset;
BUG_ON(len + offset > ghc->len);
if (slots->generation != ghc->generation)
- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len);
if (unlikely(!ghc->memslot))
- return kvm_write_guest(kvm, gpa, data, len);
+ return kvm_write_guest(v->kvm, gpa, data, len);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
@@ -1999,14 +1999,14 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
- return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
+ return kvm_vcpu_write_guest_offset_cached(v, ghc, data, 0, len);
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
--
2.5.0
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH v3 3/5] KVM: Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached()
2017-02-03 20:04 ` [PATCH v3 3/5] KVM: Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached() Cao, Lei
@ 2017-02-04 4:53 ` Paolo Bonzini
0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2017-02-04 4:53 UTC (permalink / raw)
To: Cao, Lei, Radim Krčmář, kvm
On 03/02/2017 12:04, Cao, Lei wrote:
> Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached() to
> take vcpu as a parameter instead of kvm. Rename the two functions to
> kvm_vcpu_write_*. This is to allow dirty pages to be logged in the vcpu
> dirty ring, instead of the global dirty ring, for ring-based dirty
> memory tracking.
>
> Signed-off-by: Lei Cao <lei.cao@stratus.com>
I think I prefer to change everything to be vCPU-based, including
kvm_read_guest_cached and kvm_gfn_to_hva_cache_init. That is, using
kvm_vcpu_memslots and so on.
However, slots->generation right now can have false positives if the
kvm_memslots address space id differs. I'll take care of that and post
a small patch series.
Paolo
> ---
> arch/x86/kvm/lapic.c | 4 ++--
> arch/x86/kvm/x86.c | 16 ++++++++--------
> include/linux/kvm_host.h | 4 ++--
> virt/kvm/kvm_main.c | 16 ++++++++--------
> 4 files changed, 20 insertions(+), 20 deletions(-)
>
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 2f6ef51..6275369 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -501,7 +501,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
> static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
> {
>
> - return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
> + return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
> sizeof(val));
> }
>
> @@ -2273,7 +2273,7 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
> max_isr = 0;
> data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
>
> - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
> + kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
> sizeof(u32));
> }
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 1889f62..378fc98 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1793,7 +1793,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
> BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
>
> vcpu->hv_clock.version = guest_hv_clock.version + 1;
> - kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> + kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> &vcpu->hv_clock,
> sizeof(vcpu->hv_clock.version));
>
> @@ -1809,14 +1809,14 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
>
> trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
>
> - kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> + kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> &vcpu->hv_clock,
> sizeof(vcpu->hv_clock));
>
> smp_wmb();
>
> vcpu->hv_clock.version++;
> - kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> + kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> &vcpu->hv_clock,
> sizeof(vcpu->hv_clock.version));
> }
> @@ -2081,7 +2081,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>
> vcpu->arch.st.steal.version += 1;
>
> - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> + kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
> &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>
> smp_wmb();
> @@ -2090,14 +2090,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
> vcpu->arch.st.last_steal;
> vcpu->arch.st.last_steal = current->sched_info.run_delay;
>
> - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> + kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
> &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>
> smp_wmb();
>
> vcpu->arch.st.steal.version += 1;
>
> - kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> + kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
> &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
> }
>
> @@ -2835,7 +2835,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
>
> vcpu->arch.st.steal.preempted = 1;
>
> - kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
> + kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
> &vcpu->arch.st.steal.preempted,
> offsetof(struct kvm_steal_time, preempted),
> sizeof(vcpu->arch.st.steal.preempted));
> @@ -8422,7 +8422,7 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
> static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
> {
>
> - return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
> + return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
> sizeof(val));
> }
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 33d9974..65561bf 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -648,9 +648,9 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
> int offset, int len);
> int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
> unsigned long len);
> -int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> +int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> void *data, unsigned long len);
> -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> +int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> void *data, int offset, unsigned long len);
> int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> gpa_t gpa, unsigned long len);
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 016be4d..417c0ff 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1974,20 +1974,20 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> }
> EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
>
> -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> +int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> void *data, int offset, unsigned long len)
> {
> - struct kvm_memslots *slots = kvm_memslots(kvm);
> + struct kvm_memslots *slots = kvm_memslots(v->kvm);
> int r;
> gpa_t gpa = ghc->gpa + offset;
>
> BUG_ON(len + offset > ghc->len);
>
> if (slots->generation != ghc->generation)
> - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
> + kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len);
>
> if (unlikely(!ghc->memslot))
> - return kvm_write_guest(kvm, gpa, data, len);
> + return kvm_write_guest(v->kvm, gpa, data, len);
>
> if (kvm_is_error_hva(ghc->hva))
> return -EFAULT;
> @@ -1999,14 +1999,14 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>
> return 0;
> }
> -EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
>
> -int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> +int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> void *data, unsigned long len)
> {
> - return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
> + return kvm_vcpu_write_guest_offset_cached(v, ghc, data, 0, len);
> }
> -EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
>
> int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> void *data, unsigned long len)
>
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2017-02-04 4:53 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <201702031949.v13Jncv8032022@dev1.sn.stratus.com>
2017-02-03 20:04 ` [PATCH v3 3/5] KVM: Modify kvm_write_guest_cached() and kvm_write_guest_offset_cached() Cao, Lei
2017-02-04 4:53 ` Paolo Bonzini
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.