From: Oliver Upton <oupton@google.com>
To: kvm@vger.kernel.org, kvmarm@lists.cs.columbia.edu
Cc: Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>,
	Marc Zyngier <maz@kernel.org>, Peter Shier <pshier@google.com>,
	Jim Mattson <jmattson@google.com>,
	David Matlack <dmatlack@google.com>,
	Ricardo Koller <ricarkol@google.com>,
	Jing Zhang <jingzhangos@google.com>,
	Raghavendra Rao Anata <rananta@google.com>,
	James Morse <james.morse@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	linux-arm-kernel@lists.infradead.org,
	Andrew Jones <drjones@redhat.com>, Will Deacon <will@kernel.org>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Oliver Upton <oupton@google.com>
Subject: [PATCH v6 04/21] KVM: x86: Refactor tsc synchronization code
Date: Wed,  4 Aug 2021 08:58:02 +0000	[thread overview]
Message-ID: <20210804085819.846610-5-oupton@google.com> (raw)
In-Reply-To: <20210804085819.846610-1-oupton@google.com>

Refactor kvm_synchronize_tsc, extracting a new helper,
__kvm_synchronize_tsc, that allows callers to specify the TSC
parameters (offset, value, nanoseconds, etc.) explicitly so that they
can participate in TSC synchronization.
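
As an illustration of the intended use (a sketch, not code introduced
by this patch): a caller that has already computed the TSC parameters,
such as the userspace offset control added later in this series, could
drive the helper roughly as below. The function name and the matching
condition are assumptions based on the rest of the series, as is the
exact kvm_scale_tsc()/get_kvmclock_base_ns() plumbing.

	/*
	 * Hypothetical caller: apply a caller-provided L1 TSC offset and
	 * take part in the same generation tracking as guest-initiated
	 * TSC writes.
	 */
	static void kvm_vcpu_set_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
	{
		struct kvm *kvm = vcpu->kvm;
		unsigned long flags;
		u64 tsc, ns;
		bool matched;

		raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);

		/* Extend the current generation if this write matches it. */
		matched = (vcpu->arch.virtual_tsc_khz &&
			   kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
			   kvm->arch.cur_tsc_offset == offset);

		/* Guest TSC value implied by the offset at this instant. */
		tsc = kvm_scale_tsc(vcpu, rdtsc()) + offset;
		ns = get_kvmclock_base_ns();

		__kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);

		raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
	}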

Signed-off-by: Oliver Upton <oupton@google.com>
---
 arch/x86/kvm/x86.c | 105 ++++++++++++++++++++++++++-------------------
 1 file changed, 61 insertions(+), 44 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 93b449761fbe..91aea751d621 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2443,13 +2443,71 @@ static inline bool kvm_check_tsc_unstable(void)
 	return check_tsc_unstable();
 }
 
+/*
+ * Infers attempts to synchronize the guest's tsc from host writes. Sets the
+ * offset for the vcpu and tracks the TSC matching generation that the vcpu
+ * participates in.
+ */
+static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
+				  u64 ns, bool matched)
+{
+	struct kvm *kvm = vcpu->kvm;
+	bool already_matched;
+
+	lockdep_assert_held(&kvm->arch.tsc_write_lock);
+
+	already_matched =
+	       (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
+
+	/*
+	 * We track the most recent recorded KHZ, write and time to
+	 * allow the matching interval to be extended at each write.
+	 */
+	kvm->arch.last_tsc_nsec = ns;
+	kvm->arch.last_tsc_write = tsc;
+	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
+
+	vcpu->arch.last_guest_tsc = tsc;
+
+	/* Keep track of which generation this VCPU has synchronized to */
+	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
+	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
+	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+
+	kvm_vcpu_write_tsc_offset(vcpu, offset);
+
+	if (!matched) {
+		/*
+		 * We split periods of matched TSC writes into generations.
+		 * For each generation, we track the original measured
+		 * nanosecond time, offset, and write, so if TSCs are in
+		 * sync, we can match exact offset, and if not, we can match
+		 * exact software computation in compute_guest_tsc()
+		 *
+		 * These values are tracked in kvm->arch.cur_xxx variables.
+		 */
+		kvm->arch.cur_tsc_generation++;
+		kvm->arch.cur_tsc_nsec = ns;
+		kvm->arch.cur_tsc_write = tsc;
+		kvm->arch.cur_tsc_offset = offset;
+
+		spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+		kvm->arch.nr_vcpus_matched_tsc = 0;
+	} else if (!already_matched) {
+		spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+		kvm->arch.nr_vcpus_matched_tsc++;
+	}
+
+	kvm_track_tsc_matching(vcpu);
+	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+}
+
 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	bool matched;
-	bool already_matched;
+	bool matched = false;
 	bool synchronizing = false;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
@@ -2495,50 +2553,9 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 			offset = kvm_compute_l1_tsc_offset(vcpu, data);
 		}
 		matched = true;
-		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
-	} else {
-		/*
-		 * We split periods of matched TSC writes into generations.
-		 * For each generation, we track the original measured
-		 * nanosecond time, offset, and write, so if TSCs are in
-		 * sync, we can match exact offset, and if not, we can match
-		 * exact software computation in compute_guest_tsc()
-		 *
-		 * These values are tracked in kvm->arch.cur_xxx variables.
-		 */
-		kvm->arch.cur_tsc_generation++;
-		kvm->arch.cur_tsc_nsec = ns;
-		kvm->arch.cur_tsc_write = data;
-		kvm->arch.cur_tsc_offset = offset;
-		matched = false;
 	}
 
-	/*
-	 * We also track th most recent recorded KHZ, write and time to
-	 * allow the matching interval to be extended at each write.
-	 */
-	kvm->arch.last_tsc_nsec = ns;
-	kvm->arch.last_tsc_write = data;
-	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
-
-	vcpu->arch.last_guest_tsc = data;
-
-	/* Keep track of which generation this VCPU has synchronized to */
-	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
-	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
-	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
-
-	kvm_vcpu_write_tsc_offset(vcpu, offset);
-
-	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
-	if (!matched) {
-		kvm->arch.nr_vcpus_matched_tsc = 0;
-	} else if (!already_matched) {
-		kvm->arch.nr_vcpus_matched_tsc++;
-	}
-
-	kvm_track_tsc_matching(vcpu);
-	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
+	__kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
 
-- 
2.32.0.605.g8dce9f2422-goog

