From: Paul Durrant <paul@xen.org>
To: Paolo Bonzini <pbonzini@redhat.com>,
Jonathan Corbet <corbet@lwn.net>,
Sean Christopherson <seanjc@google.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
David Woodhouse <dwmw2@infradead.org>,
Paul Durrant <paul@xen.org>, Shuah Khan <shuah@kernel.org>,
kvm@vger.kernel.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org
Subject: [PATCH v12 05/20] KVM: pfncache: remove KVM_GUEST_USES_PFN usage
Date: Mon, 15 Jan 2024 12:56:52 +0000 [thread overview]
Message-ID: <20240115125707.1183-6-paul@xen.org> (raw)
In-Reply-To: <20240115125707.1183-1-paul@xen.org>
From: Paul Durrant <pdurrant@amazon.com>
As noted in [1] the KVM_GUEST_USES_PFN usage flag is never set by any
callers of kvm_gpc_init(), which also makes the 'vcpu' argument redundant.
Moreover, all existing callers specify KVM_HOST_USES_PFN so the usage
check in hva_to_pfn_retry() and hence the 'usage' argument to
kvm_gpc_init() are also redundant.
Remove the pfn_cache_usage enumeration and remove the redundant arguments,
fields of struct gfn_to_pfn_cache, and all the related code.
[1] https://lore.kernel.org/all/ZQiR8IpqOZrOpzHC@google.com/
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
---
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: x86@kernel.org
v8:
- New in this version.
---
arch/x86/kvm/x86.c | 2 +-
arch/x86/kvm/xen.c | 14 ++++-----
include/linux/kvm_host.h | 11 +------
include/linux/kvm_types.h | 8 -----
virt/kvm/pfncache.c | 61 ++++++---------------------------------
5 files changed, 16 insertions(+), 80 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0a0ac91a494f..19e6cc1dadfe 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12049,7 +12049,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
- kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
+ kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 34c48d0029c1..f9b1e494c430 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -2108,14 +2108,10 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
- kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
- kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
- kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
- kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
- KVM_HOST_USES_PFN);
+ kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm);
+ kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm);
+ kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm);
+ kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm);
}
void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@@ -2158,7 +2154,7 @@ void kvm_xen_init_vm(struct kvm *kvm)
{
mutex_init(&kvm->arch.xen.xen_lock);
idr_init(&kvm->arch.xen.evtchn_ports);
- kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
+ kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm);
}
void kvm_xen_destroy_vm(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f3bb9e0a81fe..f2354f808d04 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1319,21 +1319,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
*
* @gpc: struct gfn_to_pfn_cache object.
* @kvm: pointer to kvm instance.
- * @vcpu: vCPU to be used for marking pages dirty and to be woken on
- * invalidation.
- * @usage: indicates if the resulting host physical PFN is used while
- * the @vcpu is IN_GUEST_MODE (in which case invalidation of
- * the cache from MMU notifiers---but not for KVM memslot
- * changes!---will also force @vcpu to exit the guest and
- * refresh the cache); and/or if the PFN used directly
- * by KVM (and thus needs a kernel virtual mapping).
*
* This sets up a gfn_to_pfn_cache by initializing locks and assigning the
* immutable attributes. Note, the cache must be zero-allocated (or zeroed by
* the caller before init).
*/
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
/**
* kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 9d1f7835d8c1..d93f6522b2c3 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -49,12 +49,6 @@ typedef u64 hfn_t;
typedef hfn_t kvm_pfn_t;
-enum pfn_cache_usage {
- KVM_GUEST_USES_PFN = BIT(0),
- KVM_HOST_USES_PFN = BIT(1),
- KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
-};
-
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
@@ -69,13 +63,11 @@ struct gfn_to_pfn_cache {
unsigned long uhva;
struct kvm_memory_slot *memslot;
struct kvm *kvm;
- struct kvm_vcpu *vcpu;
struct list_head list;
rwlock_t lock;
struct mutex refresh_lock;
void *khva;
kvm_pfn_t pfn;
- enum pfn_cache_usage usage;
bool active;
bool valid;
};
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index f3571f44d9af..6f4b537eb25b 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -25,9 +25,7 @@
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
unsigned long end, bool may_block)
{
- DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
struct gfn_to_pfn_cache *gpc;
- bool evict_vcpus = false;
spin_lock(&kvm->gpc_lock);
list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -37,43 +35,10 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
gpc->uhva >= start && gpc->uhva < end) {
gpc->valid = false;
-
- /*
- * If a guest vCPU could be using the physical address,
- * it needs to be forced out of guest mode.
- */
- if (gpc->usage & KVM_GUEST_USES_PFN) {
- if (!evict_vcpus) {
- evict_vcpus = true;
- bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
- }
- __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
- }
}
write_unlock_irq(&gpc->lock);
}
spin_unlock(&kvm->gpc_lock);
-
- if (evict_vcpus) {
- /*
- * KVM needs to ensure the vCPU is fully out of guest context
- * before allowing the invalidation to continue.
- */
- unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
- bool called;
-
- /*
- * If the OOM reaper is active, then all vCPUs should have
- * been stopped already, so perform the request without
- * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
- */
- if (!may_block)
- req &= ~KVM_REQUEST_WAIT;
-
- called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
-
- WARN_ON_ONCE(called && !may_block);
- }
}
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
@@ -206,16 +171,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
* pfn. Note, kmap() and memremap() can both sleep, so this
* too must be done outside of gpc->lock!
*/
- if (gpc->usage & KVM_HOST_USES_PFN) {
- if (new_pfn == gpc->pfn)
- new_khva = old_khva;
- else
- new_khva = gpc_map(new_pfn);
-
- if (!new_khva) {
- kvm_release_pfn_clean(new_pfn);
- goto out_error;
- }
+ if (new_pfn == gpc->pfn)
+ new_khva = old_khva;
+ else
+ new_khva = gpc_map(new_pfn);
+
+ if (!new_khva) {
+ kvm_release_pfn_clean(new_pfn);
+ goto out_error;
}
write_lock_irq(&gpc->lock);
@@ -346,18 +309,12 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
return __kvm_gpc_refresh(gpc, gpc->gpa, len);
}
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
- struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
- WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
- WARN_ON_ONCE((usage & KVM_GUEST_USES_PFN) && !vcpu);
-
rwlock_init(&gpc->lock);
mutex_init(&gpc->refresh_lock);
gpc->kvm = kvm;
- gpc->vcpu = vcpu;
- gpc->usage = usage;
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->uhva = KVM_HVA_ERR_BAD;
}
--
2.39.2
next prev parent reply other threads:[~2024-01-15 13:38 UTC|newest]
Thread overview: 58+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-15 12:56 [PATCH v12 00/20] KVM: xen: update shared_info and vcpu_info handling Paul Durrant
2024-01-15 12:56 ` [PATCH v12 01/20] KVM: pfncache: Add a map helper function Paul Durrant
2024-01-15 12:56 ` [PATCH v12 02/20] KVM: pfncache: remove unnecessary exports Paul Durrant
2024-01-15 12:56 ` [PATCH v12 03/20] KVM: xen: mark guest pages dirty with the pfncache lock held Paul Durrant
2024-02-07 3:17 ` Sean Christopherson
2024-02-07 3:26 ` David Woodhouse
2024-02-07 15:15 ` Sean Christopherson
2024-02-07 8:48 ` Paul Durrant
2024-01-15 12:56 ` [PATCH v12 04/20] KVM: pfncache: add a mark-dirty helper Paul Durrant
2024-02-07 3:20 ` Sean Christopherson
2024-02-07 8:47 ` Paul Durrant
2024-02-09 15:58 ` Sean Christopherson
2024-02-09 16:05 ` Paul Durrant
2024-01-15 12:56 ` Paul Durrant [this message]
2024-01-15 12:56 ` [PATCH v12 06/20] KVM: pfncache: stop open-coding offset_in_page() Paul Durrant
2024-01-15 12:56 ` [PATCH v12 07/20] KVM: pfncache: include page offset in uhva and use it consistently Paul Durrant
2024-01-15 12:56 ` [PATCH v12 08/20] KVM: pfncache: allow a cache to be activated with a fixed (userspace) HVA Paul Durrant
2024-02-07 4:03 ` Sean Christopherson
2024-02-07 4:13 ` David Woodhouse
2024-02-14 16:01 ` Sean Christopherson
2024-02-14 16:09 ` Paul Durrant
2024-02-14 15:21 ` Paul Durrant
2024-02-14 16:20 ` Sean Christopherson
2024-02-14 16:33 ` Paul Durrant
2024-01-15 12:56 ` [PATCH v12 09/20] KVM: xen: separate initialization of shared_info cache and content Paul Durrant
2024-01-15 12:56 ` [PATCH v12 10/20] KVM: xen: re-initialize shared_info if guest (32/64-bit) mode is set Paul Durrant
2024-01-15 12:56 ` [PATCH v12 11/20] KVM: xen: allow shared_info to be mapped by fixed HVA Paul Durrant
2024-02-07 4:10 ` Sean Christopherson
2024-02-07 8:53 ` Paul Durrant
2024-02-08 8:52 ` Paul Durrant
2024-02-08 16:48 ` Sean Christopherson
2024-02-08 16:51 ` Paul Durrant
2024-02-08 17:26 ` David Woodhouse
2024-02-09 16:01 ` Sean Christopherson
2024-01-15 12:56 ` [PATCH v12 12/20] KVM: xen: allow vcpu_info " Paul Durrant
2024-01-15 12:57 ` [PATCH v12 13/20] KVM: selftests / xen: map shared_info using HVA rather than GFN Paul Durrant
2024-02-07 4:14 ` Sean Christopherson
2024-02-07 8:54 ` Paul Durrant
2024-02-07 14:58 ` Sean Christopherson
2024-01-15 12:57 ` [PATCH v12 14/20] KVM: selftests / xen: re-map vcpu_info using HVA rather than GPA Paul Durrant
2024-01-15 12:57 ` [PATCH v12 15/20] KVM: xen: advertize the KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA capability Paul Durrant
2024-01-15 12:57 ` [PATCH v12 16/20] KVM: xen: split up kvm_xen_set_evtchn_fast() Paul Durrant
2024-01-15 12:57 ` [PATCH v12 17/20] KVM: xen: don't block on pfncache locks in kvm_xen_set_evtchn_fast() Paul Durrant
2024-02-07 4:17 ` Sean Christopherson
2024-02-07 4:21 ` David Woodhouse
2024-01-15 12:57 ` [PATCH v12 18/20] KVM: pfncache: check the need for invalidation under read lock first Paul Durrant
2024-02-07 4:22 ` Sean Christopherson
2024-02-07 4:27 ` David Woodhouse
2024-02-07 4:47 ` Sean Christopherson
2024-02-07 4:59 ` David Woodhouse
2024-02-07 15:10 ` Sean Christopherson
2024-01-15 12:57 ` [PATCH v12 19/20] KVM: xen: allow vcpu_info content to be 'safely' copied Paul Durrant
2024-01-15 12:57 ` [PATCH v12 20/20] KVM: pfncache: rework __kvm_gpc_refresh() to fix locking issues Paul Durrant
2024-01-25 15:03 ` [PATCH v12 00/20] KVM: xen: update shared_info and vcpu_info handling Paul Durrant
2024-01-25 20:07 ` David Woodhouse
2024-01-26 1:19 ` Sean Christopherson
2024-02-02 17:37 ` Paul Durrant
2024-02-02 22:03 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240115125707.1183-6-paul@xen.org \
--to=paul@xen.org \
--cc=bp@alien8.de \
--cc=corbet@lwn.net \
--cc=dave.hansen@linux.intel.com \
--cc=dwmw2@infradead.org \
--cc=hpa@zytor.com \
--cc=kvm@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
--cc=shuah@kernel.org \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).