From: Roger Pau Monne <roger.pau@citrix.com>
To: <xen-devel@lists.xenproject.org>
Cc: Wei Liu <wl@xen.org>, George Dunlap <george.dunlap@eu.citrix.com>,
Andrew Cooper <andrew.cooper3@citrix.com>,
Tim Deegan <tim@xen.org>, Jan Beulich <jbeulich@suse.com>,
Roger Pau Monne <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v4 2/7] x86/paging: add TLB flush hooks
Date: Mon, 10 Feb 2020 18:28:24 +0100 [thread overview]
Message-ID: <20200210172829.43604-3-roger.pau@citrix.com> (raw)
In-Reply-To: <20200210172829.43604-1-roger.pau@citrix.com>
Add shadow and hap implementation specific helpers to perform guest
TLB flushes. Note that the code for both is exactly the same at the
moment, and is copied from hvm_flush_vcpu_tlb. This will be changed by
further patches that will add implementation specific optimizations to
them.
No functional change intended.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Wei Liu <wl@xen.org>
---
Changes since v3:
- Fix stray newline removal.
- Fix return of shadow_flush_tlb dummy function.
---
xen/arch/x86/hvm/hvm.c | 51 ++----------------------------
xen/arch/x86/mm/hap/hap.c | 54 ++++++++++++++++++++++++++++++++
xen/arch/x86/mm/shadow/common.c | 55 +++++++++++++++++++++++++++++++++
xen/include/asm-x86/hap.h | 3 ++
xen/include/asm-x86/shadow.h | 12 +++++++
5 files changed, 127 insertions(+), 48 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 00a9e70b7c..4049f57232 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3990,55 +3990,10 @@ static void hvm_s3_resume(struct domain *d)
bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
void *ctxt)
{
- static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
- cpumask_t *mask = &this_cpu(flush_cpumask);
- struct domain *d = current->domain;
- struct vcpu *v;
-
- /* Avoid deadlock if more than one vcpu tries this at the same time. */
- if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
- return false;
-
- /* Pause all other vcpus. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_pause_nosync(v);
-
- /* Now that all VCPUs are signalled to deschedule, we wait... */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- while ( !vcpu_runnable(v) && v->is_running )
- cpu_relax();
-
- /* All other vcpus are paused, safe to unlock now. */
- spin_unlock(&d->hypercall_deadlock_mutex);
-
- cpumask_clear(mask);
-
- /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
- for_each_vcpu ( d, v )
- {
- unsigned int cpu;
-
- if ( !flush_vcpu(ctxt, v) )
- continue;
-
- paging_update_cr3(v, false);
+ struct domain *currd = current->domain;
- cpu = read_atomic(&v->dirty_cpu);
- if ( is_vcpu_dirty_cpu(cpu) )
- __cpumask_set_cpu(cpu, mask);
- }
-
- /* Flush TLBs on all CPUs with dirty vcpu state. */
- flush_tlb_mask(mask);
-
- /* Done. */
- for_each_vcpu ( d, v )
- if ( v != current && flush_vcpu(ctxt, v) )
- vcpu_unpause(v);
-
- return true;
+ return shadow_mode_enabled(currd) ? shadow_flush_tlb(flush_vcpu, ctxt)
+ : hap_flush_tlb(flush_vcpu, ctxt);
}
static bool always_flush(void *ctxt, struct vcpu *v)
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 3d93f3451c..6894c1aa38 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -669,6 +669,60 @@ static void hap_update_cr3(struct vcpu *v, int do_locking, bool noflush)
hvm_update_guest_cr3(v, noflush);
}
+bool hap_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
const struct paging_mode *
hap_paging_get_mode(struct vcpu *v)
{
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 6212ec2c4a..ee90e55b41 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3357,6 +3357,61 @@ out:
return rc;
}
+/* Flush TLB of selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
+{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
+ struct domain *d = current->domain;
+ struct vcpu *v;
+
+ /* Avoid deadlock if more than one vcpu tries this at the same time. */
+ if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
+ return false;
+
+ /* Pause all other vcpus. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_pause_nosync(v);
+
+ /* Now that all VCPUs are signalled to deschedule, we wait... */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ while ( !vcpu_runnable(v) && v->is_running )
+ cpu_relax();
+
+ /* All other vcpus are paused, safe to unlock now. */
+ spin_unlock(&d->hypercall_deadlock_mutex);
+
+ cpumask_clear(mask);
+
+ /* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
+ for_each_vcpu ( d, v )
+ {
+ unsigned int cpu;
+
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
+ paging_update_cr3(v, false);
+
+ cpu = read_atomic(&v->dirty_cpu);
+ if ( is_vcpu_dirty_cpu(cpu) )
+ __cpumask_set_cpu(cpu, mask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
+
+ /* Done. */
+ for_each_vcpu ( d, v )
+ if ( v != current && flush_vcpu(ctxt, v) )
+ vcpu_unpause(v);
+
+ return true;
+}
+
/**************************************************************************/
/* Shadow-control XEN_DOMCTL dispatcher */
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index b94bfb4ed0..0c6aa26b9b 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -46,6 +46,9 @@ int hap_track_dirty_vram(struct domain *d,
extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
+bool hap_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
#endif /* XEN_HAP_H */
/*
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 907c71f497..cfd4650a16 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -95,6 +95,10 @@ void shadow_blow_tables_per_domain(struct domain *d);
int shadow_set_allocation(struct domain *d, unsigned int pages,
bool *preempted);
+/* Flush the TLB of the selected vCPUs. */
+bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
#else /* !CONFIG_SHADOW_PAGING */
#define shadow_teardown(d, p) ASSERT(is_pv_domain(d))
@@ -106,6 +110,14 @@ int shadow_set_allocation(struct domain *d, unsigned int pages,
#define shadow_set_allocation(d, pages, preempted) \
({ ASSERT_UNREACHABLE(); -EOPNOTSUPP; })
+static inline bool shadow_flush_tlb(bool (*flush_vcpu)(void *ctxt,
+ struct vcpu *v),
+ void *ctxt)
+{
+ ASSERT_UNREACHABLE();
+ return false;
+}
+
static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
int fast, int all) {}
--
2.25.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
next prev parent reply other threads:[~2020-02-10 17:29 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-02-10 17:28 [Xen-devel] [PATCH v4 0/7] x86: improve assisted tlb flush and use it in guest mode Roger Pau Monne
2020-02-10 17:28 ` [Xen-devel] [PATCH v4 1/7] x86/hvm: allow ASID flush when v != current Roger Pau Monne
2020-02-10 17:28 ` Roger Pau Monne [this message]
2020-02-13 9:02 ` [Xen-devel] [PATCH v4 2/7] x86/paging: add TLB flush hooks Tim Deegan
2020-02-13 9:03 ` Tim Deegan
2020-02-10 17:28 ` [Xen-devel] [PATCH v4 3/7] x86/hap: improve hypervisor assisted guest TLB flush Roger Pau Monne
2020-02-10 17:28 ` [Xen-devel] [PATCH v4 4/7] x86/tlb: introduce a flush guests TLB flag Roger Pau Monne
2020-02-13 9:02 ` Tim Deegan
2020-02-10 17:28 ` [Xen-devel] [PATCH v4 5/7] x86/tlb: allow disabling the TLB clock Roger Pau Monne
2020-02-10 20:09 ` Wei Liu
2020-02-10 17:28 ` [Xen-devel] [PATCH v4 6/7] xen/guest: prepare hypervisor ops to use alternative calls Roger Pau Monne
2020-02-10 20:10 ` Wei Liu
2020-02-11 9:52 ` Durrant, Paul
2020-02-10 17:28 ` [Xen-devel] [PATCH v4 7/7] x86/tlb: use Xen L0 assisted TLB flush when available Roger Pau Monne
2020-02-10 20:16 ` Wei Liu
2020-02-11 10:34 ` Wei Liu
2020-02-11 14:06 ` Roger Pau Monné
2020-02-11 14:08 ` Wei Liu
2020-02-18 12:42 ` Roger Pau Monné
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200210172829.43604-3-roger.pau@citrix.com \
--to=roger.pau@citrix.com \
--cc=andrew.cooper3@citrix.com \
--cc=george.dunlap@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=tim@xen.org \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).