From: David Vrabel <david.vrabel@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Kevin Tian <kevin.tian@intel.com>,
	Jan Beulich <jbeulich@suse.com>,
	David Vrabel <david.vrabel@citrix.com>,
	Jun Nakajima <jun.nakajima@intel.com>
Subject: [PATCHv1 3/3] x86/ept: defer the invalidation until the p2m lock is released
Date: Fri, 6 Nov 2015 17:37:17 +0000
Message-ID: <1446831437-5897-4-git-send-email-david.vrabel@citrix.com>
In-Reply-To: <1446831437-5897-1-git-send-email-david.vrabel@citrix.com>

Holding the p2m lock while calling ept_sync_domain() is very expensive
since it does an on_selected_cpus() call.  IPIs on many-socket machines
can be very slow and on_selected_cpus() is serialized.

Defer the invalidation until the p2m lock is released.  Since the
processor may cache partial translations, we also need to make sure any
page table pages to be freed are not freed until the invalidation is
complete.  Such pages are temporarily stored on a per-PCPU list.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
---
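For illustration only (this sketch is not part of the change itself):
with the recursive p2m lock, only the outermost p2m_unlock() triggers
the flush.  The names below match the patch, but the call sequence is a
hedged sketch, not code from the tree:

    p2m_lock(p2m);          /* defer_flush: 0 -> 1 */
    p2m_lock(p2m);          /* recursive lock: defer_flush: 1 -> 2 */
    ept_sync_domain(p2m);   /* defer_flush != 0, so only need_flush is set */
    p2m_unlock(p2m);        /* defer_flush: 2 -> 1; no flush yet */
    p2m_unlock(p2m);        /* defer_flush: 1 -> 0; lock released, then
                             * p2m->flush() (ept_flush) performs the
                             * invalidation and frees the page table
                             * pages queued on the per-PCPU list. */
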
 xen/arch/x86/mm/mm-locks.h | 23 +++++++++++++++--------
 xen/arch/x86/mm/p2m-ept.c  | 17 ++++++++++++++++-
 xen/arch/x86/mm/p2m.c      | 37 +++++++++++++++++++++++++++++++++++--
 xen/include/asm-x86/p2m.h  |  6 ++++++
 4 files changed, 72 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 76c7217..473aaab 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -263,14 +263,21 @@ declare_mm_lock(altp2mlist)
  */
 
 declare_mm_rwlock(altp2m);
-#define p2m_lock(p)                         \
-{                                           \
-    if ( p2m_is_altp2m(p) )                 \
-        mm_write_lock(altp2m, &(p)->lock);  \
-    else                                    \
-        mm_write_lock(p2m, &(p)->lock);     \
-}
-#define p2m_unlock(p)         mm_write_unlock(&(p)->lock);
+#define p2m_lock(p)                             \
+    do {                                        \
+        if ( p2m_is_altp2m(p) )                 \
+            mm_write_lock(altp2m, &(p)->lock);  \
+        else                                    \
+            mm_write_lock(p2m, &(p)->lock);     \
+        (p)->defer_flush++;                     \
+    } while (0)
+#define p2m_unlock(p)                                                   \
+    do {                                                                \
+        bool_t need_flush = --(p)->defer_flush == 0 && (p)->need_flush; \
+        mm_write_unlock(&(p)->lock);                                    \
+        if ( need_flush && (p)->flush )                                 \
+            (p)->flush(p);                                              \
+    } while (0)
 #define gfn_lock(p,g,o)       p2m_lock(p)
 #define gfn_unlock(p,g,o)     p2m_unlock(p)
 #define p2m_read_lock(p)      mm_read_lock(p2m, &(p)->lock)
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index a41d7d2..a573c14 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -263,7 +263,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
         unmap_domain_page(epte);
     }
     
-    p2m_free_ptp(p2m, mfn_to_page(ept_entry->mfn));
+    p2m_free_ptp_defer(p2m, mfn_to_page(ept_entry->mfn));
 }
 
 static bool_t ept_split_super_page(struct p2m_domain *p2m,
@@ -1103,6 +1103,14 @@ void ept_sync_domain(struct p2m_domain *p2m)
     if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
         return;
 
+    if ( p2m->defer_flush )
+    {
+        p2m->need_flush = 1;
+        return;
+    }
+    else
+        p2m->need_flush = 0;
+
     ASSERT(local_irq_is_enabled());
 
     if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) )
@@ -1121,6 +1129,12 @@ void ept_sync_domain(struct p2m_domain *p2m)
                      __ept_sync_domain, p2m, 1);
 }
 
+static void ept_flush(struct p2m_domain *p2m)
+{
+    ept_sync_domain(p2m);
+    p2m_free_deferred_ptp(p2m);
+}
+
 static void ept_enable_pml(struct p2m_domain *p2m)
 {
     /* Domain must have been paused */
@@ -1169,6 +1183,7 @@ int ept_p2m_init(struct p2m_domain *p2m)
     p2m->change_entry_type_range = ept_change_entry_type_range;
     p2m->memory_type_changed = ept_memory_type_changed;
     p2m->audit_p2m = NULL;
+    p2m->flush = ept_flush;
 
     /* Set the memory type used when accessing EPT paging structures. */
     ept->ept_mt = EPT_DEFAULT_MT;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e13672d..2ad1de4 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -504,6 +504,26 @@ void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
     return;
 }
 
+DEFINE_PER_CPU(struct page_list_head, p2m_deferred_free_pages);
+
+void p2m_free_ptp_defer(struct p2m_domain *p2m, struct page_info *pg)
+{
+    page_list_del(pg, &p2m->pages);
+    page_list_add(pg, &this_cpu(p2m_deferred_free_pages));
+}
+
+void p2m_free_deferred_ptp(struct p2m_domain *p2m)
+{
+    struct page_list_head *list = &this_cpu(p2m_deferred_free_pages);
+    struct page_info *pg, *tmp;
+
+    page_list_for_each_safe(pg, tmp, list)
+    {
+        page_list_del(pg, list);
+        p2m->domain->arch.paging.free_page(p2m->domain, pg);
+    }
+}
+
 /*
  * Allocate a new p2m table for a domain.
  *
@@ -2827,20 +2847,33 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
                  "gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
                  gpfn, mfn, fgfn, tdom->domain_id, fdom->domain_id);
 
-    put_page(page);
-
     /*
      * This put_gfn for the above get_gfn for prev_mfn.  We must do this
      * after set_foreign_p2m_entry so another cpu doesn't populate the gpfn
      * before us.
      */
     put_gfn(tdom, gpfn);
+    if ( prev_page )
+        put_page(prev_page);
+    put_page(page);
 
 out:
     if ( fdom )
         rcu_unlock_domain(fdom);
     return rc;
 }
+
+int p2m_setup(void)
+{
+    unsigned int cpu;
+
+    for_each_present_cpu(cpu)
+        INIT_PAGE_LIST_HEAD(&per_cpu(p2m_deferred_free_pages, cpu));
+
+    return 0;
+}
+__initcall(p2m_setup);
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d748557..f572a1b 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -254,6 +254,10 @@ struct p2m_domain {
                                           unsigned long gfn, l1_pgentry_t *p,
                                           l1_pgentry_t new, unsigned int level);
     long               (*audit_p2m)(struct p2m_domain *p2m);
+    void               (*flush)(struct p2m_domain *p2m);
+
+    unsigned int defer_flush;
+    bool_t need_flush;
 
     /* Default P2M access type for each page in the domain: new pages,
      * swapped in pages, cleared pages, and pages that are ambiguously
@@ -681,6 +685,8 @@ static inline bool_t p2m_mem_access_sanity_check(struct domain *d)
 
 struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
+void p2m_free_ptp_defer(struct p2m_domain *p2m, struct page_info *pg);
+void p2m_free_deferred_ptp(struct p2m_domain *p2m);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
  * a call to put_gfn afterwards. */
-- 
2.1.4

Thread overview: 11+ messages
2015-11-06 17:37 [RFC PATCHv1 0/3]: x86/ept: reduce translation invalidation impact David Vrabel
2015-11-06 17:37 ` [PATCHv1 1/3] x86/ept: remove unnecessary sync after resolving misconfigured entries David Vrabel
2015-11-06 18:29   ` Andrew Cooper
2015-11-10 12:22   ` Jan Beulich
2015-11-12 16:18     ` David Vrabel
2015-11-12 16:30       ` Jan Beulich
2015-11-06 17:37 ` [PATCHv1 2/3] mm: don't free pages until mm locks are released David Vrabel
2015-11-06 17:37 ` David Vrabel [this message]
2015-11-06 18:39   ` [PATCHv1 3/3] x86/ept: defer the invalidation until the p2m lock is released Andrew Cooper
2015-11-09 14:13   ` Jan Beulich
2015-11-10 13:35   ` Tim Deegan
