From: Roger Pau Monne <roger.pau@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Kevin Tian <kevin.tian@intel.com>, Wei Liu <wei.liu2@citrix.com>,
	Jan Beulich <jbeulich@suse.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Tim Deegan <tim@xen.org>, Jun Nakajima <jun.nakajima@intel.com>,
	Roger Pau Monne <roger.pau@citrix.com>
Subject: [PATCH v2 6/7] x86/mm: handle foreign mappings in p2m_entry_modify
Date: Mon, 11 Feb 2019 18:46:41 +0100
Message-ID: <20190211174642.38046-7-roger.pau@citrix.com>
In-Reply-To: <20190211174642.38046-1-roger.pau@citrix.com>

Handle foreign mappings in p2m_entry_modify, so that the specific
handling can be removed from atomic_write_ept_entry and shared with the
NPT and shadow code.

This commit also removes the check that prevented a non-EPT PVH dom0
from mapping foreign pages.
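
For reference, here is a condensed sketch of the reference counting that
p2m_entry_modify performs for foreign entries after this change (taken
from the p2m.h hunk below; the level/type early-exit and unrelated cases
are omitted for brevity):

    /* Only reached for level 1 entries whose type or MFN changes. */
    if ( nt == p2m_map_foreign )
        /* Take a reference on the page backing the new foreign entry. */
        BUG_ON(!mfn_valid(nfn) ||
               !page_get_owner_and_reference(mfn_to_page(nfn)));

    if ( ot == p2m_map_foreign )
    {
        /* Drop the reference held by the foreign entry being replaced. */
        BUG_ON(!mfn_valid(ofn));
        put_page(mfn_to_page(ofn));
    }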

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Tim Deegan <tim@xen.org>
---
Changes since v1:
 - Simplify the code, since mfn_to_page cannot return NULL.
 - Check if the mfn is valid before getting/dropping the page reference.
 - Use BUG_ON instead of ASSERTs, since getting the reference counting
   wrong is more dangerous than a DoS.
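
As a caller-side illustration, mirroring the hap_write_p2m_entry hunk
below, p2m_entry_modify is now passed both the MFN being installed and
the MFN being replaced, so the helper itself can manage the foreign page
references:

    p2m_entry_modify(p2m_get_hostp2m(d),
                     p2m_flags_to_type(l1e_get_flags(new)),
                     p2m_flags_to_type(old_flags),
                     l1e_get_mfn(new), /* new MFN being installed */
                     l1e_get_mfn(*p),  /* old MFN being replaced */
                     level);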
---
 xen/arch/x86/mm/hap/hap.c       |   3 +-
 xen/arch/x86/mm/p2m-ept.c       | 108 +++++++-------------------------
 xen/arch/x86/mm/p2m-pt.c        |   7 ---
 xen/arch/x86/mm/shadow/common.c |   3 +-
 xen/include/asm-x86/p2m.h       |  17 ++++-
 5 files changed, 40 insertions(+), 98 deletions(-)

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index dc46d5e14f..4f52639be5 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -735,7 +735,8 @@ hap_write_p2m_entry(struct domain *d, unsigned long gfn, l1_pgentry_t *p,
     }
 
     p2m_entry_modify(p2m_get_hostp2m(d), p2m_flags_to_type(l1e_get_flags(new)),
-                     p2m_flags_to_type(old_flags), level);
+                     p2m_flags_to_type(old_flags), l1e_get_mfn(new),
+                     l1e_get_mfn(*p), level);
 
     safe_write_pte(p, new);
     if ( old_flags & _PAGE_PRESENT )
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 0ece6608cb..2b0c3ab265 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -45,65 +45,13 @@ static inline bool_t is_epte_valid(ept_entry_t *e)
     return ((e->epte & ~(1ul << 63)) != 0 && e->sa_p2mt != p2m_invalid);
 }
 
-/* returns : 0 for success, -errno otherwise */
-static int atomic_write_ept_entry(struct p2m_domain *p2m,
-                                  ept_entry_t *entryptr, ept_entry_t new,
-                                  int level)
+static void atomic_write_ept_entry(struct p2m_domain *p2m,
+                                   ept_entry_t *entryptr, ept_entry_t new,
+                                   int level)
 {
-    int rc;
-    unsigned long oldmfn = mfn_x(INVALID_MFN);
-    bool_t check_foreign = (new.mfn != entryptr->mfn ||
-                            new.sa_p2mt != entryptr->sa_p2mt);
-
-    if ( level )
-    {
-        ASSERT(!is_epte_superpage(&new) || !p2m_is_foreign(new.sa_p2mt));
-        write_atomic(&entryptr->epte, new.epte);
-        return 0;
-    }
-
-    if ( unlikely(p2m_is_foreign(new.sa_p2mt)) )
-    {
-        rc = -EINVAL;
-        if ( !is_epte_present(&new) )
-                goto out;
-
-        if ( check_foreign )
-        {
-            struct domain *fdom;
-
-            if ( !mfn_valid(_mfn(new.mfn)) )
-                goto out;
-
-            rc = -ESRCH;
-            fdom = page_get_owner(mfn_to_page(_mfn(new.mfn)));
-            if ( fdom == NULL )
-                goto out;
-
-            /* get refcount on the page */
-            rc = -EBUSY;
-            if ( !get_page(mfn_to_page(_mfn(new.mfn)), fdom) )
-                goto out;
-        }
-    }
-
-    if ( unlikely(p2m_is_foreign(entryptr->sa_p2mt)) && check_foreign )
-        oldmfn = entryptr->mfn;
-
-    p2m_entry_modify(p2m, new.sa_p2mt, entryptr->sa_p2mt, level);
-
+    p2m_entry_modify(p2m, new.sa_p2mt, entryptr->sa_p2mt, _mfn(new.mfn),
+                     _mfn(entryptr->mfn), level);
     write_atomic(&entryptr->epte, new.epte);
-
-    if ( unlikely(oldmfn != mfn_x(INVALID_MFN)) )
-        put_page(mfn_to_page(_mfn(oldmfn)));
-
-    rc = 0;
-
- out:
-    if ( rc )
-        gdprintk(XENLOG_ERR, "epte o:%"PRIx64" n:%"PRIx64" rc:%d\n",
-                 entryptr->epte, new.epte, rc);
-    return rc;
 }
 
 static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
@@ -396,7 +344,6 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
 static bool_t ept_invalidate_emt(struct p2m_domain *p2m, mfn_t mfn,
                                  bool_t recalc, int level)
 {
-    int rc;
     ept_entry_t *epte = map_domain_page(mfn);
     unsigned int i;
     bool_t changed = 0;
@@ -412,8 +359,7 @@ static bool_t ept_invalidate_emt(struct p2m_domain *p2m, mfn_t mfn,
         e.emt = MTRR_NUM_TYPES;
         if ( recalc )
             e.recalc = 1;
-        rc = atomic_write_ept_entry(p2m, &epte[i], e, level);
-        ASSERT(rc == 0);
+        atomic_write_ept_entry(p2m, &epte[i], e, level);
         changed = 1;
     }
 
@@ -437,7 +383,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
     ept_entry_t *table;
     unsigned long gfn_remainder = first_gfn;
     unsigned int i, index;
-    int wrc, rc = 0, ret = GUEST_TABLE_MAP_FAILED;
+    int rc = 0, ret = GUEST_TABLE_MAP_FAILED;
 
     table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     for ( i = p2m->ept.wl; i > target; --i )
@@ -463,8 +409,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
             rc = -ENOMEM;
             goto out;
         }
-        wrc = atomic_write_ept_entry(p2m, &table[index], split_ept_entry, i);
-        ASSERT(wrc == 0);
+        atomic_write_ept_entry(p2m, &table[index], split_ept_entry, i);
 
         for ( ; i > target; --i )
             if ( !ept_next_level(p2m, 1, &table, &gfn_remainder, i) )
@@ -483,8 +428,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
         {
             e.emt = MTRR_NUM_TYPES;
             e.recalc = 1;
-            wrc = atomic_write_ept_entry(p2m, &table[index], e, target);
-            ASSERT(wrc == 0);
+            atomic_write_ept_entry(p2m, &table[index], e, target);
             rc = 1;
         }
     }
@@ -513,7 +457,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
     unsigned int level = ept->wl;
     unsigned long mfn = ept->mfn;
     ept_entry_t *epte;
-    int wrc, rc = 0;
+    int rc = 0;
 
     if ( !mfn )
         return 0;
@@ -557,8 +501,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
                         ept_p2m_type_to_flags(p2m, &e, e.sa_p2mt, e.access);
                     }
                     e.recalc = 0;
-                    wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
-                    ASSERT(wrc == 0);
+                    atomic_write_ept_entry(p2m, &epte[i], e, level);
                 }
             }
             else
@@ -593,8 +536,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
                 {
                     if ( ept_split_super_page(p2m, &e, level, level - 1) )
                     {
-                        wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
-                        ASSERT(wrc == 0);
+                        atomic_write_ept_entry(p2m, &epte[i], e, level);
                         unmap_domain_page(epte);
                         mfn = e.mfn;
                         continue;
@@ -608,8 +550,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
                 e.recalc = 0;
                 if ( recalc && p2m_is_changeable(e.sa_p2mt) )
                     ept_p2m_type_to_flags(p2m, &e, e.sa_p2mt, e.access);
-                wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
-                ASSERT(wrc == 0);
+                atomic_write_ept_entry(p2m, &epte[i], e, level);
             }
 
             rc = 1;
@@ -623,8 +564,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
             smp_wmb();
             e.emt = 0;
             e.recalc = 0;
-            wrc = atomic_write_ept_entry(p2m, &epte[i], e, level);
-            ASSERT(wrc == 0);
+            atomic_write_ept_entry(p2m, &epte[i], e, level);
             unmap_domain_page(epte);
             rc = 1;
         }
@@ -784,8 +724,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
 
         /* now install the newly split ept sub-tree */
         /* NB: please make sure domian is paused and no in-fly VT-d DMA. */
-        rc = atomic_write_ept_entry(p2m, ept_entry, split_ept_entry, i);
-        ASSERT(rc == 0);
+        atomic_write_ept_entry(p2m, ept_entry, split_ept_entry, i);
 
         /* then move to the level we want to make real changes */
         for ( ; i > target; i-- )
@@ -831,18 +770,13 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         new_entry.suppress_ve = is_epte_valid(&old_entry) ?
                                     old_entry.suppress_ve : 1;
 
-    rc = atomic_write_ept_entry(p2m, ept_entry, new_entry, target);
-    if ( unlikely(rc) )
-        old_entry.epte = 0;
-    else
-    {
-        entry_written = 1;
+    atomic_write_ept_entry(p2m, ept_entry, new_entry, target);
+    entry_written = 1;
 
-        if ( p2mt != p2m_invalid &&
-             (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
-            /* Track the highest gfn for which we have ever had a valid mapping */
-            p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
-    }
+    if ( p2mt != p2m_invalid &&
+         (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
+        /* Track the highest gfn for which we have ever had a valid mapping */
+        p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
 
 out:
     if ( needs_sync )
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index fd6386b8fd..aea8110254 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -523,13 +523,6 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
     }
 
-    if ( unlikely(p2m_is_foreign(p2mt)) )
-    {
-        /* hvm fixme: foreign types are only supported on ept at present */
-        gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
-        return -EINVAL;
-    }
-
     /* Carry out any eventually pending earlier changes first. */
     rc = do_recalc(p2m, gfn);
     if ( rc < 0 )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 6d8a950054..01fe81230a 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3181,7 +3181,8 @@ shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
          sh_unshadow_for_p2m_change(d, gfn, p, new, level);
 
     p2m_entry_modify(p2m_get_hostp2m(d), p2m_flags_to_type(l1e_get_flags(new)),
-                     p2m_flags_to_type(l1e_get_flags(*p)), level);
+                     p2m_flags_to_type(l1e_get_flags(*p)), l1e_get_mfn(new),
+                     l1e_get_mfn(*p), level);
 
     /* Update the entry with new content */
     safe_write_pte(p, new);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 834d49d2d4..0de27239be 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -933,9 +933,12 @@ struct hvm_ioreq_server *p2m_get_ioreq_server(struct domain *d,
                                               unsigned int *flags);
 
 static inline void p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
-                                    p2m_type_t ot, unsigned int level)
+                                    p2m_type_t ot, mfn_t nfn, mfn_t ofn,
+                                    unsigned int level)
 {
-    if ( level != 1 || nt == ot )
+    BUG_ON(level > 1 && (nt == p2m_ioreq_server || nt == p2m_map_foreign));
+
+    if ( level != 1 || (nt == ot && mfn_eq(nfn, ofn)) )
         return;
 
     switch ( nt )
@@ -948,6 +951,11 @@ static inline void p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
         p2m->ioreq.entry_count++;
         break;
 
+    case p2m_map_foreign:
+        BUG_ON(!mfn_valid(nfn) ||
+               !page_get_owner_and_reference(mfn_to_page(nfn)));
+        break;
+
     default:
         break;
     }
@@ -959,6 +967,11 @@ static inline void p2m_entry_modify(struct p2m_domain *p2m, p2m_type_t nt,
         p2m->ioreq.entry_count--;
         break;
 
+    case p2m_map_foreign:
+        BUG_ON(!mfn_valid(ofn));
+        put_page(mfn_to_page(ofn));
+        break;
+
     default:
         break;
     }
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Thread overview: 26+ messages
2019-02-11 17:46 [PATCH for-4.12 v2 0/7] pvh/dom0/shadow/amd fixes Roger Pau Monne
2019-02-11 17:46 ` [PATCH for-4.12 v2 1/7] dom0/pvh: align allocation and mapping order to start address Roger Pau Monne
2019-02-12 10:52   ` Wei Liu
2019-02-13 15:32   ` Jan Beulich
2019-02-11 17:46 ` [PATCH for-4.12 v2 2/7] amd/npt/shadow: replace assert that prevents creating 2M/1G MMIO entries Roger Pau Monne
2019-02-13 15:53   ` Jan Beulich
2019-02-14 13:59     ` Roger Pau Monné
2019-02-14 14:48       ` Jan Beulich
2019-02-11 17:46 ` [PATCH for-4.12 v2 3/7] x86/pvh: reorder PVH dom0 iommu initialization Roger Pau Monne
2019-02-13 15:58   ` Jan Beulich
2019-02-11 17:46 ` [PATCH for-4.12 v2 4/7] pvh/dom0: warn when dom0_mem is not set Roger Pau Monne
2019-02-12 10:52   ` Wei Liu
2019-02-13 16:01   ` Jan Beulich
2019-02-13 17:13     ` Roger Pau Monné
2019-02-14  8:09       ` Jan Beulich
2019-02-11 17:46 ` [PATCH v2 5/7] x86/mm: split p2m ioreq server pages special handling into helper Roger Pau Monne
2019-02-13 10:23   ` Paul Durrant
2019-02-11 17:46 ` Roger Pau Monne [this message]
2019-02-14 11:25   ` [PATCH v2 6/7] x86/mm: handle foreign mappings in p2m_entry_modify Jan Beulich
2019-02-14 12:12     ` Roger Pau Monné
2019-02-14 12:25       ` Jan Beulich
2019-02-11 17:46 ` [PATCH v2 7/7] npt/shadow: allow getting foreign page table entries Roger Pau Monne
2019-02-14 11:38   ` Jan Beulich
2019-02-14 12:16     ` Roger Pau Monné
2019-02-14 12:30       ` Jan Beulich
2019-02-11 19:50 ` [PATCH for-4.12 v2 0/7] pvh/dom0/shadow/amd fixes Sander Eikelenboom
