From: Quan Xu <quan.xu@intel.com>
To: xen-devel@lists.xen.org
Cc: Kevin Tian <kevin.tian@intel.com>, Keir Fraser <keir@xen.org>,
	Jan Beulich <jbeulich@suse.com>,
	George Dunlap <george.dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	dario.faggioli@citrix.com, Jun Nakajima <jun.nakajima@intel.com>,
	Quan Xu <quan.xu@intel.com>
Subject: [PATCH v4 03/10] IOMMU/MMU: enhance the call trees of IOMMU unmapping and mapping
Date: Fri,  6 May 2016 16:54:33 +0800
Message-ID: <1462524880-67205-4-git-send-email-quan.xu@intel.com>
In-Reply-To: <1462524880-67205-1-git-send-email-quan.xu@intel.com>

When IOMMU mapping fails, we issue a best-effort rollback: stop further
IOMMU mapping, unmap the IOMMU mappings established so far, and then
report the error up the call trees. When rollback is not feasible for the
hardware domain (in the early initialization phase, or as a trade-off
against extra complexity), we proceed on a best-effort basis, only logging
an error message.
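
For illustration only, a minimal sketch of the mapping-side rollback
pattern this series applies; map_range() is a hypothetical wrapper, while
iommu_map_page()/iommu_unmap_page() are the interfaces this patch touches:

    /* Hypothetical helper: map a contiguous range, rolling back on failure. */
    static int map_range(struct domain *d, unsigned long gfn,
                         unsigned long mfn, unsigned int nr,
                         unsigned int flags)
    {
        unsigned int i;
        int rc = 0;

        for ( i = 0; i < nr; i++ )
        {
            rc = iommu_map_page(d, gfn + i, mfn + i, flags);
            if ( unlikely(rc) )
            {
                /* Best-effort rollback: undo the mappings made so far. */
                while ( i-- )
                    iommu_unmap_page(d, gfn + i);
                break;
            }
        }

        return rc; /* The first error, if any, is reported to the caller. */
    }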

IOMMU unmapping, by contrast, should continue despite an error, in an
attempt at best-effort cleanup; the first error encountered is recorded
and propagated to the caller.
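
Under the same assumptions, a sketch of that unmapping side, latching the
first error while still attempting the remaining unmaps (unmap_range() is
again a hypothetical wrapper):

    /* Hypothetical helper: unmap a range, continuing past errors. */
    static int unmap_range(struct domain *d, unsigned long gfn,
                           unsigned int nr)
    {
        unsigned int i;
        int rc = 0, ret;

        for ( i = 0; i < nr; i++ )
        {
            ret = iommu_unmap_page(d, gfn + i);
            /* Keep going, but remember the first error for the caller. */
            if ( !rc )
                rc = ret;
        }

        return rc;
    }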

Signed-off-by: Quan Xu <quan.xu@intel.com>

CC: Keir Fraser <keir@xen.org>
CC: Jan Beulich <jbeulich@suse.com>
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: George Dunlap <george.dunlap@eu.citrix.com>
---
 xen/arch/x86/mm.c               | 13 ++++++++-----
 xen/arch/x86/mm/p2m-ept.c       | 40 ++++++++++++++++++++++++++++++++--------
 xen/arch/x86/mm/p2m-pt.c        | 26 ++++++++++++++++++++++----
 xen/arch/x86/mm/p2m.c           | 11 +++++++++--
 xen/drivers/passthrough/iommu.c | 14 +++++++++++++-
 5 files changed, 84 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2bb920b..14b54a9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2467,7 +2467,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
                            int preemptible)
 {
     unsigned long nx, x, y = page->u.inuse.type_info;
-    int rc = 0;
+    int rc = 0, ret = 0;
 
     ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
 
@@ -2578,11 +2578,11 @@ static int __get_page_type(struct page_info *page, unsigned long type,
         if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
         {
             if ( (x & PGT_type_mask) == PGT_writable_page )
-                iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+                ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
             else if ( type == PGT_writable_page )
-                iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
-                               page_to_mfn(page),
-                               IOMMUF_readable|IOMMUF_writable);
+                ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+                                     page_to_mfn(page),
+                                     IOMMUF_readable|IOMMUF_writable);
         }
     }
 
@@ -2599,6 +2599,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
     if ( (x & PGT_partial) && !(nx & PGT_partial) )
         put_page(page);
 
+    if ( !rc )
+        rc = ret;
+
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 1ed5b47..814cb72 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -667,6 +667,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     unsigned long gfn_remainder = gfn;
     unsigned int i, target = order / EPT_TABLE_ORDER;
     int ret, rc = 0;
+    bool_t entry_written = 0;
     bool_t direct_mmio = (p2mt == p2m_mmio_direct);
     uint8_t ipat = 0;
     bool_t need_modify_vtd_table = 1;
@@ -812,17 +813,22 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     rc = atomic_write_ept_entry(ept_entry, new_entry, target);
     if ( unlikely(rc) )
         old_entry.epte = 0;
-    else if ( p2mt != p2m_invalid &&
-              (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
-        /* Track the highest gfn for which we have ever had a valid mapping */
-        p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+    else
+    {
+        entry_written = 1;
+
+        if ( p2mt != p2m_invalid &&
+             (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
+            /* Track the highest gfn for which we have ever had a valid mapping */
+            p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+    }
 
 out:
     if ( needs_sync )
         ept_sync_domain(p2m);
 
     /* For host p2m, may need to change VT-d page table.*/
-    if ( rc == 0 && p2m_is_hostp2m(p2m) && need_iommu(d) &&
+    if ( entry_written && p2m_is_hostp2m(p2m) && need_iommu(d) &&
          need_modify_vtd_table )
     {
         if ( iommu_hap_pt_share )
@@ -831,10 +837,28 @@ out:
         {
             if ( iommu_flags )
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+                {
+                    ret = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+
+                    if ( unlikely(ret) )
+                    {
+                        while ( i-- )
+                            iommu_unmap_page(d, gfn + i);
+
+                        if ( !rc )
+                            rc = ret;
+
+                        break;
+                    }
+                }
             else
                 for ( i = 0; i < (1 << order); i++ )
-                    iommu_unmap_page(d, gfn + i);
+                {
+                    ret = iommu_unmap_page(d, gfn + i);
+
+                    if ( !rc )
+                        rc = ret;
+                }
         }
     }
 
@@ -847,7 +871,7 @@ out:
     if ( is_epte_present(&old_entry) )
         ept_free_entry(p2m, &old_entry, target);
 
-    if ( rc == 0 && p2m_is_hostp2m(p2m) )
+    if ( entry_written && p2m_is_hostp2m(p2m) )
         p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
 
     return rc;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 3d80612..5426f92 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -498,7 +498,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     l1_pgentry_t intermediate_entry = l1e_empty();
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
-    int rc;
+    int rc, ret;
     unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt);
     /*
      * old_mfn and iommu_old_flags control possible flush/update needs on the
@@ -680,11 +680,29 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         }
         else if ( iommu_pte_flags )
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
-                               iommu_pte_flags);
+            {
+                ret = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+                                     iommu_pte_flags);
+
+                if ( unlikely(ret) )
+                {
+                    while ( i-- )
+                        iommu_unmap_page(p2m->domain, gfn + i);
+
+                    if ( !rc )
+                        rc = ret;
+
+                    break;
+                }
+            }
         else
             for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_unmap_page(p2m->domain, gfn + i);
+            {
+                ret = iommu_unmap_page(p2m->domain, gfn + i);
+
+                if ( !rc )
+                    rc = ret;
+            }
     }
 
     /*
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 94eabf6..cb77ef2 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -638,13 +638,20 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
     mfn_t mfn_return;
     p2m_type_t t;
     p2m_access_t a;
+    int rc = 0, ret;
 
     if ( !paging_mode_translate(p2m->domain) )
     {
         if ( need_iommu(p2m->domain) )
             for ( i = 0; i < (1 << page_order); i++ )
-                iommu_unmap_page(p2m->domain, mfn + i);
-        return 0;
+            {
+                ret = iommu_unmap_page(p2m->domain, mfn + i);
+
+                if ( !rc )
+                    rc = ret;
+            }
+
+        return rc;
     }
 
     ASSERT(gfn_locked_by_me(p2m, gfn));
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 09560c0..cca4cf3 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -171,6 +171,8 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
     {
         struct page_info *page;
         unsigned int i = 0;
+        int ret, rc = 0;
+
         page_list_for_each ( page, &d->page_list )
         {
             unsigned long mfn = page_to_mfn(page);
@@ -181,10 +183,20 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
                  ((page->u.inuse.type_info & PGT_type_mask)
                   == PGT_writable_page) )
                 mapping |= IOMMUF_writable;
-            hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+            ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+            if ( unlikely(ret) )
+                rc = ret;
+
             if ( !(i++ & 0xfffff) )
                 process_pending_softirqs();
         }
+
+        if ( rc )
+            printk(XENLOG_WARNING
+                   "iommu_hwdom_init: IOMMU mapping failed for dom%d\n",
+                   d->domain_id);
     }
 
     return hd->platform_ops->hwdom_init(d);
-- 
1.9.1



Thread overview: 64+ messages
2016-05-06  8:54 [PATCH v4 00/10] Check VT-d Device-TLB flush error Quan Xu
2016-05-06  8:54 ` [PATCH v4 01/10] vt-d: fix the IOMMU flush issue Quan Xu
2016-05-09 16:09   ` Jan Beulich
2016-05-12  7:50     ` Xu, Quan
2016-05-12  8:53       ` Jan Beulich
2016-05-12 13:29         ` Xu, Quan
2016-05-12 13:37           ` Jan Beulich
2016-05-12 13:43             ` Xu, Quan
2016-05-06  8:54 ` [PATCH v4 02/10] IOMMU: handle IOMMU mapping and unmapping failures Quan Xu
2016-05-09 16:13   ` Jan Beulich
2016-05-10  3:41     ` Xu, Quan
2016-05-10  6:53       ` Jan Beulich
2016-05-10  7:53         ` Xu, Quan
2016-05-10  8:02           ` Jan Beulich
2016-05-10  8:20             ` Xu, Quan
2016-05-10  8:26               ` Jan Beulich
2016-05-12 14:28         ` Xu, Quan
2016-05-12 15:06           ` Jan Beulich
2016-05-13  8:04             ` Xu, Quan
2016-05-13  9:08               ` Jan Beulich
2016-05-13  9:20                 ` Xu, Quan
2016-05-06  8:54 ` Quan Xu [this message]
2016-05-10  8:44   ` [PATCH v4 03/10] IOMMU/MMU: enhance the call trees of IOMMU unmapping and mapping Jan Beulich
2016-05-10 14:45     ` George Dunlap
2016-05-10 14:59       ` George Dunlap
2016-05-11  2:26         ` Xu, Quan
2016-05-11  8:45           ` George Dunlap
2016-05-11  8:58             ` Xu, Quan
2016-05-10 15:02       ` Jan Beulich
2016-05-11  2:29       ` Xu, Quan
2016-05-11  3:39     ` Xu, Quan
2016-05-11  7:02       ` Jan Beulich
2016-05-06  8:54 ` [PATCH v4 04/10] vt-d: propagate IOMMU Device-TLB flush error up to IOMMU unmapping Quan Xu
2016-05-10  8:50   ` Jan Beulich
2016-05-11  3:49     ` Xu, Quan
2016-05-06  8:54 ` [PATCH v4 05/10] vt-d: propagate IOMMU Device-TLB flush error up to IOMMU mapping Quan Xu
2016-05-06  8:54 ` [PATCH v4 06/10] IOMMU/MMU: propagate IOMMU Device-TLB flush error up to iommu_iotlb_flush{, _all} (top level ones) Quan Xu
2016-05-10  9:04   ` Jan Beulich
2016-05-11  5:52     ` Xu, Quan
2016-05-06  8:54 ` [PATCH v4 07/10] IOMMU: propagate IOMMU Device-TLB flush error up to iommu_iotlb_flush{, _all} (leaf ones) Quan Xu
2016-05-10  9:06   ` Jan Beulich
2016-05-11  6:47     ` Xu, Quan
2016-05-11  7:06       ` Jan Beulich
2016-05-11  7:12         ` Xu, Quan
2016-05-11  7:16           ` Jan Beulich
2016-05-11  7:20             ` Xu, Quan
2016-05-11  7:37               ` Jan Beulich
2016-05-06  8:54 ` [PATCH v4 08/10] vt-d/ept: propagate IOMMU Device-TLB flush error up to EPT update Quan Xu
2016-05-10  9:09   ` Jan Beulich
2016-05-10 14:58     ` George Dunlap
2016-05-10 15:04       ` Jan Beulich
2016-05-11  7:25     ` Xu, Quan
2016-05-06  8:54 ` [PATCH v4 09/10] IOMMU: propagate IOMMU Device-TLB flush error up to IOMMU suspending Quan Xu
2016-05-10  9:24   ` Jan Beulich
2016-05-13  3:39     ` Xu, Quan
2016-05-13  6:16       ` Jan Beulich
2016-05-13  6:27         ` Xu, Quan
2016-05-06  8:54 ` [PATCH v4 10/10] vt-d: propagate error up to ME phantom function mapping and unmapping Quan Xu
2016-05-10  9:29   ` Jan Beulich
2016-05-11  8:35     ` Xu, Quan
2016-05-11  9:07       ` Jan Beulich
2016-05-12  5:16         ` Xu, Quan
2016-05-12  8:44           ` Jan Beulich
2016-05-12  9:02             ` Xu, Quan
