xen-devel.lists.xenproject.org archive mirror
From: "Xu, Quan" <quan.xu@intel.com>
To: xen-devel@lists.xen.org
Cc: Kevin Tian <kevin.tian@intel.com>, Keir Fraser <keir@xen.org>,
	Quan Xu <quan.xu@intel.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	dario.faggioli@citrix.com, Jan Beulich <jbeulich@suse.com>,
	Feng Wu <feng.wu@intel.com>
Subject: [PATCH v10 1/3] vt-d: fix the IOMMU flush issue
Date: Wed, 22 Jun 2016 15:51:08 +0800
Message-ID: <1466581870-53183-2-git-send-email-quan.xu@intel.com>
In-Reply-To: <1466581870-53183-1-git-send-email-quan.xu@intel.com>

From: Quan Xu <quan.xu@intel.com>

The value propagated from the IOMMU flush interfaces may be positive, which
indicates that callers need to flush the cache; it is not a failure.

When the propagated value is positive, this patch fixes the flush issue
as follows:
  - call iommu_flush_write_buffer() to flush the cache.
  - return zero.
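
The pattern applied at each call site can be summarized by the following
minimal sketch (a hypothetical caller, shown only for illustration; it is
not code taken from this patch):

    /*
     * Return-value convention of the VT-d flush helpers:
     *   - positive: caller must invoke iommu_flush_write_buffer(),
     *               then treat the flush as successful.
     *   - zero:     success.
     *   - negative: error; propagate it to the caller.
     */
    rc = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
    if ( rc > 0 )
    {
        iommu_flush_write_buffer(iommu);
        rc = 0;
    }
    return rc;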

Signed-off-by: Quan Xu <quan.xu@intel.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>

CC: Kevin Tian <kevin.tian@intel.com>
CC: Feng Wu <feng.wu@intel.com>
CC: Keir Fraser <keir@xen.org>
CC: Jan Beulich <jbeulich@suse.com>
CC: Andrew Cooper <andrew.cooper3@citrix.com>

---
v10: change 'if ( context_rc >= 0 )' to 'if ( rc >= 0 )', so that an error
     already recorded in rc is not overwritten (see the sketch below).
---
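
The rationale for the v10 change: rc folds together the results of the
context-cache and IOTLB flushes, and testing rc (rather than context_rc)
before each assignment keeps the first negative error. A minimal sketch of
the rule, mirroring the logic in iommu_flush_all():

    /* Fold two flush results into rc: the first error wins, and a
     * leftover positive value (write buffer already flushed) is
     * reported as success. */
    if ( rc >= 0 )      /* no error recorded yet */
        rc = context_rc;
    if ( rc >= 0 )      /* still no error: take the IOTLB result */
        rc = iotlb_rc;
    if ( rc > 0 )       /* positive means "flush write buffer", not an error */
        rc = 0;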
 xen/drivers/passthrough/vtd/iommu.c | 150 +++++++++++++++++++++++++-----------
 1 file changed, 103 insertions(+), 47 deletions(-)

diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 48edb67..999cdf6 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -388,17 +388,18 @@ static int flush_context_reg(
     return 0;
 }
 
-static int iommu_flush_context_global(
-    struct iommu *iommu, int flush_non_present_entry)
+static int __must_check iommu_flush_context_global(struct iommu *iommu,
+                                                   int flush_non_present_entry)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
                                  flush_non_present_entry);
 }
 
-static int iommu_flush_context_device(
-    struct iommu *iommu, u16 did, u16 source_id,
-    u8 function_mask, int flush_non_present_entry)
+static int __must_check iommu_flush_context_device(struct iommu *iommu,
+                                                   u16 did, u16 source_id,
+                                                   u8 function_mask,
+                                                   int flush_non_present_entry)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     return flush->context(iommu, did, source_id, function_mask,
@@ -473,8 +474,9 @@ static int flush_iotlb_reg(void *_iommu, u16 did,
     return 0;
 }
 
-static int iommu_flush_iotlb_global(struct iommu *iommu,
-    int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_global(struct iommu *iommu,
+                                                 int flush_non_present_entry,
+                                                 int flush_dev_iotlb)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     int status;
@@ -491,8 +493,9 @@ static int iommu_flush_iotlb_global(struct iommu *iommu,
     return status;
 }
 
-static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
-    int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
+                                              int flush_non_present_entry,
+                                              int flush_dev_iotlb)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     int status;
@@ -509,9 +512,10 @@ static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
     return status;
 }
 
-static int iommu_flush_iotlb_psi(
-    struct iommu *iommu, u16 did, u64 addr, unsigned int order,
-    int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did,
+                                              u64 addr, unsigned int order,
+                                              int flush_non_present_entry,
+                                              int flush_dev_iotlb)
 {
     struct iommu_flush *flush = iommu_get_flush(iommu);
     int status;
@@ -546,17 +550,37 @@ static int __must_check iommu_flush_all(void)
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     int flush_dev_iotlb;
+    int rc = 0;
 
     flush_all_cache();
     for_each_drhd_unit ( drhd )
     {
+        int context_rc, iotlb_rc;
+
         iommu = drhd->iommu;
-        iommu_flush_context_global(iommu, 0);
+        context_rc = iommu_flush_context_global(iommu, 0);
         flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
-        iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
+        iotlb_rc = iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
+
+        /*
+         * The current logic for returns:
+         *   - positive  invoke iommu_flush_write_buffer to flush cache.
+         *   - zero      on success.
+         *   - negative  on failure. Continue to flush IOMMU IOTLB on a
+         *               best effort basis.
+         */
+        if ( context_rc > 0 || iotlb_rc > 0 )
+            iommu_flush_write_buffer(iommu);
+        if ( rc >= 0 )
+            rc = context_rc;
+        if ( rc >= 0 )
+            rc = iotlb_rc;
     }
 
-    return 0;
+    if ( rc > 0 )
+        rc = 0;
+
+    return rc;
 }
 
 static int __must_check iommu_flush_iotlb(struct domain *d,
@@ -569,6 +593,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
     struct iommu *iommu;
     int flush_dev_iotlb;
     int iommu_domid;
+    int rc = 0;
 
     /*
      * No need pcideves_lock here because we have flush
@@ -587,21 +612,23 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
             continue;
 
         if ( page_count != 1 || gfn == INVALID_GFN )
-        {
-            if ( iommu_flush_iotlb_dsi(iommu, iommu_domid,
-                        0, flush_dev_iotlb) )
-                iommu_flush_write_buffer(iommu);
-        }
+            rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
+                                       0, flush_dev_iotlb);
         else
+            rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+                                       (paddr_t)gfn << PAGE_SHIFT_4K,
+                                       PAGE_ORDER_4K,
+                                       !dma_old_pte_present,
+                                       flush_dev_iotlb);
+
+        if ( rc > 0 )
         {
-            if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
-                        (paddr_t)gfn << PAGE_SHIFT_4K, PAGE_ORDER_4K,
-                        !dma_old_pte_present, flush_dev_iotlb) )
-                iommu_flush_write_buffer(iommu);
+            iommu_flush_write_buffer(iommu);
+            rc = 0;
         }
     }
 
-    return 0;
+    return rc;
 }
 
 static int __must_check iommu_flush_iotlb_pages(struct domain *d,
@@ -1290,7 +1317,8 @@ int domain_context_mapping_one(
     struct context_entry *context, *context_entries;
     u64 maddr, pgd_maddr;
     u16 seg = iommu->intel->drhd->segment;
-    int agaw;
+    int agaw, rc, ret;
+    int flush_dev_iotlb;
 
     ASSERT(pcidevs_locked());
     spin_lock(&iommu->lock);
@@ -1404,14 +1432,24 @@ int domain_context_mapping_one(
     spin_unlock(&iommu->lock);
 
     /* Context entry was previously non-present (with domid 0). */
-    if ( iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
-                                    DMA_CCMD_MASK_NOBIT, 1) )
+    rc = iommu_flush_context_device(iommu, 0, PCI_BDF2(bus, devfn),
+                                    DMA_CCMD_MASK_NOBIT, 1);
+    flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+    ret = iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
+
+    /*
+     * The current logic for returns:
+     *   - positive  invoke iommu_flush_write_buffer to flush cache.
+     *   - zero      on success.
+     *   - negative  on failure. Continue to flush IOMMU IOTLB on a
+     *               best effort basis.
+     */
+    if ( rc > 0 || ret > 0 )
         iommu_flush_write_buffer(iommu);
-    else
-    {
-        int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
-        iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
-    }
+    if ( rc >= 0 )
+        rc = ret;
+    if ( rc > 0 )
+        rc = 0;
 
     set_bit(iommu->index, &hd->arch.iommu_bitmap);
 
@@ -1420,7 +1458,7 @@ int domain_context_mapping_one(
     if ( !seg )
         me_wifi_quirk(domain, bus, devfn, MAP_ME_PHANTOM_FUNC);
 
-    return 0;
+    return rc;
 }
 
 static int domain_context_mapping(
@@ -1514,7 +1552,8 @@ int domain_context_unmap_one(
 {
     struct context_entry *context, *context_entries;
     u64 maddr;
-    int iommu_domid;
+    int iommu_domid, rc, ret;
+    int flush_dev_iotlb;
 
     ASSERT(pcidevs_locked());
     spin_lock(&iommu->lock);
@@ -1542,15 +1581,26 @@ int domain_context_unmap_one(
         return -EINVAL;
     }
 
-    if ( iommu_flush_context_device(iommu, iommu_domid,
-                                    (((u16)bus) << 8) | devfn,
-                                    DMA_CCMD_MASK_NOBIT, 0) )
+    rc = iommu_flush_context_device(iommu, iommu_domid,
+                                    PCI_BDF2(bus, devfn),
+                                    DMA_CCMD_MASK_NOBIT, 0);
+
+    flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+    ret = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
+
+    /*
+     * The current logic for returns:
+     *   - positive  invoke iommu_flush_write_buffer to flush cache.
+     *   - zero      on success.
+     *   - negative  on failure. Continue to flush IOMMU IOTLB on a
+     *               best effort basis.
+     */
+    if ( rc > 0 || ret > 0 )
         iommu_flush_write_buffer(iommu);
-    else
-    {
-        int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
-        iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
-    }
+    if ( rc >= 0 )
+        rc = ret;
+    if ( rc > 0 )
+        rc = 0;
 
     spin_unlock(&iommu->lock);
     unmap_vtd_domain_page(context_entries);
@@ -1558,7 +1608,7 @@ int domain_context_unmap_one(
     if ( !iommu->intel->drhd->segment )
         me_wifi_quirk(domain, bus, devfn, UNMAP_ME_PHANTOM_FUNC);
 
-    return 0;
+    return rc;
 }
 
 static int domain_context_unmap(
@@ -1772,6 +1822,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
     struct domain_iommu *hd = dom_iommu(d);
     int flush_dev_iotlb;
     int iommu_domid;
+    int rc = 0;
 
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
@@ -1785,13 +1836,18 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
         iommu_domid= domain_iommu_domid(d, iommu);
         if ( iommu_domid == -1 )
             continue;
-        if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
+
+        rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
                                    (paddr_t)gfn << PAGE_SHIFT_4K,
-                                   order, !present, flush_dev_iotlb) )
+                                   order, !present, flush_dev_iotlb);
+        if ( rc > 0 )
+        {
             iommu_flush_write_buffer(iommu);
+            rc = 0;
+        }
     }
 
-    return 0;
+    return rc;
 }
 
 static int __init vtd_ept_page_compatible(struct iommu *iommu)
-- 
1.9.1



Thread overview: 4+ messages
2016-06-22  7:51 [PATCH v10 0/3] Check VT-d Device-TLB flush error Xu, Quan
2016-06-22  7:51 ` Xu, Quan [this message]
2016-06-22  7:51 ` [PATCH v10 2/3] vt-d: propagate the IOMMU Device-TLB flush error up to ME phantom functions Xu, Quan
2016-06-22  7:51 ` [PATCH v10 3/3] vt-d: add __must_check annotation to IOMMU flush pointers and handlers Xu, Quan
