From: "Xu, Quan" <quan.xu@intel.com>
To: xen-devel@lists.xen.org
Cc: Kevin Tian <kevin.tian@intel.com>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Feng Wu <feng.wu@intel.com>, Quan Xu <quan.xu@intel.com>,
	dario.faggioli@citrix.com, Julien Grall <julien.grall@arm.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [Patch v6 08/11] IOMMU: propagate IOMMU Device-TLB flush error (leaf ones).
Date: Tue, 31 May 2016 21:57:33 +0800
Message-ID: <1464703056-4741-9-git-send-email-quan.xu@intel.com>
In-Reply-To: <1464703056-4741-1-git-send-email-quan.xu@intel.com>

From: Quan Xu <quan.xu@intel.com>
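
Change the iotlb_flush and iotlb_flush_all hooks in struct iommu_ops to
return int and annotate them __must_check, so that Device-TLB flush
errors from the leaf flush routines (ARM SMMU and VT-d) are propagated
to iommu_iotlb_flush{,_all}() and the other callers instead of being
silently ignored.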

Signed-off-by: Quan Xu <quan.xu@intel.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien.grall@arm.com>
CC: Jan Beulich <jbeulich@suse.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: Feng Wu <feng.wu@intel.com>

v6: Add __must_check annotation to intel_iommu_iotlb_flush_all().
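
Purely illustrative sketch (not part of this patch; the caller shown is
hypothetical): with the hooks now returning int and carrying
__must_check, a caller of iommu_iotlb_flush() is expected to consume the
propagated error rather than discard it, e.g.:

    static int example_flush_one_page(struct domain *d, unsigned long gfn)
    {
        /* Flush a single page's IOTLB entry and keep the error. */
        int rc = iommu_iotlb_flush(d, gfn, 1);

        if ( rc )
            printk(XENLOG_ERR
                   "d%d: IOMMU IOTLB flush for gfn %lx failed: %d\n",
                   d->domain_id, gfn, rc);

        return rc;
    }
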
---
 xen/drivers/passthrough/arm/smmu.c  | 13 ++++++++-----
 xen/drivers/passthrough/iommu.c     |  8 ++------
 xen/drivers/passthrough/vtd/iommu.c | 32 ++++++++++++++++++++------------
 xen/include/xen/iommu.h             |  5 +++--
 4 files changed, 33 insertions(+), 25 deletions(-)

diff --git a/xen/drivers/passthrough/arm/smmu.c b/xen/drivers/passthrough/arm/smmu.c
index ee5c89d..1d21568 100644
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2540,7 +2540,7 @@ static int force_stage = 2;
  */
 static u32 platform_features = ARM_SMMU_FEAT_COHERENT_WALK;
 
-static void arm_smmu_iotlb_flush_all(struct domain *d)
+static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
 {
 	struct arm_smmu_xen_domain *smmu_domain = dom_iommu(d)->arch.priv;
 	struct iommu_domain *cfg;
@@ -2557,13 +2557,16 @@ static void arm_smmu_iotlb_flush_all(struct domain *d)
 		arm_smmu_tlb_inv_context(cfg->priv);
 	}
 	spin_unlock(&smmu_domain->lock);
+
+	return 0;
 }
 
-static void arm_smmu_iotlb_flush(struct domain *d, unsigned long gfn,
-                                 unsigned int page_count)
+static int __must_check arm_smmu_iotlb_flush(struct domain *d,
+                                             unsigned long gfn,
+                                             unsigned int page_count)
 {
-    /* ARM SMMU v1 doesn't have flush by VMA and VMID */
-    arm_smmu_iotlb_flush_all(d);
+	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
+	return arm_smmu_iotlb_flush_all(d);
 }
 
 static struct iommu_domain *arm_smmu_get_domain(struct domain *d,
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index e611e72..098b601 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -320,9 +320,7 @@ int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
         return 0;
 
-    hd->platform_ops->iotlb_flush(d, gfn, page_count);
-
-    return 0;
+    return hd->platform_ops->iotlb_flush(d, gfn, page_count);
 }
 
 int iommu_iotlb_flush_all(struct domain *d)
@@ -332,9 +330,7 @@ int iommu_iotlb_flush_all(struct domain *d)
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
         return 0;
 
-    hd->platform_ops->iotlb_flush_all(d);
-
-    return 0;
+    return hd->platform_ops->iotlb_flush_all(d);
 }
 
 int __init iommu_setup(void)
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index c2bf1e2..0788a59 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -559,8 +559,10 @@ static int __must_check iommu_flush_all(void)
     return 0;
 }
 
-static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
-        int dma_old_pte_present, unsigned int page_count)
+static int __must_check iommu_flush_iotlb(struct domain *d,
+                                          unsigned long gfn,
+                                          bool_t dma_old_pte_present,
+                                          unsigned int page_count)
 {
     struct domain_iommu *hd = dom_iommu(d);
     struct acpi_drhd_unit *drhd;
@@ -598,16 +600,20 @@ static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                 iommu_flush_write_buffer(iommu);
         }
     }
+
+    return 0;
 }
 
-static void intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+static int __must_check iommu_flush_iotlb_pages(struct domain *d,
+                                                unsigned long gfn,
+                                                unsigned int page_count)
 {
-    __intel_iommu_iotlb_flush(d, gfn, 1, page_count);
+    return iommu_flush_iotlb(d, gfn, 1, page_count);
 }
 
-static void intel_iommu_iotlb_flush_all(struct domain *d)
+static int __must_check iommu_flush_iotlb_all(struct domain *d)
 {
-    __intel_iommu_iotlb_flush(d, INVALID_GFN, 0, 0);
+    return iommu_flush_iotlb(d, INVALID_GFN, 0, 0);
 }
 
 /* clear one page's page table */
@@ -616,6 +622,7 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
     struct domain_iommu *hd = dom_iommu(domain);
     struct dma_pte *page = NULL, *pte = NULL;
     u64 pg_maddr;
+    int rc = 0;
 
     spin_lock(&hd->arch.mapping_lock);
     /* get last level pte */
@@ -641,11 +648,11 @@ static int __must_check dma_pte_clear_one(struct domain *domain, u64 addr)
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+        rc = iommu_flush_iotlb_pages(domain, addr >> PAGE_SHIFT_4K, 1);
 
     unmap_vtd_domain_page(page);
 
-    return 0;
+    return rc;
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1699,6 +1706,7 @@ static int __must_check intel_iommu_map_page(struct domain *d,
     struct domain_iommu *hd = dom_iommu(d);
     struct dma_pte *page = NULL, *pte = NULL, old, new = { 0 };
     u64 pg_maddr;
+    int rc = 0;
 
     /* Do nothing if VT-d shares EPT page table */
     if ( iommu_use_hap_pt(d) )
@@ -1741,9 +1749,9 @@ static int __must_check intel_iommu_map_page(struct domain *d,
     unmap_vtd_domain_page(page);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
+        rc = iommu_flush_iotlb(d, gfn, dma_pte_present(old), 1);
 
-    return 0;
+    return rc;
 }
 
 static int __must_check intel_iommu_unmap_page(struct domain *d,
@@ -2574,8 +2582,8 @@ const struct iommu_ops intel_iommu_ops = {
     .resume = vtd_resume,
     .share_p2m = iommu_set_pgd,
     .crash_shutdown = vtd_crash_shutdown,
-    .iotlb_flush = intel_iommu_iotlb_flush,
-    .iotlb_flush_all = intel_iommu_iotlb_flush_all,
+    .iotlb_flush = iommu_flush_iotlb_pages,
+    .iotlb_flush_all = iommu_flush_iotlb_all,
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_p2m_table = vtd_dump_p2m_table,
 };
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index d197cd0..e917031 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -179,8 +179,9 @@ struct iommu_ops {
     void (*resume)(void);
     void (*share_p2m)(struct domain *d);
     void (*crash_shutdown)(void);
-    void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
-    void (*iotlb_flush_all)(struct domain *d);
+    int __must_check (*iotlb_flush)(struct domain *d, unsigned long gfn,
+                                    unsigned int page_count);
+    int __must_check (*iotlb_flush_all)(struct domain *d);
     int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
     void (*dump_p2m_table)(struct domain *d);
 };
-- 
1.9.1

