From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul@xen.org>, Kevin Tian <kevin.tian@intel.com>,
	Julien Grall <julien@xen.org>,
	Stefano Stabellini <sstabellini@kernel.org>,
	Volodymyr Babchuk <volodymyr_babchuk@epam.com>,
	Bertrand Marquis <bertrand.marquis@arm.com>,
	Rahul Singh <Rahul.Singh@arm.com>
Subject: [PATCH v2 14/18] IOMMU: fold flush-all hook into "flush one"
Date: Fri, 24 Sep 2021 11:53:59 +0200
Message-ID: <e40ee980-9151-101a-1484-b1710aaeafb0@suse.com>
In-Reply-To: <957f067b-9fe1-2350-4266-51982f09d3a9@suse.com>

Having a separate flush-all hook has always puzzled me somewhat. We
will want to be able to force a full flush via accumulated flush flags
from the map/unmap functions. Introduce a new flag for this purpose and
fold all flush handling to use the single remaining hook.
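
This means the core flush-all path simply dispatches through the one
remaining hook, passing the accumulated flags plus the new one (this is
exactly what the xen/drivers/passthrough/iommu.c hunk below does):

    rc = iommu_call(hd->platform_ops, iotlb_flush, d, INVALID_DFN, 0,
                    flush_flags | IOMMU_FLUSHF_all);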

Note that because of the respective comments in the SMMU and IPMMU-VMSA
code, I've folded the two prior hook functions into one there. For
SMMU-v3, which lacks such a comment about the hardware being incapable
of selective flushes, I've left both functions in place on the
assumption that selective and full flushes will eventually want
separating.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
TBD: What we really are going to need is for the map/unmap functions to
     specify that a wider region needs flushing than just the one
     covered by the present set of (un)maps. This may still be less than
     a full flush, but at least as a first step it seemed better to me
     to keep things simple and go the flush-all route.
---
v2: New.
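
As a reading aid for the per-driver hunks below, the shape every
vendor's remaining hook ends up with is roughly the following. This is
a condensed sketch only; flush_one() and flush_everything() are
placeholder names, not functions present in the tree:

    static int __must_check flush_one(struct domain *d, dfn_t dfn,
                                      unsigned long page_count,
                                      unsigned int flush_flags)
    {
        if ( flush_flags & IOMMU_FLUSHF_all )
            /* dfn/page_count don't matter here - flush everything. */
            return flush_everything(d);

        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
        ASSERT(flush_flags);

        /* ... selective flush of [dfn, dfn + page_count) ... */
        return 0;
    }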

--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -242,7 +242,6 @@ int amd_iommu_get_reserved_device_memory
 int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
                                              unsigned long page_count,
                                              unsigned int flush_flags);
-int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
 void amd_iommu_print_entries(const struct amd_iommu *iommu, unsigned int dev_id,
                              dfn_t dfn);
 
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -475,15 +475,18 @@ int amd_iommu_flush_iotlb_pages(struct d
 {
     unsigned long dfn_l = dfn_x(dfn);
 
-    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
-    ASSERT(flush_flags);
+    if ( !(flush_flags & IOMMU_FLUSHF_all) )
+    {
+        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+        ASSERT(flush_flags);
+    }
 
     /* Unless a PTE was modified, no flush is required */
     if ( !(flush_flags & IOMMU_FLUSHF_modified) )
         return 0;
 
-    /* If the range wraps then just flush everything */
-    if ( dfn_l + page_count < dfn_l )
+    /* If so requested or if the range wraps then just flush everything. */
+    if ( (flush_flags & IOMMU_FLUSHF_all) || dfn_l + page_count < dfn_l )
     {
         amd_iommu_flush_all_pages(d);
         return 0;
@@ -508,13 +511,6 @@ int amd_iommu_flush_iotlb_pages(struct d
 
     return 0;
 }
-
-int amd_iommu_flush_iotlb_all(struct domain *d)
-{
-    amd_iommu_flush_all_pages(d);
-
-    return 0;
-}
 
 int amd_iommu_reserve_domain_unity_map(struct domain *d,
                                        const struct ivrs_unity_map *map,
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -642,7 +642,6 @@ static const struct iommu_ops __initcons
     .map_page = amd_iommu_map_page,
     .unmap_page = amd_iommu_unmap_page,
     .iotlb_flush = amd_iommu_flush_iotlb_pages,
-    .iotlb_flush_all = amd_iommu_flush_iotlb_all,
     .reassign_device = reassign_device,
     .get_device_group_id = amd_iommu_group_id,
     .enable_x2apic = iov_enable_xt,
--- a/xen/drivers/passthrough/arm/ipmmu-vmsa.c
+++ b/xen/drivers/passthrough/arm/ipmmu-vmsa.c
@@ -930,13 +930,19 @@ out:
 }
 
 /* Xen IOMMU ops */
-static int __must_check ipmmu_iotlb_flush_all(struct domain *d)
+static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
+                                          unsigned long page_count,
+                                          unsigned int flush_flags)
 {
     struct ipmmu_vmsa_xen_domain *xen_domain = dom_iommu(d)->arch.priv;
 
+    ASSERT(flush_flags);
+
     if ( !xen_domain || !xen_domain->root_domain )
         return 0;
 
+    /* The hardware doesn't support selective TLB flush. */
+
     spin_lock(&xen_domain->lock);
     ipmmu_tlb_invalidate(xen_domain->root_domain);
     spin_unlock(&xen_domain->lock);
@@ -944,16 +950,6 @@ static int __must_check ipmmu_iotlb_flus
     return 0;
 }
 
-static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
-                                          unsigned long page_count,
-                                          unsigned int flush_flags)
-{
-    ASSERT(flush_flags);
-
-    /* The hardware doesn't support selective TLB flush. */
-    return ipmmu_iotlb_flush_all(d);
-}
-
 static struct ipmmu_vmsa_domain *ipmmu_get_cache_domain(struct domain *d,
                                                         struct device *dev)
 {
@@ -1303,7 +1299,6 @@ static const struct iommu_ops ipmmu_iomm
     .hwdom_init      = ipmmu_iommu_hwdom_init,
     .teardown        = ipmmu_iommu_domain_teardown,
     .iotlb_flush     = ipmmu_iotlb_flush,
-    .iotlb_flush_all = ipmmu_iotlb_flush_all,
     .assign_device   = ipmmu_assign_device,
     .reassign_device = ipmmu_reassign_device,
     .map_page        = arm_iommu_map_page,
--- a/xen/drivers/passthrough/arm/smmu.c
+++ b/xen/drivers/passthrough/arm/smmu.c
@@ -2649,11 +2649,17 @@ static int force_stage = 2;
  */
 static u32 platform_features = ARM_SMMU_FEAT_COHERENT_WALK;
 
-static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
+static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
+					     unsigned long page_count,
+					     unsigned int flush_flags)
 {
 	struct arm_smmu_xen_domain *smmu_domain = dom_iommu(d)->arch.priv;
 	struct iommu_domain *cfg;
 
+	ASSERT(flush_flags);
+
+	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
+
 	spin_lock(&smmu_domain->lock);
 	list_for_each_entry(cfg, &smmu_domain->contexts, list) {
 		/*
@@ -2670,16 +2676,6 @@ static int __must_check arm_smmu_iotlb_f
 	return 0;
 }
 
-static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
-					     unsigned long page_count,
-					     unsigned int flush_flags)
-{
-	ASSERT(flush_flags);
-
-	/* ARM SMMU v1 doesn't have flush by VMA and VMID */
-	return arm_smmu_iotlb_flush_all(d);
-}
-
 static struct iommu_domain *arm_smmu_get_domain(struct domain *d,
 						struct device *dev)
 {
@@ -2879,7 +2875,6 @@ static const struct iommu_ops arm_smmu_i
     .add_device = arm_smmu_dt_add_device_generic,
     .teardown = arm_smmu_iommu_domain_teardown,
     .iotlb_flush = arm_smmu_iotlb_flush,
-    .iotlb_flush_all = arm_smmu_iotlb_flush_all,
     .assign_device = arm_smmu_assign_dev,
     .reassign_device = arm_smmu_reassign_dev,
     .map_page = arm_iommu_map_page,
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -3431,7 +3431,6 @@ static const struct iommu_ops arm_smmu_i
 	.hwdom_init		= arm_smmu_iommu_hwdom_init,
 	.teardown		= arm_smmu_iommu_xen_domain_teardown,
 	.iotlb_flush		= arm_smmu_iotlb_flush,
-	.iotlb_flush_all	= arm_smmu_iotlb_flush_all,
 	.assign_device		= arm_smmu_assign_dev,
 	.reassign_device	= arm_smmu_reassign_dev,
 	.map_page		= arm_iommu_map_page,
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -463,15 +463,12 @@ int iommu_iotlb_flush_all(struct domain
     const struct domain_iommu *hd = dom_iommu(d);
     int rc;
 
-    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush_all ||
+    if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush ||
          !flush_flags )
         return 0;
 
-    /*
-     * The operation does a full flush so we don't need to pass the
-     * flush_flags in.
-     */
-    rc = iommu_call(hd->platform_ops, iotlb_flush_all, d);
+    rc = iommu_call(hd->platform_ops, iotlb_flush, d, INVALID_DFN, 0,
+                    flush_flags | IOMMU_FLUSHF_all);
     if ( unlikely(rc) )
     {
         if ( !d->is_shutting_down && printk_ratelimit() )
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -731,18 +731,21 @@ static int __must_check iommu_flush_iotl
                                                 unsigned long page_count,
                                                 unsigned int flush_flags)
 {
-    ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
-    ASSERT(flush_flags);
+    if ( flush_flags & IOMMU_FLUSHF_all )
+    {
+        dfn = INVALID_DFN;
+        page_count = 0;
+    }
+    else
+    {
+        ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
+        ASSERT(flush_flags);
+    }
 
     return iommu_flush_iotlb(d, dfn, flush_flags & IOMMU_FLUSHF_modified,
                              page_count);
 }
 
-static int __must_check iommu_flush_iotlb_all(struct domain *d)
-{
-    return iommu_flush_iotlb(d, INVALID_DFN, 0, 0);
-}
-
 static void queue_free_pt(struct domain *d, mfn_t mfn, unsigned int next_level)
 {
     if ( next_level > 1 )
@@ -2841,7 +2844,7 @@ static int __init intel_iommu_quarantine
     spin_unlock(&hd->arch.mapping_lock);
 
     if ( !rc )
-        rc = iommu_flush_iotlb_all(d);
+        rc = iommu_flush_iotlb(d, INVALID_DFN, 0, 0);
 
     /* Pages may be leaked in failure case */
     return rc;
@@ -2874,7 +2877,6 @@ static struct iommu_ops __initdata vtd_o
     .resume = vtd_resume,
     .crash_shutdown = vtd_crash_shutdown,
     .iotlb_flush = iommu_flush_iotlb_pages,
-    .iotlb_flush_all = iommu_flush_iotlb_all,
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_page_tables = vtd_dump_page_tables,
 };
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -147,9 +147,11 @@ enum
 {
     _IOMMU_FLUSHF_added,
     _IOMMU_FLUSHF_modified,
+    _IOMMU_FLUSHF_all,
 };
 #define IOMMU_FLUSHF_added (1u << _IOMMU_FLUSHF_added)
 #define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
+#define IOMMU_FLUSHF_all (1u << _IOMMU_FLUSHF_all)
 
 int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
                            unsigned long page_count, unsigned int flags,
@@ -282,7 +284,6 @@ struct iommu_ops {
     int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
                                     unsigned long page_count,
                                     unsigned int flush_flags);
-    int __must_check (*iotlb_flush_all)(struct domain *d);
     int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
     void (*dump_page_tables)(struct domain *d);
 


