All of lore.kernel.org
 help / color / mirror / Atom feed
From: Lu Baolu <baolu.lu@linux.intel.com>
To: Joerg Roedel <joro@8bytes.org>
Cc: iommu@lists.linux-foundation.org
Subject: [PATCH 17/22] iommu/vt-d: Flush PASID-based iotlb for iova over first level
Date: Thu,  2 Jan 2020 08:18:18 +0800	[thread overview]
Message-ID: <20200102001823.21976-18-baolu.lu@linux.intel.com> (raw)
In-Reply-To: <20200102001823.21976-1-baolu.lu@linux.intel.com>

When software has changed first-level translation tables, it should invalidate
the affected IOTLB entries and paging-structure caches using the
PASID-based-IOTLB Invalidate Descriptor defined in the VT-d specification,
Section 6.5.2.4.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/dmar.c        | 41 +++++++++++++++++++++++++++
 drivers/iommu/intel-iommu.c | 56 +++++++++++++++++++++++++++----------
 include/linux/intel-iommu.h |  2 ++
 3 files changed, 84 insertions(+), 15 deletions(-)

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3acfa6a25fa2..fb30d5053664 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1371,6 +1371,47 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	qi_submit_sync(&desc, iommu);
 }
 
+/* PASID-based IOTLB invalidation */
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih)
+{
+	struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
+
+	/*
+	 * npages == -1 means a PASID-selective invalidation, otherwise,
+	 * a positive value for Page-selective-within-PASID invalidation.
+	 * 0 is not a valid input.
+	 */
+	if (WARN_ON(!npages)) {
+		pr_err("Invalid input npages = %ld\n", npages);
+		return;
+	}
+
+	if (npages == -1) {
+		desc.qw0 = QI_EIOTLB_PASID(pasid) |
+				QI_EIOTLB_DID(did) |
+				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+				QI_EIOTLB_TYPE;
+		desc.qw1 = 0;
+	} else {
+		int mask = ilog2(__roundup_pow_of_two(npages));
+		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
+
+		if (WARN_ON_ONCE(!ALIGN(addr, align)))
+			addr &= ~(align - 1);
+
+		desc.qw0 = QI_EIOTLB_PASID(pasid) |
+				QI_EIOTLB_DID(did) |
+				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+				QI_EIOTLB_TYPE;
+		desc.qw1 = QI_EIOTLB_ADDR(addr) |
+				QI_EIOTLB_IH(ih) |
+				QI_EIOTLB_AM(mask);
+	}
+
+	qi_submit_sync(&desc, iommu);
+}
+
 /*
  * Disable Queued Invalidation interface.
  */
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 071cbc172ce8..54db6bc0b281 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1509,6 +1509,20 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
+static void domain_flush_piotlb(struct intel_iommu *iommu,
+				struct dmar_domain *domain,
+				u64 addr, unsigned long npages, bool ih)
+{
+	u16 did = domain->iommu_did[iommu->seq_id];
+
+	if (domain->default_pasid)
+		qi_flush_piotlb(iommu, did, domain->default_pasid,
+				addr, npages, ih);
+
+	if (!list_empty(&domain->devices))
+		qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
+}
+
 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 				  struct dmar_domain *domain,
 				  unsigned long pfn, unsigned int pages,
@@ -1522,18 +1536,23 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
 
 	if (ih)
 		ih = 1 << 6;
-	/*
-	 * Fallback to domain selective flush if no PSI support or the size is
-	 * too big.
-	 * PSI requires page size to be 2 ^ x, and the base address is naturally
-	 * aligned to the size
-	 */
-	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
-		iommu->flush.flush_iotlb(iommu, did, 0, 0,
-						DMA_TLB_DSI_FLUSH);
-	else
-		iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
-						DMA_TLB_PSI_FLUSH);
+
+	if (domain_use_first_level(domain)) {
+		domain_flush_piotlb(iommu, domain, addr, pages, ih);
+	} else {
+		/*
+		 * Fallback to domain selective flush if no PSI support or
+		 * the size is too big. PSI requires page size to be 2 ^ x,
+		 * and the base address is naturally aligned to the size.
+		 */
+		if (!cap_pgsel_inv(iommu->cap) ||
+		    mask > cap_max_amask_val(iommu->cap))
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+							DMA_TLB_DSI_FLUSH);
+		else
+			iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+							DMA_TLB_PSI_FLUSH);
+	}
 
 	/*
 	 * In caching mode, changes of pages from non-present to present require
@@ -1548,8 +1567,11 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu,
 					struct dmar_domain *domain,
 					unsigned long pfn, unsigned int pages)
 {
-	/* It's a non-present to present mapping. Only flush if caching mode */
-	if (cap_caching_mode(iommu->cap))
+	/*
+	 * It's a non-present to present mapping. Only flush if caching mode
+	 * and second level.
+	 */
+	if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
 		iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
@@ -1566,7 +1588,11 @@ static void iommu_flush_iova(struct iova_domain *iovad)
 		struct intel_iommu *iommu = g_iommus[idx];
 		u16 did = domain->iommu_did[iommu->seq_id];
 
-		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+		if (domain_use_first_level(domain))
+			domain_flush_piotlb(iommu, domain, 0, -1, 0);
+		else
+			iommu->flush.flush_iotlb(iommu, did, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
 
 		if (!cap_caching_mode(iommu->cap))
 			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 454c69712131..3a4708a8a414 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -650,6 +650,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 			u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+		     unsigned long npages, bool ih);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);
-- 
2.17.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

  parent reply	other threads:[~2020-01-02  0:19 UTC|newest]

Thread overview: 41+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-01-02  0:18 [PULL REQUEST] iommu/vt-d: patches for v5.6 Lu Baolu
2020-01-02  0:18 ` [PATCH 01/22] iommu/vt-d: Add Kconfig option to enable/disable scalable mode Lu Baolu
2020-01-02  0:18 ` [PATCH 02/22] iommu/vt-d: Fix CPU and IOMMU SVM feature matching checks Lu Baolu
2020-01-02  0:18 ` [PATCH 03/22] iommu/vt-d: Match CPU and IOMMU paging mode Lu Baolu
2020-01-02  0:18 ` [PATCH 04/22] iommu/vt-d: Reject SVM bind for failed capability check Lu Baolu
2020-01-02  0:18 ` [PATCH 05/22] iommu/vt-d: Avoid duplicated code for PASID setup Lu Baolu
2020-01-02  0:18 ` [PATCH 06/22] iommu/vt-d: Fix off-by-one in PASID allocation Lu Baolu
2020-01-02  0:18 ` [PATCH 07/22] iommu/vt-d: Replace Intel specific PASID allocator with IOASID Lu Baolu
2020-01-02  0:18 ` [PATCH 08/22] iommu/vt-d: Avoid sending invalid page response Lu Baolu
2020-01-02  0:18 ` [PATCH 09/22] iommu/vt-d: Misc macro clean up for SVM Lu Baolu
2020-01-02  0:18 ` [PATCH 10/22] iommu/vt-d: trace: Extend map_sg trace event Lu Baolu
2020-01-02  0:18 ` [PATCH 11/22] iommu/vt-d: Avoid iova flush queue in strict mode Lu Baolu
2020-01-02  0:18 ` [PATCH 12/22] iommu/vt-d: Loose requirement for flush queue initialization Lu Baolu
2020-01-02  0:18 ` [PATCH 13/22] iommu/vt-d: Identify domains using first level page table Lu Baolu
2020-01-02  0:18 ` [PATCH 14/22] iommu/vt-d: Add set domain DOMAIN_ATTR_NESTING attr Lu Baolu
2020-01-02  0:18 ` [PATCH 15/22] iommu/vt-d: Add PASID_FLAG_FL5LP for first-level pasid setup Lu Baolu
2020-01-02  0:18 ` [PATCH 16/22] iommu/vt-d: Setup pasid entries for iova over first level Lu Baolu
2020-01-02  0:18 ` Lu Baolu [this message]
2020-01-02  0:18 ` [PATCH 18/22] iommu/vt-d: Make first level IOVA canonical Lu Baolu
2020-01-02  0:18 ` [PATCH 19/22] iommu/vt-d: Update first level super page capability Lu Baolu
2020-01-02  0:18 ` [PATCH 20/22] iommu/vt-d: Use iova over first level Lu Baolu
2020-01-02  0:18 ` [PATCH 21/22] iommu/vt-d: debugfs: Add support to show page table internals Lu Baolu
2020-01-02  0:18 ` [PATCH 22/22] iommu/vt-d: Add a quirk flag for scope mismatched devices Lu Baolu
2020-01-02  2:11   ` Roland Dreier via iommu
2020-01-02  2:14     ` Lu Baolu
2020-01-02  2:25       ` Roland Dreier via iommu
2020-01-02  2:34         ` Lu Baolu
2020-01-03  0:32         ` Lu Baolu
2020-01-04 16:52           ` Roland Dreier via iommu
2020-01-05  3:43             ` Lu Baolu
2020-01-06 17:05         ` Jerry Snitselaar
2020-01-07  0:35           ` Lu Baolu
2020-01-07  1:30             ` Jerry Snitselaar
2020-01-07  1:47               ` Lu Baolu
2020-01-09  0:12                 ` Roland Dreier via iommu
2020-01-08 14:16   ` Christoph Hellwig
2020-01-08 23:28     ` Lu Baolu
2020-01-09  7:06       ` Christoph Hellwig
2020-01-09  8:53         ` Lu Baolu
2020-01-09  8:56           ` 答复: " Jim,Yan
2020-01-07 13:06 ` [PULL REQUEST] iommu/vt-d: patches for v5.6 Joerg Roedel

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200102001823.21976-18-baolu.lu@linux.intel.com \
    --to=baolu.lu@linux.intel.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=joro@8bytes.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.