From: "Isaac J. Manjarres" <isaacm@codeaurora.org>
To: iommu@lists.linux-foundation.org, linux-arm-kernel@lists.infradead.org
Cc: "Isaac J. Manjarres" <isaacm@codeaurora.org>,
	robin.murphy@arm.com, will@kernel.org, pratikp@codeaurora.org
Subject: [RFC PATCH v3 10/12] iommu/io-pgtable-arm: Implement arm_lpae_map_pages()
Date: Mon,  5 Apr 2021 12:11:10 -0700
Message-ID: <20210405191112.28192-11-isaacm@codeaurora.org>
In-Reply-To: <20210405191112.28192-1-isaacm@codeaurora.org>

Implement the map_pages() callback for the ARM LPAE io-pgtable
format, so that a range of pages of the same size can be mapped
with a single walk of the page tables, rather than one walk per
page.

Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Suggested-by: Will Deacon <will@kernel.org>
---
 drivers/iommu/io-pgtable-arm.c | 95 +++++++++++++++++++++++++++++++---
 1 file changed, 88 insertions(+), 7 deletions(-)
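
Note: in caller terms, the new callback replaces one full table walk
per page with a single walk that installs a run of same-sized leaf
entries. A rough sketch of the calling pattern (illustrative only;
error handling omitted, and 'mapped' reports progress on failure):

	/* before: one walk from the pgd for every page */
	for (i = 0; i < pgcount; i++)
		ret = ops->map(ops, iova + i * pgsize,
			       paddr + i * pgsize, pgsize, prot, gfp);

	/* after: one walk installs all pgcount leaf entries */
	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
			     gfp, &mapped);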

diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index fc63d57b8037..b8464305f1c2 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -355,20 +355,35 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
 }
 
 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
-			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
-			  int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
+			  phys_addr_t paddr, size_t size, size_t pgcount,
+			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
+			  gfp_t gfp, size_t *mapped)
 {
 	arm_lpae_iopte *cptep, pte;
 	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 	size_t tblsz = ARM_LPAE_GRANULE(data);
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	int ret = 0;
 
 	/* Find our entry at the current level */
 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
-	/* If we can install a leaf entry at this level, then do so */
-	if (size == block_size)
-		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+	/* If we can install leaf entries at this level, then do so */
+	if (size == block_size) {
+		while (pgcount--) {
+			ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+			if (ret)
+				return ret;
+
+			iova += size;
+			paddr += size;
+			ptep++;
+			if (mapped)
+				*mapped += size;
+		}
+
+		return ret;
+	}
 
 	/* We can't allocate tables at the final level */
 	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
@@ -397,7 +412,8 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 	}
 
 	/* Rinse, repeat */
-	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
+	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1, cptep,
+			      gfp, mapped);
 }
 
 static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -487,7 +503,71 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 		return 0;
 
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
-	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
+	ret = __arm_lpae_map(data, iova, paddr, size, 1, prot, lvl, ptep, gfp,
+			     NULL);
+	/*
+	 * Synchronise all PTE updates for the new mapping before there's
+	 * a chance for anything to kick off a table walk for the new iova.
+	 */
+	wmb();
+
+	return ret;
+}
+
+static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int iommu_prot, gfp_t gfp, size_t *mapped)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	arm_lpae_iopte *ptep = data->pgd;
+	int ret, lvl = data->start_level, last_lvl;
+	arm_lpae_iopte prot;
+	long iaext = (s64)iova >> cfg->ias;
+	size_t table_size, pages, tbl_offset, max_entries;
+
+	/* If no access, then nothing to do */
+	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+		return 0;
+
+	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
+		return -EINVAL;
+
+	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
+		iaext = ~iaext;
+	if (WARN_ON(iaext || paddr >> cfg->oas))
+		return -ERANGE;
+
+	prot = arm_lpae_prot_to_pte(data, iommu_prot);
+
+	/*
+	 * Calculating the page table size here helps avoid situations where
+	 * a page range that is being mapped may be mapped at the same level
+	 * but not mapped by the same tables. Allowing such a scenario to
+	 * occur can complicate the logic in __arm_lpae_map().
+	 */
+	last_lvl = ARM_LPAE_BLOCK_SIZE_LVL(pgsize, data);
+
+	if (last_lvl == data->start_level)
+		table_size = ARM_LPAE_PGD_SIZE(data);
+	else
+		table_size = ARM_LPAE_GRANULE(data);
+
+	max_entries = table_size / sizeof(*ptep);
+
+	while (pgcount) {
+		tbl_offset = ARM_LPAE_LVL_IDX(iova, last_lvl, data);
+		pages = min_t(size_t, pgcount, max_entries - tbl_offset);
+		ret = __arm_lpae_map(data, iova, paddr, pgsize, pages, prot,
+				     lvl, ptep, gfp, mapped);
+		if (ret)
+			break;
+
+		iova += pages * pgsize;
+		paddr += pages * pgsize;
+		pgcount -= pages;
+	}
+
 	/*
 	 * Synchronise all PTE updates for the new mapping before there's
 	 * a chance for anything to kick off a table walk for the new iova.
@@ -833,6 +913,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 
 	data->iop.ops = (struct io_pgtable_ops) {
 		.map		= arm_lpae_map,
+		.map_pages	= arm_lpae_map_pages,
 		.unmap		= arm_lpae_unmap,
 		.unmap_pages	= arm_lpae_unmap_pages,
 		.iova_to_phys	= arm_lpae_iova_to_phys,
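
The chunking loop in arm_lpae_map_pages() never lets a single
__arm_lpae_map() call cross a last-level table boundary. A minimal
standalone model of that split (not kernel code; assumes a 4KiB
granule with 512 entries per table, so the constants and names here
are illustrative only):

	#include <stdio.h>
	#include <stdint.h>

	#define PGSIZE      4096ULL
	#define MAX_ENTRIES 512ULL	/* granule / sizeof(pte) */

	int main(void)
	{
		uint64_t iova = 0x1ff000;  /* one page below a table boundary */
		uint64_t pgcount = 1024;

		while (pgcount) {
			/* index of this iova within its last-level table */
			uint64_t tbl_offset = (iova / PGSIZE) % MAX_ENTRIES;
			/* map at most up to the end of the current table */
			uint64_t pages = pgcount < MAX_ENTRIES - tbl_offset ?
					 pgcount : MAX_ENTRIES - tbl_offset;

			printf("map %4llu pages at iova 0x%llx\n",
			       (unsigned long long)pages,
			       (unsigned long long)iova);

			iova += pages * PGSIZE;
			pgcount -= pages;
		}
		return 0;
	}

For this example range the model prints three chunks (1, 512 and 511
pages), matching how the loop above hands at most one table's worth
of PTEs to __arm_lpae_map() per iteration.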
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
