From: Robin Murphy <robin.murphy@arm.com>
To: will@kernel.org
Cc: robdclark@gmail.com, joro@8bytes.org, bjorn.andersson@linaro.org,
	iommu@lists.linux-foundation.org, vivek.gautam@codeaurora.org,
	gregory.clement@bootlin.com,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH 05/15] iommu/arm-smmu: Split arm_smmu_tlb_inv_range_nosync()
Date: Fri,  9 Aug 2019 18:07:42 +0100	[thread overview]
Message-ID: <33a49ca158509c95d50b0d3f9cba03bba2facdf3.1565369764.git.robin.murphy@arm.com> (raw)
In-Reply-To: <cover.1565369764.git.robin.murphy@arm.com>

Since we now use separate iommu_gather_ops for stage 1 and stage 2
contexts, we may as well divide up the monolithic callback into its
respective stage 1 and stage 2 parts.
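
For context (not part of the patch): the callback table being split here is
the io-pgtable TLB interface, which at the time of this series looks roughly
like the sketch below (based on include/linux/io-pgtable.h; reproduced only
as a reader aid). The point of the change is that .tlb_add_flush can be wired
to a per-stage helper instead of a single function that re-checks the stage on
every invalidation.

	/* Sketch of the interface assumed by this patch */
	struct iommu_gather_ops {
		void (*tlb_flush_all)(void *cookie);
		void (*tlb_add_flush)(unsigned long iova, size_t size,
				      size_t granule, bool leaf,
				      void *cookie);
		void (*tlb_sync)(void *cookie);
	};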

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
 drivers/iommu/arm-smmu.c | 66 ++++++++++++++++++++++------------------
 1 file changed, 37 insertions(+), 29 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 463bc8d98adb..a681e000e704 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -490,46 +490,54 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 	arm_smmu_tlb_sync_global(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-					  size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
+				      size_t granule, bool leaf, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
-	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+	void __iomem *reg = ARM_SMMU_CB(smmu, cfg->cbndx);
 
-	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		wmb();
 
-	if (stage1) {
-		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+	reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
-		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
-			iova &= ~12UL;
-			iova |= cfg->asid;
-			do {
-				writel_relaxed(iova, reg);
-				iova += granule;
-			} while (size -= granule);
-		} else {
-			iova >>= 12;
-			iova |= (u64)cfg->asid << 48;
-			do {
-				writeq_relaxed(iova, reg);
-				iova += granule >> 12;
-			} while (size -= granule);
-		}
-	} else {
-		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
-			      ARM_SMMU_CB_S2_TLBIIPAS2;
-		iova >>= 12;
+	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
+		iova &= ~12UL;
+		iova |= cfg->asid;
 		do {
-			smmu_write_atomic_lq(iova, reg);
+			writel_relaxed(iova, reg);
+			iova += granule;
+		} while (size -= granule);
+	} else {
+		iova >>= 12;
+		iova |= (u64)cfg->asid << 48;
+		do {
+			writeq_relaxed(iova, reg);
 			iova += granule >> 12;
 		} while (size -= granule);
 	}
 }
 
+static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
+				      size_t granule, bool leaf, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *reg = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
+
+	reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
+	iova >>= 12;
+	do {
+		smmu_write_atomic_lq(iova, reg);
+		iova += granule >> 12;
+	} while (size -= granule);
+}
+
 /*
  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
  * almost negligible, but the benefit of getting the first one in as far ahead
@@ -550,13 +558,13 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 
 static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_s1,
 	.tlb_sync	= arm_smmu_tlb_sync_context,
 };
 
 static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_s2,
 	.tlb_sync	= arm_smmu_tlb_sync_context,
 };
 
-- 
2.21.0.dirty

Thread overview:

2019-08-09 17:07 [PATCH 00/15] Arm SMMU refactoring Robin Murphy
2019-08-09 17:07 ` [PATCH 01/15] iommu/arm-smmu: Convert GR0 registers to bitfields Robin Murphy
2019-08-14 17:20   ` Will Deacon
2019-08-14 17:35     ` Robin Murphy
2019-08-09 17:07 ` [PATCH 02/15] iommu/arm-smmu: Convert GR1 registers to bitfields Robin Murphy
2019-08-09 17:07 ` [PATCH 03/15] iommu/arm-smmu: Convert context bank registers to bitfields Robin Murphy
2019-08-09 17:07 ` [PATCH 04/15] iommu/arm-smmu: Rework cb_base handling Robin Murphy
2019-08-14 18:05   ` Will Deacon
2019-08-15 11:14     ` Robin Murphy
2019-08-09 17:07 ` [PATCH 05/15] iommu/arm-smmu: Split arm_smmu_tlb_inv_range_nosync() Robin Murphy [this message]
2019-08-15 10:56   ` Will Deacon
2019-08-15 11:22     ` Robin Murphy
2019-08-09 17:07 ` [PATCH 06/15] iommu/arm-smmu: Get rid of weird "atomic" write Robin Murphy
2019-08-09 17:07 ` [PATCH 07/15] iommu/arm-smmu: Abstract GR1 accesses Robin Murphy
2019-08-09 17:07 ` [PATCH 08/15] iommu/arm-smmu: Abstract context bank accesses Robin Murphy
2019-08-15 10:56   ` Will Deacon
2019-08-15 11:41     ` Robin Murphy
2019-08-09 17:07 ` [PATCH 09/15] iommu/arm-smmu: Abstract GR0 accesses Robin Murphy
2019-08-09 17:07 ` [PATCH 10/15] iommu/arm-smmu: Rename arm-smmu-regs.h Robin Murphy
2019-08-09 17:07 ` [PATCH 11/15] iommu/arm-smmu: Add implementation infrastructure Robin Murphy
2019-08-09 17:07 ` [PATCH 12/15] iommu/arm-smmu: Move Secure access quirk to implementation Robin Murphy
2019-08-09 17:07 ` [PATCH 13/15] iommu/arm-smmu: Add configuration implementation hook Robin Murphy
2019-08-09 17:07 ` [PATCH 14/15] iommu/arm-smmu: Add reset implementation hook Robin Murphy
2019-08-09 17:07 ` [PATCH 15/15] iommu/arm-smmu: Add context init implementation hook Robin Murphy
2019-08-13 19:11   ` Krishna Reddy
2019-08-15 10:56   ` Will Deacon
2019-08-15 12:09     ` Robin Murphy
2019-08-15 15:09       ` Jordan Crouse
2019-08-09 17:11 ` [PATCH 00/15] Arm SMMU refactoring Robin Murphy
2019-08-15 10:55 ` Will Deacon
