From: Robin Murphy <robin.murphy@arm.com>
To: will.deacon@arm.com,
	joro@8bytes.org
Cc: iommu@lists.linux-foundation.org,
	brian.starkey@arm.com,
	linux-arm-kernel@lists.infradead.org,
	tchalamarla@caviumnetworks.com
Subject: [PATCH 6/7] iommu/arm-smmu: Decouple context format from kernel config
Date: Wed, 13 Apr 2016 18:13:02 +0100	[thread overview]
Message-ID: <173006777218859d1671ae517c70592c6c02f630.1460391217.git.robin.murphy@arm.com> (raw)
In-Reply-To: <cover.1460391217.git.robin.murphy@arm.com>

The way the driver currently forces an AArch32 or AArch64 context format
based on the kernel config and SMMU architecture version is suboptimal,
in that it makes it very hard to support oddball mix-and-match cases
like the SMMUv1 64KB supplement, or situations where the reduced table
depth of an AArch32 short descriptor context may be desirable under an
AArch64 kernel. It also only happens to work on current implementations
which do support all the relevant formats.

Introduce an explicit notion of context format, so we can manage that
independently and get rid of the inflexible #ifdeffery.
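
As an aside for readers skimming the diff: the selection policy introduced here is shown in full in the arm_smmu_init_domain_context() hunk below. The following is a minimal, standalone sketch of that same fallback order ("prefer the closest match to the CPU page size, otherwise fall back to a smaller AArch64 granule, otherwise AArch32 LPAE"); the constant and function names and the test harness are illustrative only, not the driver's actual interfaces.

    /* Illustrative sketch of the context-format fallback; the real logic
     * lives in arm_smmu_init_domain_context() in the patch below.
     */
    #include <stdio.h>

    #define FMT_AARCH64_4K	(1u << 0)
    #define FMT_AARCH64_16K	(1u << 1)
    #define FMT_AARCH64_64K	(1u << 2)
    #define FMT_AARCH32_L	(1u << 3)

    enum ctx_fmt { CTX_FMT_NONE, CTX_FMT_AARCH64, CTX_FMT_AARCH32_L };

    /* Pick a format given the CPU page size and the SMMU's supported formats */
    static enum ctx_fmt pick_fmt(unsigned long page_size, unsigned int features,
				 int aarch64_kernel)
    {
	    enum ctx_fmt fmt = CTX_FMT_NONE;

	    if (features & FMT_AARCH32_L)
		    fmt = CTX_FMT_AARCH32_L;

	    if (aarch64_kernel || fmt == CTX_FMT_NONE) {
		    switch (page_size) {
		    case 0x10000:	/* 64K pages */
			    if (features & FMT_AARCH64_64K) {
				    fmt = CTX_FMT_AARCH64;
				    break;
			    } /* else fall through to a smaller granule */
		    case 0x4000:	/* 16K pages */
			    if (features & FMT_AARCH64_16K) {
				    fmt = CTX_FMT_AARCH64;
				    break;
			    } /* else fall through */
		    case 0x1000:	/* 4K pages */
			    if (features & FMT_AARCH64_4K)
				    fmt = CTX_FMT_AARCH64;
		    }
	    }
	    return fmt;	/* CTX_FMT_NONE means no usable format was found */
    }

    int main(void)
    {
	    /* 64K-page AArch64 kernel on an SMMU that only implements the 4K
	     * AArch64 granule: the fallback lands on 4K rather than failing.
	     */
	    printf("%d\n", pick_fmt(0x10000, FMT_AARCH64_4K, 1));
	    return 0;
    }
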

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
---
 drivers/iommu/arm-smmu.c | 98 +++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 80 insertions(+), 18 deletions(-)

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index acff332..1d4285f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -117,6 +117,8 @@
 #define ID0_NTS				(1 << 28)
 #define ID0_SMS				(1 << 27)
 #define ID0_ATOSNS			(1 << 26)
+#define ID0_PTFS_NO_AARCH32		(1 << 25)
+#define ID0_PTFS_NO_AARCH32S		(1 << 24)
 #define ID0_CTTW			(1 << 14)
 #define ID0_NUMIRPT_SHIFT		16
 #define ID0_NUMIRPT_MASK		0xff
@@ -317,6 +319,11 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
 #define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
 #define ARM_SMMU_FEAT_VMID16		(1 << 6)
+#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
+#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
+#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
+#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
+#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
 	u32				features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -346,10 +353,18 @@ struct arm_smmu_device {
 	u32				cavium_id_base; /* Specific to Cavium */
 };
 
+enum arm_smmu_context_fmt {
+	ARM_SMMU_CTX_FMT_NONE,
+	ARM_SMMU_CTX_FMT_AARCH64,
+	ARM_SMMU_CTX_FMT_AARCH32_L,
+	ARM_SMMU_CTX_FMT_AARCH32_S,
+};
+
 struct arm_smmu_cfg {
 	u8				cbndx;
 	u8				irptndx;
 	u32				cbar;
+	enum arm_smmu_context_fmt	fmt;
 };
 #define INVALID_IRPTNDX			0xff
 
@@ -619,14 +634,13 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
-		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
 			iova &= ~12UL;
 			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
 			do {
 				writel_relaxed(iova, reg);
 				iova += granule;
 			} while (size -= granule);
-#ifdef CONFIG_64BIT
 		} else {
 			iova >>= 12;
 			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
@@ -634,9 +648,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 				writeq_relaxed(iova, reg);
 				iova += granule >> 12;
 			} while (size -= granule);
-#endif
 		}
-#ifdef CONFIG_64BIT
 	} else if (smmu->version == ARM_SMMU_V2) {
 		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
@@ -646,7 +658,6 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 			smmu_write_atomic_lq(iova, reg);
 			iova += granule >> 12;
 		} while (size -= granule);
-#endif
 	} else {
 		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
 		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
@@ -745,11 +756,10 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 
 	if (smmu->version > ARM_SMMU_V1) {
-#ifdef CONFIG_64BIT
-		reg = CBA2R_RW64_64BIT;
-#else
-		reg = CBA2R_RW64_32BIT;
-#endif
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+			reg = CBA2R_RW64_64BIT;
+		else
+			reg = CBA2R_RW64_32BIT;
 		/* 16-bit VMIDs live in CBA2R */
 		if (smmu->features & ARM_SMMU_FEAT_VMID16)
 			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
@@ -860,16 +870,48 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 
+	/*
+	 * Choosing a suitable context format is even more fiddly. Until we
+	 * grow some way for the caller to express a preference, just aim for
+	 * the closest match to the CPU format out of what we might have.
+	 */
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
+		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+	if (IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
+		switch (PAGE_SIZE) {
+		case SZ_64K:
+			if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) {
+				cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
+				break;
+			} /* else fall through */
+		case SZ_16K:
+			if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K) {
+				cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
+				break;
+			} /* else fall through */
+		case SZ_4K:
+			if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_4K)
+				cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
+		}
+	}
+	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	switch (smmu_domain->stage) {
 	case ARM_SMMU_DOMAIN_S1:
 		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
 		start = smmu->num_s2_context_banks;
 		ias = smmu->va_size;
 		oas = smmu->ipa_size;
-		if (IS_ENABLED(CONFIG_64BIT))
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
-		else
+		} else {
 			fmt = ARM_32_LPAE_S1;
+			ias = min(ias, 32UL);
+			oas = min(oas, 40UL);
+		}
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -881,10 +923,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		start = 0;
 		ias = smmu->ipa_size;
 		oas = smmu->pa_size;
-		if (IS_ENABLED(CONFIG_64BIT))
+		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S2;
-		else
+		} else {
 			fmt = ARM_32_LPAE_S2;
+			ias = min(ias, 40UL);
+			oas = min(oas, 40UL);
+		}
 		break;
 	default:
 		ret = -EINVAL;
@@ -1670,6 +1715,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 					   ID0_NUMSIDB_MASK;
 	}
 
+	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
+		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
+		if (!(id & ID0_PTFS_NO_AARCH32S))
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
+	}
+
 	/* ID1 */
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
 	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
@@ -1725,7 +1776,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
 	if (smmu->version == ARM_SMMU_V1) {
 		smmu->va_size = smmu->ipa_size;
-		size = SZ_4K | SZ_2M | SZ_1G;
 	} else {
 		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
 		smmu->va_size = arm_smmu_id_size_to_bits(size);
@@ -1734,13 +1784,25 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 #endif
 		size = 0;
 		if (id & ID2_PTFS_4K)
-			size |= SZ_4K | SZ_2M | SZ_1G;
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
 		if (id & ID2_PTFS_16K)
-			size |= SZ_16K | SZ_32M;
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
 		if (id & ID2_PTFS_64K)
-			size |= SZ_64K | SZ_512M;
+			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
 	}
 
+	/* Now we've corralled the various formats, what'll it do? */
+	size = 0;
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
+		size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
+	if (smmu->features &
+	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
+		size |= SZ_4K | SZ_2M | SZ_1G;
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
+		size |= SZ_16K | SZ_32M;
+	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
+		size |= SZ_64K | SZ_512M;
+
 	arm_smmu_ops.pgsize_bitmap &= size;
 	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
 
-- 
2.7.3.dirty
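
Footnote on the final hunk above ("Now we've corralled the various formats, what'll it do?"): the mapping from probed context formats to the supported page-size bitmap can be summarised as a standalone sketch. The constant names and the main() harness here are illustrative stand-ins; the driver itself uses its ARM_SMMU_FEAT_FMT_* flags and the kernel's SZ_* macros.

    /* Sketch of deriving the supported page-size bitmap from the context
     * formats the SMMU was probed to support, mirroring the last hunk above.
     */
    #include <stdio.h>

    #define FEAT_AARCH32_S	(1u << 0)
    #define FEAT_AARCH32_L	(1u << 1)
    #define FEAT_AARCH64_4K	(1u << 2)
    #define FEAT_AARCH64_16K	(1u << 3)
    #define FEAT_AARCH64_64K	(1u << 4)

    #define SZ_4K	0x00001000UL
    #define SZ_16K	0x00004000UL
    #define SZ_64K	0x00010000UL
    #define SZ_1M	0x00100000UL
    #define SZ_2M	0x00200000UL
    #define SZ_16M	0x01000000UL
    #define SZ_32M	0x02000000UL
    #define SZ_512M	0x20000000UL
    #define SZ_1G	0x40000000UL

    static unsigned long pgsize_bitmap(unsigned int features)
    {
	    unsigned long size = 0;

	    /* AArch32 short-descriptor: 4K/64K pages, 1M sections, 16M supersections */
	    if (features & FEAT_AARCH32_S)
		    size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	    /* AArch32 LPAE and the AArch64 4K granule share the same block sizes */
	    if (features & (FEAT_AARCH32_L | FEAT_AARCH64_4K))
		    size |= SZ_4K | SZ_2M | SZ_1G;
	    if (features & FEAT_AARCH64_16K)
		    size |= SZ_16K | SZ_32M;
	    if (features & FEAT_AARCH64_64K)
		    size |= SZ_64K | SZ_512M;

	    return size;
    }

    int main(void)
    {
	    /* e.g. an SMMU implementing only the AArch64 4K and 64K granules */
	    printf("0x%08lx\n", pgsize_bitmap(FEAT_AARCH64_4K | FEAT_AARCH64_64K));
	    return 0;
    }
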
