From: Leonardo Bras <leobras.c@gmail.com>
To: Michael Ellerman <mpe@ellerman.id.au>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Alexey Kardashevskiy <aik@ozlabs.ru>,
	Christophe Leroy <christophe.leroy@c-s.fr>,
	Leonardo Bras <leobras.c@gmail.com>,
	Joel Stanley <joel@jms.id.au>,
	Thiago Jung Bauermann <bauerman@linux.ibm.com>,
	Ram Pai <linuxram@us.ibm.com>,
	Brian King <brking@linux.vnet.ibm.com>,
	Murilo Fossa Vicentini <muvic@linux.ibm.com>,
	David Dai <zdai@linux.vnet.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org
Subject: [PATCH v1 01/10] powerpc/pseries/iommu: Replace hard-coded page shift
Date: Mon, 17 Aug 2020 20:40:24 -0300
Message-ID: <20200817234033.442511-2-leobras.c@gmail.com>
In-Reply-To: <20200817234033.442511-1-leobras.c@gmail.com>

Some functions assume the IOMMU page size can only be 4K (pageshift == 12).
Update them to accept whatever page size is passed, so 64K pages can also be used.
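
For reference, this is the pattern the patch moves to (taken from the
tce_build_pSeries() hunk below): derive the shift from the iommu_table
instead of hard-coding 12:

	const unsigned long tceshift = tbl->it_page_shift;	/* 12 for 4K, 16 for 64K */
	u64 rpn = __pa(uaddr) >> tceshift;
	*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK(tceshift)) << tceshift);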

In the process, defines such as TCE_SHIFT, TCE_PAGE_SIZE and TCE_RPN_SHIFT
became obsolete and were removed. TCE_RPN_MASK is now a macro that builds
the mask from the page shift in use.
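
For illustration, expanding the new macro for the two page shifts of
interest gives back the old 40-bit mask for 4K pages and a narrower
36-bit mask for 64K pages:

	TCE_RPN_MASK(12) == 0xfffffffffful	/* 40-bit RPN, same as the old 4K-only define */
	TCE_RPN_MASK(16) == 0x0ffffffffful	/* 36-bit RPN for 64K IOMMU pages */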

Most call sites have an iommu_table struct available, so using
tbl->it_page_shift was straightforward. tce_free_pSeriesLP() is a special
case: its callers do not always have a tbl struct, so it gains an explicit
tceshift parameter instead.
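
As a sketch of the resulting call shapes (both taken from the hunks below),
callers that do have a tbl pass tbl->it_page_shift, while the error path in
tce_build_pSeriesLP() forwards the tceshift it already received:

	/* tce_freemulti_pSeriesLP(): tbl is available */
	tce_free_pSeriesLP(tbl->it_index, tcenum, tbl->it_page_shift, npages);

	/* tce_build_pSeriesLP() error path: only liobn and tceshift in scope */
	tce_free_pSeriesLP(liobn, tcenum_start, tceshift,
			   (npages_start - (npages + 1)));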

Signed-off-by: Leonardo Bras <leobras.c@gmail.com>
---
 arch/powerpc/include/asm/tce.h         | 10 ++----
 arch/powerpc/platforms/pseries/iommu.c | 42 ++++++++++++++++----------
 2 files changed, 28 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
index db5fc2f2262d..971cba2d87cc 100644
--- a/arch/powerpc/include/asm/tce.h
+++ b/arch/powerpc/include/asm/tce.h
@@ -19,15 +19,9 @@
 #define TCE_VB			0
 #define TCE_PCI			1
 
-/* TCE page size is 4096 bytes (1 << 12) */
-
-#define TCE_SHIFT	12
-#define TCE_PAGE_SIZE	(1 << TCE_SHIFT)
-
 #define TCE_ENTRY_SIZE		8		/* each TCE is 64 bits */
-
-#define TCE_RPN_MASK		0xfffffffffful  /* 40-bit RPN (4K pages) */
-#define TCE_RPN_SHIFT		12
+#define TCE_RPN_BITS		52		/* Bits 0-51 represent RPN on TCE */
+#define TCE_RPN_MASK(ps)	((1ul << (TCE_RPN_BITS - (ps))) - 1)
 #define TCE_VALID		0x800		/* TCE valid */
 #define TCE_ALLIO		0x400		/* TCE valid for all lpars */
 #define TCE_PCI_WRITE		0x2		/* write from PCI allowed */
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index e4198700ed1a..8fe23b7dff3a 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -107,6 +107,9 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
 	u64 proto_tce;
 	__be64 *tcep;
 	u64 rpn;
+	const unsigned long tceshift = tbl->it_page_shift;
+	const unsigned long pagesize = IOMMU_PAGE_SIZE(tbl);
+	const u64 rpn_mask = TCE_RPN_MASK(tceshift);
 
 	proto_tce = TCE_PCI_READ; // Read allowed
 
@@ -117,10 +120,10 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
 
 	while (npages--) {
 		/* can't move this out since we might cross MEMBLOCK boundary */
-		rpn = __pa(uaddr) >> TCE_SHIFT;
-		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
+		rpn = __pa(uaddr) >> tceshift;
+		*tcep = cpu_to_be64(proto_tce | (rpn & rpn_mask) << tceshift);
 
-		uaddr += TCE_PAGE_SIZE;
+		uaddr += pagesize;
 		tcep++;
 	}
 	return 0;
@@ -146,7 +149,7 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
 	return be64_to_cpu(*tcep);
 }
 
-static void tce_free_pSeriesLP(unsigned long liobn, long, long);
+static void tce_free_pSeriesLP(unsigned long liobn, long, long, long);
 static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
 
 static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
@@ -159,6 +162,7 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
 	u64 rpn;
 	int ret = 0;
 	long tcenum_start = tcenum, npages_start = npages;
+	const u64 rpn_mask = TCE_RPN_MASK(tceshift);
 
 	rpn = __pa(uaddr) >> tceshift;
 	proto_tce = TCE_PCI_READ;
@@ -166,12 +170,12 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
 		proto_tce |= TCE_PCI_WRITE;
 
 	while (npages--) {
-		tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
+		tce = proto_tce | (rpn & rpn_mask) << tceshift;
 		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
 
 		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
 			ret = (int)rc;
-			tce_free_pSeriesLP(liobn, tcenum_start,
+			tce_free_pSeriesLP(liobn, tcenum_start, tceshift,
 			                   (npages_start - (npages + 1)));
 			break;
 		}
@@ -205,10 +209,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	long tcenum_start = tcenum, npages_start = npages;
 	int ret = 0;
 	unsigned long flags;
+	const unsigned long tceshift = tbl->it_page_shift;
+	const u64 rpn_mask = TCE_RPN_MASK(tceshift);
 
 	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
 		return tce_build_pSeriesLP(tbl->it_index, tcenum,
-					   tbl->it_page_shift, npages, uaddr,
+					   tceshift, npages, uaddr,
 		                           direction, attrs);
 	}
 
@@ -225,13 +231,13 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		if (!tcep) {
 			local_irq_restore(flags);
 			return tce_build_pSeriesLP(tbl->it_index, tcenum,
-					tbl->it_page_shift,
+					tceshift,
 					npages, uaddr, direction, attrs);
 		}
 		__this_cpu_write(tce_page, tcep);
 	}
 
-	rpn = __pa(uaddr) >> TCE_SHIFT;
+	rpn = __pa(uaddr) >> tceshift;
 	proto_tce = TCE_PCI_READ;
 	if (direction != DMA_TO_DEVICE)
 		proto_tce |= TCE_PCI_WRITE;
@@ -245,12 +251,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
 
 		for (l = 0; l < limit; l++) {
-			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
+			tcep[l] = cpu_to_be64(proto_tce | (rpn & rpn_mask) << tceshift);
 			rpn++;
 		}
 
 		rc = plpar_tce_put_indirect((u64)tbl->it_index,
-					    (u64)tcenum << 12,
+					    (u64)tcenum << tceshift,
 					    (u64)__pa(tcep),
 					    limit);
 
@@ -277,12 +283,13 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	return ret;
 }
 
-static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
+static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
+			       long npages)
 {
 	u64 rc;
 
 	while (npages--) {
-		rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
+		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, 0);
 
 		if (rc && printk_ratelimit()) {
 			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
@@ -301,9 +308,11 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
 	u64 rc;
 
 	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
-		return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
+		return tce_free_pSeriesLP(tbl->it_index, tcenum,
+					  tbl->it_page_shift, npages);
 
-	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
+	rc = plpar_tce_stuff((u64)tbl->it_index,
+			     (u64)tcenum << tbl->it_page_shift, 0, npages);
 
 	if (rc && printk_ratelimit()) {
 		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
@@ -319,7 +328,8 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
 	u64 rc;
 	unsigned long tce_ret;
 
-	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
+	rc = plpar_tce_get((u64)tbl->it_index,
+			   (u64)tcenum << tbl->it_page_shift, &tce_ret);
 
 	if (rc && printk_ratelimit()) {
 		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
-- 
2.25.4

