From: Jonathan Marek <jonathan@marek.ca>
To: freedreno@lists.freedesktop.org
Cc: Rob Clark <robdclark@gmail.com>, Sean Paul <sean@poorly.run>,
	David Airlie <airlied@linux.ie>, Daniel Vetter <daniel@ffwll.ch>,
	Jordan Crouse <jcrouse@codeaurora.org>,
	Stephen Boyd <swboyd@chromium.org>,
	Sharat Masetty <smasetty@codeaurora.org>,
	"Michael J. Ruhl" <michael.j.ruhl@intel.com>,
	linux-arm-msm@vger.kernel.org (open list:DRM DRIVER FOR MSM ADRENO GPU),
	dri-devel@lists.freedesktop.org (open list:DRM DRIVER FOR MSM ADRENO GPU),
	linux-kernel@vger.kernel.org (open list)
Subject: [PATCH 2/9] Revert "drm/msm/a6xx: Use the DMA API for GMU memory objects"
Date: Mon, 20 Apr 2020 10:03:06 -0400
Message-ID: <20200420140313.7263-3-jonathan@marek.ca>
In-Reply-To: <20200420140313.7263-1-jonathan@marek.ca>

This reverts commit a5fb8b918920c6f7706a8b5b8ea535a7f077a7f6.
---
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 115 +++++++++++++++++++++++---
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h |   6 +-
 2 files changed, 107 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c4e71abbdd53..748cd379065f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,7 +2,6 @@
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
@@ -921,10 +920,21 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
 {
+	int count, i;
+	u64 iova;
+
 	if (IS_ERR_OR_NULL(bo))
 		return;
 
-	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
+	count = bo->size >> PAGE_SHIFT;
+	iova = bo->iova;
+
+	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
+		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
+		__free_pages(bo->pages[i], 0);
+	}
+
+	kfree(bo->pages);
 	kfree(bo);
 }
 
@@ -932,6 +942,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 		size_t size)
 {
 	struct a6xx_gmu_bo *bo;
+	int ret, count, i;
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
@@ -939,14 +950,86 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 
 	bo->size = PAGE_ALIGN(size);
 
-	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
+	count = bo->size >> PAGE_SHIFT;
 
-	if (!bo->virt) {
+	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
+	if (!bo->pages) {
 		kfree(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 
+	for (i = 0; i < count; i++) {
+		bo->pages[i] = alloc_page(GFP_KERNEL);
+		if (!bo->pages[i])
+			goto err;
+	}
+
+	bo->iova = gmu->uncached_iova_base;
+
+	for (i = 0; i < count; i++) {
+		ret = iommu_map(gmu->domain,
+			bo->iova + (PAGE_SIZE * i),
+			page_to_phys(bo->pages[i]), PAGE_SIZE,
+			IOMMU_READ | IOMMU_WRITE);
+
+		if (ret) {
+			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
+
+			for (i = i - 1 ; i >= 0; i--)
+				iommu_unmap(gmu->domain,
+					bo->iova + (PAGE_SIZE * i),
+					PAGE_SIZE);
+
+			goto err;
+		}
+	}
+
+	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
+		pgprot_writecombine(PAGE_KERNEL));
+	if (!bo->virt)
+		goto err;
+
+	/* Align future IOVA addresses on 1MB boundaries */
+	gmu->uncached_iova_base += ALIGN(size, SZ_1M);
+
 	return bo;
+
+err:
+	for (i = 0; i < count; i++) {
+		if (bo->pages[i])
+			__free_pages(bo->pages[i], 0);
+	}
+
+	kfree(bo->pages);
+	kfree(bo);
+
+	return ERR_PTR(-ENOMEM);
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+	int ret;
+
+	/*
+	 * The GMU address space is hardcoded to treat the range
+	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
+	 * between the GMU and the CPU will live in this space
+	 */
+	gmu->uncached_iova_base = 0x60000000;
+
+
+	gmu->domain = iommu_domain_alloc(&platform_bus_type);
+	if (!gmu->domain)
+		return -ENODEV;
+
+	ret = iommu_attach_device(gmu->domain, gmu->dev);
+
+	if (ret) {
+		iommu_domain_free(gmu->domain);
+		gmu->domain = NULL;
+	}
+
+	return ret;
 }
 
 /* Return the 'arc-level' for the given frequency */
@@ -1206,6 +1289,10 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
+	iommu_detach_device(gmu->domain, gmu->dev);
+
+	iommu_domain_free(gmu->domain);
+
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
 
@@ -1226,15 +1313,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	gmu->dev = &pdev->dev;
 
-	/* Pass force_dma false to require the DT to set the dma region */
-	ret = of_dma_configure(gmu->dev, node, false);
-	if (ret)
-		return ret;
-
-	/* Set the mask after the of_dma_configure() */
-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
-	if (ret)
-		return ret;
+	of_dma_configure(gmu->dev, node, true);
 
 	/* Fow now, don't do anything fancy until we get our feet under us */
 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1246,6 +1325,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
+	/* Set up the IOMMU context bank */
+	ret = a6xx_gmu_memory_probe(gmu);
+	if (ret)
+		goto err_put_device;
+
 	/* Allocate memory for for the HFI queues */
 	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
 	if (IS_ERR(gmu->hfi))
@@ -1291,6 +1375,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 err_memory:
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
+	if (gmu->domain) {
+		iommu_detach_device(gmu->domain, gmu->dev);
+
+		iommu_domain_free(gmu->domain);
+	}
 	ret = -ENODEV;
 
 err_put_device:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 4af65a36d5ca..2af91ed7ed0c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -12,7 +12,8 @@
 struct a6xx_gmu_bo {
 	void *virt;
 	size_t size;
-	dma_addr_t iova;
+	u64 iova;
+	struct page **pages;
 };
 
 /*
@@ -48,6 +49,9 @@ struct a6xx_gmu {
 	int hfi_irq;
 	int gmu_irq;
 
+	struct iommu_domain *domain;
+	u64 uncached_iova_base;
+
 	struct device *gxpd;
 
 	int idle_level;
-- 
2.26.1
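
For context, the two allocation strategies at play can be contrasted in a
short standalone sketch. This is illustrative only and not part of the
patch: the helper names are hypothetical and error unwinding is left to
the caller, but the kernel calls mirror the hunks above.

/*
 * Illustrative sketch, not part of the patch: helper names are
 * hypothetical and error unwinding is omitted for brevity.
 */
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * DMA-API path (used by the commit being reverted): a single call returns
 * both a write-combined CPU mapping and a device address, with the IOMMU
 * managed behind the API.
 */
static void *gmu_alloc_dma(struct device *dev, size_t size, dma_addr_t *iova)
{
	return dma_alloc_wc(dev, size, iova, GFP_KERNEL);
}

/*
 * Manual path (restored by this revert): allocate pages individually, map
 * each one into a driver-owned IOMMU domain at a chosen IOVA, then vmap()
 * the page array write-combined for CPU access.
 */
static void *gmu_alloc_manual(struct iommu_domain *domain, struct page **pages,
			      int count, u64 iova)
{
	int i;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			return NULL;	/* caller unwinds pages/mappings */
		if (iommu_map(domain, iova + PAGE_SIZE * i,
			      page_to_phys(pages[i]), PAGE_SIZE,
			      IOMMU_READ | IOMMU_WRITE))
			return NULL;	/* caller unwinds pages/mappings */
	}

	return vmap(pages, count, VM_IOREMAP, pgprot_writecombine(PAGE_KERNEL));
}

The manual path trades the convenience of the DMA API for explicit control
over IOVA placement, which matters here: the GMU hardcodes the range
0x60000000 - 0x80000000 as its uncached window, and a later patch in this
series ("drm/msm/a6xx: allow allocating GMU memory with a fixed address")
builds on that control.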


Thread overview: 37+ messages

     [not found] <20200420140313.7263-1-jonathan@marek.ca>
2020-04-20 14:03 ` [PATCH 1/9] drm/msm/adreno: add A640/A650 to gpulist Jonathan Marek
2020-04-20 14:30   ` Greg Kroah-Hartman
2020-04-20 14:03 ` [PATCH 2/9] Revert "drm/msm/a6xx: Use the DMA API for GMU memory objects" Jonathan Marek [this message]
2020-04-20 19:51   ` Bjorn Andersson
2020-04-20 19:59     ` Jonathan Marek
2020-04-20 21:16       ` Rob Clark
2020-04-21 11:21   ` Christoph Hellwig
2020-04-20 14:03 ` [PATCH 3/9] drm/msm/a6xx: allow allocating GMU memory with a fixed address Jonathan Marek
2020-04-20 14:03 ` [PATCH 4/9] drm/msm/a6xx: HFI v2 for A640 and A650 Jonathan Marek
2020-04-21 16:30   ` Jordan Crouse
2020-04-21 16:52     ` Jonathan Marek
2020-04-20 14:03 ` [PATCH 5/9] drm/msm/a6xx: A640/A650 GMU firmware path Jonathan Marek
2020-04-20 14:03 ` [PATCH 6/9] drm/msm/a6xx: add support for A650 gmu rscc registers Jonathan Marek
2020-04-20 14:03 ` [PATCH 7/9] drm/msm/a6xx: gmu_pdc register values for A640 and A650 Jonathan Marek
2020-04-21 16:34   ` Jordan Crouse
2020-04-20 14:03 ` [PATCH 8/9] drm/msm/a6xx: enable GMU log Jonathan Marek
2020-04-21 16:38   ` [Freedreno] " Jordan Crouse
2020-04-20 14:03 ` [PATCH 9/9] drm/msm/a6xx: update a6xx_hw_init for A640 and A650 Jonathan Marek
2020-04-21 16:48   ` Jordan Crouse
