* [PATCH v4 0/2] drm/msm: rework msm_iommu_new() and .create_address_space cb
@ 2022-11-02 17:54 Dmitry Baryshkov
2022-11-02 17:54 ` [PATCH v4 1/2] drm/msm: move domain allocation into msm_iommu_new() Dmitry Baryshkov
2022-11-02 17:54 ` [PATCH v4 2/2] drm/msm: remove duplicated code from a6xx_create_address_space Dmitry Baryshkov
0 siblings, 2 replies; 5+ messages in thread
From: Dmitry Baryshkov @ 2022-11-02 17:54 UTC (permalink / raw)
To: Rob Clark, Sean Paul, Abhinav Kumar
Cc: freedreno, linux-arm-msm, Bjorn Andersson, dri-devel, Stephen Boyd
Simplify the MSM IOMMU code a bit. This moves iommu_domain_alloc() and
iommu_set_pgtable_quirks() calls to msm_iommu_new() to get rid of the
imbalance, where the iommu domain is allocated by the caller of
msm_iommu_new() and then it is freed by the msm_iommu code itself.
Changes since v3:
- Restored the condition to enable the quirk in a6xx code.
Changes since v2:
- Reorder the patches.
- Move iommu_set_pgtable_quirks() to the msm_iommu_new() too. It will
not work if it's called after attaching the device.
Changes since v1:
- Fixed the uninitialized variable usage in a6xx_gmu_memory_probe()
(reported by lkp)
Dmitry Baryshkov (2):
drm/msm: move domain allocation into msm_iommu_new()
drm/msm: remove duplicated code from a6xx_create_address_space
drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 12 ++++-----
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 33 +++---------------------
drivers/gpu/drm/msm/adreno/adreno_gpu.c | 29 +++++++++++----------
drivers/gpu/drm/msm/adreno/adreno_gpu.h | 9 ++++---
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 19 +++++++-------
drivers/gpu/drm/msm/msm_drv.c | 18 ++++++-------
drivers/gpu/drm/msm/msm_iommu.c | 20 +++++++++++---
drivers/gpu/drm/msm/msm_mmu.h | 3 ++-
11 files changed, 69 insertions(+), 80 deletions(-)
--
2.35.1
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH v4 1/2] drm/msm: move domain allocation into msm_iommu_new()
2022-11-02 17:54 [PATCH v4 0/2] drm/msm: rework msm_iommu_new() and .create_address_space cb Dmitry Baryshkov
@ 2022-11-02 17:54 ` Dmitry Baryshkov
2022-11-02 23:11 ` Rob Clark
2022-11-02 17:54 ` [PATCH v4 2/2] drm/msm: remove duplicated code from a6xx_create_address_space Dmitry Baryshkov
1 sibling, 1 reply; 5+ messages in thread
From: Dmitry Baryshkov @ 2022-11-02 17:54 UTC (permalink / raw)
To: Rob Clark, Sean Paul, Abhinav Kumar
Cc: freedreno, kernel test robot, linux-arm-msm, Bjorn Andersson,
dri-devel, Stephen Boyd
After the msm_iommu instance is created, the IOMMU domain is completely
handled inside the msm_iommu code. Move the iommu_domain_alloc() call
into msm_iommu_new() to simplify the callers' code.
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
---
drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 12 +++++-------
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 23 +++++++++++-----------
drivers/gpu/drm/msm/adreno/adreno_gpu.c | 25 +++++++++---------------
drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2 --
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 19 +++++++++---------
drivers/gpu/drm/msm/msm_drv.c | 18 ++++++++---------
drivers/gpu/drm/msm/msm_iommu.c | 20 ++++++++++++++++---
drivers/gpu/drm/msm/msm_mmu.h | 3 ++-
8 files changed, 62 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e033d6a67a20..6484b97c5344 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1213,19 +1213,17 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
- struct iommu_domain *domain;
struct msm_mmu *mmu;
- domain = iommu_domain_alloc(&platform_bus_type);
- if (!domain)
+ mmu = msm_iommu_new(gmu->dev, 0);
+ if (!mmu)
return -ENODEV;
+ if (IS_ERR(mmu))
+ return PTR_ERR(mmu);
- mmu = msm_iommu_new(gmu->dev, domain);
gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
- if (IS_ERR(gmu->aspace)) {
- iommu_domain_free(domain);
+ if (IS_ERR(gmu->aspace))
return PTR_ERR(gmu->aspace);
- }
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index fdc578016e0b..db4b3a48c708 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1786,35 +1786,34 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct iommu_domain *iommu;
+ struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
u64 start, size;
-
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
+ unsigned long quirks = 0;
/*
* This allows GPU to set the bus attributes required to use system
* cache on behalf of the iommu page table walker.
*/
if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
- adreno_set_llc_attributes(iommu);
+ quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(mmu)) {
- iommu_domain_free(iommu);
+ mmu = msm_iommu_new(&pdev->dev, quirks);
+ if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
- }
+
+ geometry = msm_iommu_get_geometry(mmu);
+ if (IS_ERR(geometry))
+ return ERR_CAST(geometry);
/*
* Use the aperture start or SZ_16M, whichever is greater. This will
* ensure that we align with the allocated pagetable range while still
* allowing room in the lower 32 bits for GMEM and whatnot
*/
- start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
- size = iommu->geometry.aperture_end - start + 1;
+ start = max_t(u64, SZ_16M, geometry->aperture_start);
+ size = geometry->aperture_end - start + 1;
aspace = msm_gem_address_space_create(mmu, "gpu",
start & GENMASK_ULL(48, 0), size);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 382fb7f9e497..12d0497f57e1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,37 +191,30 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
-void adreno_set_llc_attributes(struct iommu_domain *iommu)
-{
- iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
-}
-
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
- struct iommu_domain *iommu;
+ struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
u64 start, size;
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
-
- mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(mmu)) {
- iommu_domain_free(iommu);
+ mmu = msm_iommu_new(&pdev->dev, 0);
+ if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
- }
+
+ geometry = msm_iommu_get_geometry(mmu);
+ if (IS_ERR(geometry))
+ return ERR_CAST(geometry);
/*
* Use the aperture start or SZ_16M, whichever is greater. This will
* ensure that we align with the allocated pagetable range while still
* allowing room in the lower 32 bits for GMEM and whatnot
*/
- start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
- size = iommu->geometry.aperture_end - start + 1;
+ start = max_t(u64, SZ_16M, geometry->aperture_start);
+ size = geometry->aperture_end - start + 1;
aspace = msm_gem_address_space_create(mmu, "gpu",
start & GENMASK_ULL(48, 0), size);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index e7adc5c632d0..707273339969 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -338,8 +338,6 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev);
-void adreno_set_llc_attributes(struct iommu_domain *iommu);
-
int adreno_read_speedbin(struct device *dev, u32 *speedbin);
/*
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 964573d26d26..9a1a0769575d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -387,7 +387,7 @@ static int mdp4_kms_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
int irq, ret;
u32 major, minor;
@@ -499,10 +499,15 @@ static int mdp4_kms_init(struct drm_device *dev)
mdp4_disable(mdp4_kms);
mdelay(16);
- iommu = iommu_domain_alloc(pdev->dev.bus);
- if (iommu) {
- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
-
+ mmu = msm_iommu_new(&pdev->dev, 0);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ goto fail;
+ } else if (!mmu) {
+ DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
+ "contig buffers for scanout\n");
+ aspace = NULL;
+ } else {
aspace = msm_gem_address_space_create(mmu,
"mdp4", 0x1000, 0x100000000 - 0x1000);
@@ -514,10 +519,6 @@ static int mdp4_kms_init(struct drm_device *dev)
}
kms->aspace = aspace;
- } else {
- DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- aspace = NULL;
}
ret = modeset_init(mdp4_kms);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 28034c21f6bc..be32b4460e94 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -276,7 +276,6 @@ static int msm_drm_uninit(struct device *dev)
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
- struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
@@ -292,22 +291,21 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
else
iommu_dev = mdss_dev;
- domain = iommu_domain_alloc(iommu_dev->bus);
- if (!domain) {
+ mmu = msm_iommu_new(iommu_dev, 0);
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
+
+ if (!mmu) {
drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
return NULL;
}
- mmu = msm_iommu_new(iommu_dev, domain);
- if (IS_ERR(mmu)) {
- iommu_domain_free(domain);
- return ERR_CAST(mmu);
- }
-
aspace = msm_gem_address_space_create(mmu, "mdp_kms",
0x1000, 0x100000000 - 0x1000);
- if (IS_ERR(aspace))
+ if (IS_ERR(aspace)) {
+ dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
+ }
return aspace;
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 5577cea7c009..c2507582ecf3 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -186,6 +186,13 @@ int msm_iommu_pagetable_params(struct msm_mmu *mmu,
return 0;
}
+struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return &iommu->domain->geometry;
+}
+
static const struct msm_mmu_funcs pagetable_funcs = {
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
@@ -367,17 +374,23 @@ static const struct msm_mmu_funcs funcs = {
.resume_translation = msm_iommu_resume_translation,
};
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
{
+ struct iommu_domain *domain;
struct msm_iommu *iommu;
int ret;
+ domain = iommu_domain_alloc(dev->bus);
if (!domain)
- return ERR_PTR(-ENODEV);
+ return NULL;
+
+ iommu_set_pgtable_quirks(domain, quirks);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
- if (!iommu)
+ if (!iommu) {
+ iommu_domain_free(domain);
return ERR_PTR(-ENOMEM);
+ }
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
@@ -386,6 +399,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
+ iommu_domain_free(domain);
kfree(iommu);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index de158e1bf765..74cd81e701ff 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -40,7 +40,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
mmu->type = type;
}
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
@@ -58,5 +58,6 @@ void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
int *asid);
+struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
#endif /* __MSM_MMU_H__ */
--
2.35.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH v4 2/2] drm/msm: remove duplicated code from a6xx_create_address_space
2022-11-02 17:54 [PATCH v4 0/2] drm/msm: rework msm_iommu_new() and .create_address_space cb Dmitry Baryshkov
2022-11-02 17:54 ` [PATCH v4 1/2] drm/msm: move domain allocation into msm_iommu_new() Dmitry Baryshkov
@ 2022-11-02 17:54 ` Dmitry Baryshkov
2022-11-02 23:10 ` Rob Clark
1 sibling, 1 reply; 5+ messages in thread
From: Dmitry Baryshkov @ 2022-11-02 17:54 UTC (permalink / raw)
To: Rob Clark, Sean Paul, Abhinav Kumar
Cc: freedreno, linux-arm-msm, Bjorn Andersson, dri-devel, Stephen Boyd
The function a6xx_create_address_space() is mostly a copy of
adreno_iommu_create_address_space() with added quirk setting. Rework
these two functions to be thin wrappers around a common helper.
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
---
drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 +-
drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 28 +------------------------
drivers/gpu/drm/msm/adreno/adreno_gpu.c | 12 +++++++++--
drivers/gpu/drm/msm/adreno/adreno_gpu.h | 7 ++++++-
6 files changed, 20 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 2c8b9899625b..948785ed07bb 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -500,7 +500,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = adreno_create_address_space,
.get_rptr = a3xx_get_rptr,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 7cb8d9849c07..2fb32d5552c4 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -635,7 +635,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = adreno_create_address_space,
.get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 3dcec7acb384..3c537c0016fa 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1705,7 +1705,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
+ .create_address_space = adreno_create_address_space,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index db4b3a48c708..e87196457b9a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1786,10 +1786,6 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct iommu_domain_geometry *geometry;
- struct msm_mmu *mmu;
- struct msm_gem_address_space *aspace;
- u64 start, size;
unsigned long quirks = 0;
/*
@@ -1799,29 +1795,7 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- mmu = msm_iommu_new(&pdev->dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
- return ERR_CAST(mmu);
-
- geometry = msm_iommu_get_geometry(mmu);
- if (IS_ERR(geometry))
- return ERR_CAST(geometry);
-
- /*
- * Use the aperture start or SZ_16M, whichever is greater. This will
- * ensure that we align with the allocated pagetable range while still
- * allowing room in the lower 32 bits for GMEM and whatnot
- */
- start = max_t(u64, SZ_16M, geometry->aperture_start);
- size = geometry->aperture_end - start + 1;
-
- aspace = msm_gem_address_space_create(mmu, "gpu",
- start & GENMASK_ULL(48, 0), size);
-
- if (IS_ERR(aspace) && !IS_ERR(mmu))
- mmu->funcs->destroy(mmu);
-
- return aspace;
+ return adreno_iommu_create_address_space(gpu, pdev, quirks);
}
static struct msm_gem_address_space *
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 12d0497f57e1..12a964dc3b8d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,16 +191,24 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
+struct msm_gem_address_space *
+adreno_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev)
+{
+ return adreno_iommu_create_address_space(gpu, pdev, 0);
+}
+
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ unsigned long quirks)
{
struct iommu_domain_geometry *geometry;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
u64 start, size;
- mmu = msm_iommu_new(&pdev->dev, 0);
+ mmu = msm_iommu_new(&pdev->dev, quirks);
if (IS_ERR_OR_NULL(mmu))
return ERR_CAST(mmu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 707273339969..5d4b1c95033f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -335,8 +335,13 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len,
* attached targets
*/
struct msm_gem_address_space *
+adreno_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
- struct platform_device *pdev);
+ struct platform_device *pdev,
+ unsigned long quirks);
int adreno_read_speedbin(struct device *dev, u32 *speedbin);
--
2.35.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* Re: [PATCH v4 2/2] drm/msm: remove duplicated code from a6xx_create_address_space
2022-11-02 17:54 ` [PATCH v4 2/2] drm/msm: remove duplicated code from a6xx_create_address_space Dmitry Baryshkov
@ 2022-11-02 23:10 ` Rob Clark
0 siblings, 0 replies; 5+ messages in thread
From: Rob Clark @ 2022-11-02 23:10 UTC (permalink / raw)
To: Dmitry Baryshkov
Cc: freedreno, Sean Paul, Bjorn Andersson, Abhinav Kumar, dri-devel,
Stephen Boyd, linux-arm-msm
On Wed, Nov 2, 2022 at 10:54 AM Dmitry Baryshkov
<dmitry.baryshkov@linaro.org> wrote:
>
> The function a6xx_create_address_space() is mostly a copy of
> adreno_iommu_create_address_space() with added quirk setting. Rework
> these two functions to be a thin wrappers around a common helper.
>
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Rob Clark <robdclark@gmail.com>
> ---
> drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2 +-
> drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2 +-
> drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 +-
> drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 28 +------------------------
> drivers/gpu/drm/msm/adreno/adreno_gpu.c | 12 +++++++++--
> drivers/gpu/drm/msm/adreno/adreno_gpu.h | 7 ++++++-
> 6 files changed, 20 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
> index 2c8b9899625b..948785ed07bb 100644
> --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
> @@ -500,7 +500,7 @@ static const struct adreno_gpu_funcs funcs = {
> #endif
> .gpu_state_get = a3xx_gpu_state_get,
> .gpu_state_put = adreno_gpu_state_put,
> - .create_address_space = adreno_iommu_create_address_space,
> + .create_address_space = adreno_create_address_space,
> .get_rptr = a3xx_get_rptr,
> },
> };
> diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
> index 7cb8d9849c07..2fb32d5552c4 100644
> --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
> @@ -635,7 +635,7 @@ static const struct adreno_gpu_funcs funcs = {
> #endif
> .gpu_state_get = a4xx_gpu_state_get,
> .gpu_state_put = adreno_gpu_state_put,
> - .create_address_space = adreno_iommu_create_address_space,
> + .create_address_space = adreno_create_address_space,
> .get_rptr = a4xx_get_rptr,
> },
> .get_timestamp = a4xx_get_timestamp,
> diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
> index 3dcec7acb384..3c537c0016fa 100644
> --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
> @@ -1705,7 +1705,7 @@ static const struct adreno_gpu_funcs funcs = {
> .gpu_busy = a5xx_gpu_busy,
> .gpu_state_get = a5xx_gpu_state_get,
> .gpu_state_put = a5xx_gpu_state_put,
> - .create_address_space = adreno_iommu_create_address_space,
> + .create_address_space = adreno_create_address_space,
> .get_rptr = a5xx_get_rptr,
> },
> .get_timestamp = a5xx_get_timestamp,
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> index db4b3a48c708..e87196457b9a 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> @@ -1786,10 +1786,6 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
> {
> struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> - struct iommu_domain_geometry *geometry;
> - struct msm_mmu *mmu;
> - struct msm_gem_address_space *aspace;
> - u64 start, size;
> unsigned long quirks = 0;
>
> /*
> @@ -1799,29 +1795,7 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
> if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
> quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
>
> - mmu = msm_iommu_new(&pdev->dev, quirks);
> - if (IS_ERR_OR_NULL(mmu))
> - return ERR_CAST(mmu);
> -
> - geometry = msm_iommu_get_geometry(mmu);
> - if (IS_ERR(geometry))
> - return ERR_CAST(geometry);
> -
> - /*
> - * Use the aperture start or SZ_16M, whichever is greater. This will
> - * ensure that we align with the allocated pagetable range while still
> - * allowing room in the lower 32 bits for GMEM and whatnot
> - */
> - start = max_t(u64, SZ_16M, geometry->aperture_start);
> - size = geometry->aperture_end - start + 1;
> -
> - aspace = msm_gem_address_space_create(mmu, "gpu",
> - start & GENMASK_ULL(48, 0), size);
> -
> - if (IS_ERR(aspace) && !IS_ERR(mmu))
> - mmu->funcs->destroy(mmu);
> -
> - return aspace;
> + return adreno_iommu_create_address_space(gpu, pdev, quirks);
> }
>
> static struct msm_gem_address_space *
> diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
> index 12d0497f57e1..12a964dc3b8d 100644
> --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
> @@ -191,16 +191,24 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
> return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
> }
>
> +struct msm_gem_address_space *
> +adreno_create_address_space(struct msm_gpu *gpu,
> + struct platform_device *pdev)
> +{
> + return adreno_iommu_create_address_space(gpu, pdev, 0);
> +}
> +
> struct msm_gem_address_space *
> adreno_iommu_create_address_space(struct msm_gpu *gpu,
> - struct platform_device *pdev)
> + struct platform_device *pdev,
> + unsigned long quirks)
> {
> struct iommu_domain_geometry *geometry;
> struct msm_mmu *mmu;
> struct msm_gem_address_space *aspace;
> u64 start, size;
>
> - mmu = msm_iommu_new(&pdev->dev, 0);
> + mmu = msm_iommu_new(&pdev->dev, quirks);
> if (IS_ERR_OR_NULL(mmu))
> return ERR_CAST(mmu);
>
> diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
> index 707273339969..5d4b1c95033f 100644
> --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
> +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
> @@ -335,8 +335,13 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len,
> * attached targets
> */
> struct msm_gem_address_space *
> +adreno_create_address_space(struct msm_gpu *gpu,
> + struct platform_device *pdev);
> +
> +struct msm_gem_address_space *
> adreno_iommu_create_address_space(struct msm_gpu *gpu,
> - struct platform_device *pdev);
> + struct platform_device *pdev,
> + unsigned long quirks);
>
> int adreno_read_speedbin(struct device *dev, u32 *speedbin);
>
> --
> 2.35.1
>
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH v4 1/2] drm/msm: move domain allocation into msm_iommu_new()
2022-11-02 17:54 ` [PATCH v4 1/2] drm/msm: move domain allocation into msm_iommu_new() Dmitry Baryshkov
@ 2022-11-02 23:11 ` Rob Clark
0 siblings, 0 replies; 5+ messages in thread
From: Rob Clark @ 2022-11-02 23:11 UTC (permalink / raw)
To: Dmitry Baryshkov
Cc: freedreno, kernel test robot, Sean Paul, Bjorn Andersson,
Abhinav Kumar, dri-devel, Stephen Boyd, linux-arm-msm
On Wed, Nov 2, 2022 at 10:54 AM Dmitry Baryshkov
<dmitry.baryshkov@linaro.org> wrote:
>
> After the msm_iommu instance is created, the IOMMU domain is completely
> handled inside the msm_iommu code. Move the iommu_domain_alloc() call
> into the msm_iommu_new() to simplify callers code.
>
> Reported-by: kernel test robot <lkp@intel.com>
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Rob Clark <robdclark@gmail.com>
> ---
> drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 12 +++++-------
> drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 23 +++++++++++-----------
> drivers/gpu/drm/msm/adreno/adreno_gpu.c | 25 +++++++++---------------
> drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2 --
> drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 19 +++++++++---------
> drivers/gpu/drm/msm/msm_drv.c | 18 ++++++++---------
> drivers/gpu/drm/msm/msm_iommu.c | 20 ++++++++++++++++---
> drivers/gpu/drm/msm/msm_mmu.h | 3 ++-
> 8 files changed, 62 insertions(+), 60 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> index e033d6a67a20..6484b97c5344 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
> @@ -1213,19 +1213,17 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
>
> static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
> {
> - struct iommu_domain *domain;
> struct msm_mmu *mmu;
>
> - domain = iommu_domain_alloc(&platform_bus_type);
> - if (!domain)
> + mmu = msm_iommu_new(gmu->dev, 0);
> + if (!mmu)
> return -ENODEV;
> + if (IS_ERR(mmu))
> + return PTR_ERR(mmu);
>
> - mmu = msm_iommu_new(gmu->dev, domain);
> gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
> - if (IS_ERR(gmu->aspace)) {
> - iommu_domain_free(domain);
> + if (IS_ERR(gmu->aspace))
> return PTR_ERR(gmu->aspace);
> - }
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> index fdc578016e0b..db4b3a48c708 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> @@ -1786,35 +1786,34 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
> {
> struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> - struct iommu_domain *iommu;
> + struct iommu_domain_geometry *geometry;
> struct msm_mmu *mmu;
> struct msm_gem_address_space *aspace;
> u64 start, size;
> -
> - iommu = iommu_domain_alloc(&platform_bus_type);
> - if (!iommu)
> - return NULL;
> + unsigned long quirks = 0;
>
> /*
> * This allows GPU to set the bus attributes required to use system
> * cache on behalf of the iommu page table walker.
> */
> if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
> - adreno_set_llc_attributes(iommu);
> + quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
>
> - mmu = msm_iommu_new(&pdev->dev, iommu);
> - if (IS_ERR(mmu)) {
> - iommu_domain_free(iommu);
> + mmu = msm_iommu_new(&pdev->dev, quirks);
> + if (IS_ERR_OR_NULL(mmu))
> return ERR_CAST(mmu);
> - }
> +
> + geometry = msm_iommu_get_geometry(mmu);
> + if (IS_ERR(geometry))
> + return ERR_CAST(geometry);
>
> /*
> * Use the aperture start or SZ_16M, whichever is greater. This will
> * ensure that we align with the allocated pagetable range while still
> * allowing room in the lower 32 bits for GMEM and whatnot
> */
> - start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
> - size = iommu->geometry.aperture_end - start + 1;
> + start = max_t(u64, SZ_16M, geometry->aperture_start);
> + size = geometry->aperture_end - start + 1;
>
> aspace = msm_gem_address_space_create(mmu, "gpu",
> start & GENMASK_ULL(48, 0), size);
> diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
> index 382fb7f9e497..12d0497f57e1 100644
> --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
> @@ -191,37 +191,30 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
> return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
> }
>
> -void adreno_set_llc_attributes(struct iommu_domain *iommu)
> -{
> - iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
> -}
> -
> struct msm_gem_address_space *
> adreno_iommu_create_address_space(struct msm_gpu *gpu,
> struct platform_device *pdev)
> {
> - struct iommu_domain *iommu;
> + struct iommu_domain_geometry *geometry;
> struct msm_mmu *mmu;
> struct msm_gem_address_space *aspace;
> u64 start, size;
>
> - iommu = iommu_domain_alloc(&platform_bus_type);
> - if (!iommu)
> - return NULL;
> -
> - mmu = msm_iommu_new(&pdev->dev, iommu);
> - if (IS_ERR(mmu)) {
> - iommu_domain_free(iommu);
> + mmu = msm_iommu_new(&pdev->dev, 0);
> + if (IS_ERR_OR_NULL(mmu))
> return ERR_CAST(mmu);
> - }
> +
> + geometry = msm_iommu_get_geometry(mmu);
> + if (IS_ERR(geometry))
> + return ERR_CAST(geometry);
>
> /*
> * Use the aperture start or SZ_16M, whichever is greater. This will
> * ensure that we align with the allocated pagetable range while still
> * allowing room in the lower 32 bits for GMEM and whatnot
> */
> - start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
> - size = iommu->geometry.aperture_end - start + 1;
> + start = max_t(u64, SZ_16M, geometry->aperture_start);
> + size = geometry->aperture_end - start + 1;
>
> aspace = msm_gem_address_space_create(mmu, "gpu",
> start & GENMASK_ULL(48, 0), size);
> diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
> index e7adc5c632d0..707273339969 100644
> --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
> +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
> @@ -338,8 +338,6 @@ struct msm_gem_address_space *
> adreno_iommu_create_address_space(struct msm_gpu *gpu,
> struct platform_device *pdev);
>
> -void adreno_set_llc_attributes(struct iommu_domain *iommu);
> -
> int adreno_read_speedbin(struct device *dev, u32 *speedbin);
>
> /*
> diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
> index 964573d26d26..9a1a0769575d 100644
> --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
> +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
> @@ -387,7 +387,7 @@ static int mdp4_kms_init(struct drm_device *dev)
> struct msm_drm_private *priv = dev->dev_private;
> struct mdp4_kms *mdp4_kms;
> struct msm_kms *kms = NULL;
> - struct iommu_domain *iommu;
> + struct msm_mmu *mmu;
> struct msm_gem_address_space *aspace;
> int irq, ret;
> u32 major, minor;
> @@ -499,10 +499,15 @@ static int mdp4_kms_init(struct drm_device *dev)
> mdp4_disable(mdp4_kms);
> mdelay(16);
>
> - iommu = iommu_domain_alloc(pdev->dev.bus);
> - if (iommu) {
> - struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
> -
> + mmu = msm_iommu_new(&pdev->dev, 0);
> + if (IS_ERR(mmu)) {
> + ret = PTR_ERR(mmu);
> + goto fail;
> + } else if (!mmu) {
> + DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
> + "contig buffers for scanout\n");
> + aspace = NULL;
> + } else {
> aspace = msm_gem_address_space_create(mmu,
> "mdp4", 0x1000, 0x100000000 - 0x1000);
>
> @@ -514,10 +519,6 @@ static int mdp4_kms_init(struct drm_device *dev)
> }
>
> kms->aspace = aspace;
> - } else {
> - DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
> - "contig buffers for scanout\n");
> - aspace = NULL;
> }
>
> ret = modeset_init(mdp4_kms);
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 28034c21f6bc..be32b4460e94 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -276,7 +276,6 @@ static int msm_drm_uninit(struct device *dev)
>
> struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
> {
> - struct iommu_domain *domain;
> struct msm_gem_address_space *aspace;
> struct msm_mmu *mmu;
> struct device *mdp_dev = dev->dev;
> @@ -292,22 +291,21 @@ struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
> else
> iommu_dev = mdss_dev;
>
> - domain = iommu_domain_alloc(iommu_dev->bus);
> - if (!domain) {
> + mmu = msm_iommu_new(iommu_dev, 0);
> + if (IS_ERR(mmu))
> + return ERR_CAST(mmu);
> +
> + if (!mmu) {
> drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
> return NULL;
> }
>
> - mmu = msm_iommu_new(iommu_dev, domain);
> - if (IS_ERR(mmu)) {
> - iommu_domain_free(domain);
> - return ERR_CAST(mmu);
> - }
> -
> aspace = msm_gem_address_space_create(mmu, "mdp_kms",
> 0x1000, 0x100000000 - 0x1000);
> - if (IS_ERR(aspace))
> + if (IS_ERR(aspace)) {
> + dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
> mmu->funcs->destroy(mmu);
> + }
>
> return aspace;
> }
> diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
> index 5577cea7c009..c2507582ecf3 100644
> --- a/drivers/gpu/drm/msm/msm_iommu.c
> +++ b/drivers/gpu/drm/msm/msm_iommu.c
> @@ -186,6 +186,13 @@ int msm_iommu_pagetable_params(struct msm_mmu *mmu,
> return 0;
> }
>
> +struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
> +{
> + struct msm_iommu *iommu = to_msm_iommu(mmu);
> +
> + return &iommu->domain->geometry;
> +}
> +
> static const struct msm_mmu_funcs pagetable_funcs = {
> .map = msm_iommu_pagetable_map,
> .unmap = msm_iommu_pagetable_unmap,
> @@ -367,17 +374,23 @@ static const struct msm_mmu_funcs funcs = {
> .resume_translation = msm_iommu_resume_translation,
> };
>
> -struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
> +struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
> {
> + struct iommu_domain *domain;
> struct msm_iommu *iommu;
> int ret;
>
> + domain = iommu_domain_alloc(dev->bus);
> if (!domain)
> - return ERR_PTR(-ENODEV);
> + return NULL;
> +
> + iommu_set_pgtable_quirks(domain, quirks);
>
> iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
> - if (!iommu)
> + if (!iommu) {
> + iommu_domain_free(domain);
> return ERR_PTR(-ENOMEM);
> + }
>
> iommu->domain = domain;
> msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
> @@ -386,6 +399,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
>
> ret = iommu_attach_device(iommu->domain, dev);
> if (ret) {
> + iommu_domain_free(domain);
> kfree(iommu);
> return ERR_PTR(ret);
> }
> diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
> index de158e1bf765..74cd81e701ff 100644
> --- a/drivers/gpu/drm/msm/msm_mmu.h
> +++ b/drivers/gpu/drm/msm/msm_mmu.h
> @@ -40,7 +40,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
> mmu->type = type;
> }
>
> -struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
> +struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks);
> struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
>
> static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
> @@ -58,5 +58,6 @@ void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
>
> int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
> int *asid);
> +struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu);
>
> #endif /* __MSM_MMU_H__ */
> --
> 2.35.1
>
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2022-11-02 23:11 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-11-02 17:54 [PATCH v4 0/2] drm/msm: rework msm_iommu_new() and .create_address_space cb Dmitry Baryshkov
2022-11-02 17:54 ` [PATCH v4 1/2] drm/msm: move domain allocation into msm_iommu_new() Dmitry Baryshkov
2022-11-02 23:11 ` Rob Clark
2022-11-02 17:54 ` [PATCH v4 2/2] drm/msm: remove duplicated code from a6xx_create_address_space Dmitry Baryshkov
2022-11-02 23:10 ` Rob Clark
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).