From: Jeffy Chen <jeffy.chen@rock-chips.com>
To: linux-kernel@vger.kernel.org
Cc: jcliang@chromium.org, robin.murphy@arm.com, xxm@rock-chips.com,
	tfiga@chromium.org, Jeffy Chen <jeffy.chen@rock-chips.com>,
	Heiko Stuebner <heiko@sntech.de>,
	linux-rockchip@lists.infradead.org,
	iommu@lists.linux-foundation.org, Joerg Roedel <joro@8bytes.org>,
	linux-arm-kernel@lists.infradead.org
Subject: [RESEND PATCH v6 13/14] iommu/rockchip: Add runtime PM support
Date: Thu,  1 Mar 2018 18:18:36 +0800
Message-ID: <20180301101837.27969-14-jeffy.chen@rock-chips.com>
In-Reply-To: <20180301101837.27969-1-jeffy.chen@rock-chips.com>

When the power domain is powered off, the IOMMU cannot be accessed and
register programming must be deferred until the power domain is
powered on again.

Add runtime PM support, and use a runtime PM device link from the
IOMMU to the master to start up and shut down the IOMMU.
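
For context (an editorial sketch, not part of the patch itself): with
DL_FLAG_PM_RUNTIME the master is the consumer and the IOMMU is the
supplier of the link, so a runtime PM reference taken by a master
driver resumes the IOMMU first and lets the runtime resume callback
restore the MMU state before the master touches the hardware. A
minimal, hypothetical master-side sketch
(example_master_start_streaming() is invented for illustration):

	#include <linux/pm_runtime.h>

	static int example_master_start_streaming(struct device *dev)
	{
		int ret;

		/* Resumes the IOMMU (the link supplier) first. */
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... program the master hardware and start DMA ... */

		pm_runtime_put(dev);
		return 0;
	}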

Signed-off-by: Jeffy Chen <jeffy.chen@rock-chips.com>
---

Changes in v6: None
Changes in v5:
Avoid a race between pm_runtime_get_if_in_use() and pm_runtime_enabled().

Changes in v4: None
Changes in v3:
Only call startup() and shutdown() when the IOMMU is attached.
Remove pm_mutex.
Handle the case where runtime PM is disabled.
Check the runtime PM state in rk_iommu_irq() (see the sketch below).

Changes in v2: None
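
Background for the pm_runtime_get_if_in_use() checks in the diff below
(an editorial sketch, not part of the patch itself):
pm_runtime_get_if_in_use() returns a positive value when it takes a
usage-count reference on a runtime-active device, 0 when the device is
not in use, and -EINVAL when runtime PM is disabled for the device.
The patch treats -EINVAL as "always powered on", which is why both
ret > 0 and ret == -EINVAL permit register access:

	int ret = pm_runtime_get_if_in_use(iommu->dev);

	if (ret > 0 || ret == -EINVAL) {
		/*
		 * Either we now hold a runtime PM reference (ret > 0), or
		 * runtime PM is disabled (-EINVAL) and the device is
		 * assumed to be permanently powered, so touching the
		 * IOMMU registers is safe.
		 */
		/* ... access IOMMU registers ... */
	}
	if (ret > 0)
		pm_runtime_put(iommu->dev);	/* drop the reference taken above */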

 drivers/iommu/rockchip-iommu.c | 181 +++++++++++++++++++++++++++++++----------
 1 file changed, 140 insertions(+), 41 deletions(-)

diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 2448a0528e39..0e0a42f41818 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -22,6 +22,7 @@
 #include <linux/of_iommu.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
@@ -106,6 +107,7 @@ struct rk_iommu {
 };
 
 struct rk_iommudata {
+	struct device_link *link; /* runtime PM link from IOMMU to master */
 	struct rk_iommu *iommu;
 };
 
@@ -518,7 +520,12 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 	u32 int_status;
 	dma_addr_t iova;
 	irqreturn_t ret = IRQ_NONE;
-	int i;
+	int i, err, need_runtime_put;
+
+	err = pm_runtime_get_if_in_use(iommu->dev);
+	if (err <= 0 && err != -EINVAL)
+		return ret;
+	need_runtime_put = err > 0;
 
 	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
 
@@ -570,6 +577,9 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 
 	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
 
+	if (need_runtime_put)
+		pm_runtime_put(iommu->dev);
+
 	return ret;
 }
 
@@ -611,10 +621,20 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
 	list_for_each(pos, &rk_domain->iommus) {
 		struct rk_iommu *iommu;
+		int ret;
+
 		iommu = list_entry(pos, struct rk_iommu, node);
-		WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
-		rk_iommu_zap_lines(iommu, iova, size);
-		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+
+		/* Only zap TLBs of IOMMUs that are powered on. */
+		ret = pm_runtime_get_if_in_use(iommu->dev);
+		if (ret > 0 || ret == -EINVAL) {
+			WARN_ON(clk_bulk_enable(iommu->num_clocks,
+						iommu->clocks));
+			rk_iommu_zap_lines(iommu, iova, size);
+			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+		}
+		if (ret > 0)
+			pm_runtime_put(iommu->dev);
 	}
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 }
@@ -817,22 +837,30 @@ static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
 	return data ? data->iommu : NULL;
 }
 
-static int rk_iommu_attach_device(struct iommu_domain *domain,
-				  struct device *dev)
+/* Must be called with iommu powered on and attached */
+static void rk_iommu_shutdown(struct rk_iommu *iommu)
 {
-	struct rk_iommu *iommu;
+	int i;
+
+	/* Ignore error while disabling, just keep going */
+	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
+	rk_iommu_enable_stall(iommu);
+	rk_iommu_disable_paging(iommu);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+	}
+	rk_iommu_disable_stall(iommu);
+	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+}
+
+/* Must be called with iommu powered on and attached */
+static int rk_iommu_startup(struct rk_iommu *iommu)
+{
+	struct iommu_domain *domain = iommu->domain;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
-	unsigned long flags;
 	int ret, i;
 
-	/*
-	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
-	 * Such a device does not belong to an iommu group.
-	 */
-	iommu = rk_iommu_from_dev(dev);
-	if (!iommu)
-		return 0;
-
 	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
 	if (ret)
 		return ret;
@@ -845,8 +873,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	if (ret)
 		goto out_disable_stall;
 
-	iommu->domain = domain;
-
 	for (i = 0; i < iommu->num_mmu; i++) {
 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
 			       rk_domain->dt_dma);
@@ -855,14 +881,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	}
 
 	ret = rk_iommu_enable_paging(iommu);
-	if (ret)
-		goto out_disable_stall;
-
-	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
-	list_add_tail(&iommu->node, &rk_domain->iommus);
-	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
-
-	dev_dbg(dev, "Attached to iommu domain\n");
 
 out_disable_stall:
 	rk_iommu_disable_stall(iommu);
@@ -877,31 +895,76 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
-	int i;
+	int ret;
 
 	/* Allow 'virtual devices' (eg drm) to detach from domain */
 	iommu = rk_iommu_from_dev(dev);
 	if (!iommu)
 		return;
 
+	dev_dbg(dev, "Detaching from iommu domain\n");
+
+	/* iommu already detached */
+	if (iommu->domain != domain)
+		return;
+
+	iommu->domain = NULL;
+
 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
 	list_del_init(&iommu->node);
 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 
-	/* Ignore error while disabling, just keep going */
-	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
-	rk_iommu_enable_stall(iommu);
-	rk_iommu_disable_paging(iommu);
-	for (i = 0; i < iommu->num_mmu; i++) {
-		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
-		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
-	}
-	rk_iommu_disable_stall(iommu);
-	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
+	ret = pm_runtime_get_if_in_use(iommu->dev);
+	if (ret > 0 || ret == -EINVAL)
+		rk_iommu_shutdown(iommu);
+	if (ret > 0)
+		pm_runtime_put(iommu->dev);
+}
 
-	iommu->domain = NULL;
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+		struct device *dev)
+{
+	struct rk_iommu *iommu;
+	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+	unsigned long flags;
+	int ret, need_runtime_put;
+
+	/*
+	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
+	 * Such a device does not belong to an iommu group.
+	 */
+	iommu = rk_iommu_from_dev(dev);
+	if (!iommu)
+		return 0;
+
+	dev_dbg(dev, "Attaching to iommu domain\n");
+
+	/* iommu already attached */
+	if (iommu->domain == domain)
+		return 0;
+
+	if (iommu->domain)
+		rk_iommu_detach_device(iommu->domain, dev);
+
+	iommu->domain = domain;
+
+	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+	list_add_tail(&iommu->node, &rk_domain->iommus);
+	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+	ret = pm_runtime_get_if_in_use(iommu->dev);
+	if (ret <= 0 && ret != -EINVAL)
+		return 0;
+	need_runtime_put = ret > 0;
+
+	ret = rk_iommu_startup(iommu);
+	if (ret)
+		rk_iommu_detach_device(iommu->domain, dev);
+
+	if (need_runtime_put)
+		pm_runtime_put(iommu->dev);
 
-	dev_dbg(dev, "Detached from iommu domain\n");
+	return ret;
 }
 
 static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
@@ -989,17 +1052,21 @@ static int rk_iommu_add_device(struct device *dev)
 {
 	struct iommu_group *group;
 	struct rk_iommu *iommu;
+	struct rk_iommudata *data;
 
-	iommu = rk_iommu_from_dev(dev);
-	if (!iommu)
+	data = dev->archdata.iommu;
+	if (!data)
 		return -ENODEV;
 
+	iommu = rk_iommu_from_dev(dev);
+
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group))
 		return PTR_ERR(group);
 	iommu_group_put(group);
 
 	iommu_device_link(&iommu->iommu, dev);
+	data->link = device_link_add(dev, iommu->dev, DL_FLAG_PM_RUNTIME);
 
 	return 0;
 }
@@ -1007,9 +1074,11 @@ static int rk_iommu_add_device(struct device *dev)
 static void rk_iommu_remove_device(struct device *dev)
 {
 	struct rk_iommu *iommu;
+	struct rk_iommudata *data = dev->archdata.iommu;
 
 	iommu = rk_iommu_from_dev(dev);
 
+	device_link_del(data->link);
 	iommu_device_unlink(&iommu->iommu, dev);
 	iommu_group_remove_device(dev);
 }
@@ -1135,6 +1204,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
 	bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
 
+	pm_runtime_enable(dev);
+
 	return 0;
 err_remove_sysfs:
 	iommu_device_sysfs_remove(&iommu->iommu);
@@ -1143,6 +1214,33 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	return err;
 }
 
+static int __maybe_unused rk_iommu_suspend(struct device *dev)
+{
+	struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+	if (!iommu->domain)
+		return 0;
+
+	rk_iommu_shutdown(iommu);
+	return 0;
+}
+
+static int __maybe_unused rk_iommu_resume(struct device *dev)
+{
+	struct rk_iommu *iommu = dev_get_drvdata(dev);
+
+	if (!iommu->domain)
+		return 0;
+
+	return rk_iommu_startup(iommu);
+}
+
+static const struct dev_pm_ops rk_iommu_pm_ops = {
+	SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+};
+
 static const struct of_device_id rk_iommu_dt_ids[] = {
 	{ .compatible = "rockchip,iommu" },
 	{ /* sentinel */ }
@@ -1154,6 +1252,7 @@ static struct platform_driver rk_iommu_driver = {
 	.driver = {
 		   .name = "rk_iommu",
 		   .of_match_table = rk_iommu_dt_ids,
+		   .pm = &rk_iommu_pm_ops,
 		   .suppress_bind_attrs = true,
 	},
 };
-- 
2.11.0
