* [PATCH v4] clk: allow reentrant calls into the clk framework
@ 2013-03-27  7:09 ` Mike Turquette
  0 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-27  7:09 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-arm-kernel, patches, linaro-kernel, Mike Turquette,
	Rajagopal Venkat, David Brown, Ulf Hansson, Laurent Pinchart

Reentrancy into the clock framework from the clk.h api is necessary for
clocks that are prepared and unprepared via i2c_transfer (which includes
many PMICs and discrete audio chips) as well as for several other use
cases.

This patch implements reentrancy by adding two global atomic_t variables
that track the context of the current caller.  Context in this case is
the return value from get_current().  One context variable is for slow
operations protected by the prepare_lock mutex and the other is for fast
operations protected by the enable_lock spinlock.

The clk.h api implementations are modified to first see if the relevant
global lock is already held and, if so, compare the global context (set
by whoever is holding the lock) against their own context (via a call to
get_current()).  If the two match then this function is a nested call
from the task already holding the lock, and we proceed.  If the contexts
do not match then we call mutex_lock (or busy-wait on the spinlock) until
the existing task completes.

This patch does not increase concurrency for unrelated calls into the
clock framework.  Instead it simply allows reentrancy by the single task
which is currently holding the global clock framework lock.

Signed-off-by: Mike Turquette <mturquette@linaro.org>
Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Cc: David Brown <davidb@codeaurora.org>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
---
 drivers/clk/clk.c |  255 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 186 insertions(+), 69 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff..17432a5 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,9 +19,12 @@
 #include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
+static atomic_t prepare_context;
+static atomic_t enable_context;
 
 static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
@@ -456,27 +459,6 @@ unsigned int __clk_get_prepare_count(struct clk *clk)
 	return !clk ? 0 : clk->prepare_count;
 }
 
-unsigned long __clk_get_rate(struct clk *clk)
-{
-	unsigned long ret;
-
-	if (!clk) {
-		ret = 0;
-		goto out;
-	}
-
-	ret = clk->rate;
-
-	if (clk->flags & CLK_IS_ROOT)
-		goto out;
-
-	if (!clk->parent)
-		ret = 0;
-
-out:
-	return ret;
-}
-
 unsigned long __clk_get_flags(struct clk *clk)
 {
 	return !clk ? 0 : clk->flags;
@@ -566,6 +548,35 @@ struct clk *__clk_lookup(const char *name)
 	return NULL;
 }
 
+/***  locking & reentrancy ***/
+
+static void clk_fwk_lock(void)
+{
+	/* hold the framework-wide lock, context == NULL */
+	mutex_lock(&prepare_lock);
+
+	/* set context for any reentrant calls */
+	atomic_set(&prepare_context, (int) get_current());
+}
+
+static void clk_fwk_unlock(void)
+{
+	/* clear the context */
+	atomic_set(&prepare_context, 0);
+
+	/* release the framework-wide lock, context == NULL */
+	mutex_unlock(&prepare_lock);
+}
+
+static bool clk_is_reentrant(void)
+{
+	if (mutex_is_locked(&prepare_lock))
+		if ((void *) atomic_read(&prepare_context) == get_current())
+			return true;
+
+	return false;
+}
+
 /***        clk api        ***/
 
 void __clk_unprepare(struct clk *clk)
@@ -600,9 +611,15 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-	mutex_lock(&prepare_lock);
+	/* re-enter if call is from the same context */
+	if (clk_is_reentrant()) {
+		__clk_unprepare(clk);
+		return;
+	}
+
+	clk_fwk_lock();
 	__clk_unprepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_fwk_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -648,10 +665,16 @@ int clk_prepare(struct clk *clk)
 {
 	int ret;
 
-	mutex_lock(&prepare_lock);
-	ret = __clk_prepare(clk);
-	mutex_unlock(&prepare_lock);
+	/* re-enter if call is from the same context */
+	if (clk_is_reentrant()) {
+		ret = __clk_prepare(clk);
+		goto out;
+	}
 
+	clk_fwk_lock();
+	ret = __clk_prepare(clk);
+	clk_fwk_unlock();
+out:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
@@ -692,8 +715,27 @@ void clk_disable(struct clk *clk)
 {
 	unsigned long flags;
 
+	/* this call re-enters if it is from the same context */
+	if (spin_is_locked(&enable_lock)) {
+		if ((void *) atomic_read(&enable_context) == get_current()) {
+			__clk_disable(clk);
+			return;
+		}
+	}
+
+	/* hold the framework-wide lock, context == NULL */
 	spin_lock_irqsave(&enable_lock, flags);
+
+	/* set context for any reentrant calls */
+	atomic_set(&enable_context, (int) get_current());
+
+	/* disable the clock(s) */
 	__clk_disable(clk);
+
+	/* clear the context */
+	atomic_set(&enable_context, 0);
+
+	/* release the framework-wide lock, context == NULL */
 	spin_unlock_irqrestore(&enable_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
@@ -745,10 +787,29 @@ int clk_enable(struct clk *clk)
 	unsigned long flags;
 	int ret;
 
+	/* this call re-enters if it is from the same context */
+	if (spin_is_locked(&enable_lock)) {
+		if ((void *) atomic_read(&enable_context) == get_current()) {
+			ret = __clk_enable(clk);
+			goto out;
+		}
+	}
+
+	/* hold the framework-wide lock, context == NULL */
 	spin_lock_irqsave(&enable_lock, flags);
+
+	/* set context for any reentrant calls */
+	atomic_set(&enable_context, (int) get_current());
+
+	/* enable the clock(s) */
 	ret = __clk_enable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
 
+	/* clear the context */
+	atomic_set(&enable_context, 0);
+
+	/* release the framework-wide lock, context == NULL */
+	spin_unlock_irqrestore(&enable_lock, flags);
+out:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_enable);
@@ -792,10 +853,17 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long ret;
 
-	mutex_lock(&prepare_lock);
+	/* this call re-enters if it is from the same context */
+	if (clk_is_reentrant()) {
+		ret = __clk_round_rate(clk, rate);
+		goto out;
+	}
+
+	clk_fwk_lock();
 	ret = __clk_round_rate(clk, rate);
-	mutex_unlock(&prepare_lock);
+	clk_fwk_unlock();
 
+out:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_round_rate);
@@ -877,6 +945,30 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
 		__clk_recalc_rates(child, msg);
 }
 
+unsigned long __clk_get_rate(struct clk *clk)
+{
+	unsigned long ret;
+
+	if (!clk) {
+		ret = 0;
+		goto out;
+	}
+
+	if (clk->flags & CLK_GET_RATE_NOCACHE)
+		__clk_recalc_rates(clk, 0);
+
+	ret = clk->rate;
+
+	if (clk->flags & CLK_IS_ROOT)
+		goto out;
+
+	if (!clk->parent)
+		ret = 0;
+
+out:
+	return ret;
+}
+
 /**
  * clk_get_rate - return the rate of clk
  * @clk: the clk whose rate is being returned
@@ -889,14 +981,22 @@ unsigned long clk_get_rate(struct clk *clk)
 {
 	unsigned long rate;
 
-	mutex_lock(&prepare_lock);
+	/*
+	 * FIXME - any locking here seems heavy weight
+	 * can clk->rate be replaced with an atomic_t?
+	 * same logic can likely be applied to prepare_count & enable_count
+	 */
 
-	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
-		__clk_recalc_rates(clk, 0);
+	if (clk_is_reentrant()) {
+		rate = __clk_get_rate(clk);
+		goto out;
+	}
 
+	clk_fwk_lock();
 	rate = __clk_get_rate(clk);
-	mutex_unlock(&prepare_lock);
+	clk_fwk_unlock();
 
+out:
 	return rate;
 }
 EXPORT_SYMBOL_GPL(clk_get_rate);
@@ -1073,6 +1173,39 @@ static void clk_change_rate(struct clk *clk)
 		clk_change_rate(child);
 }
 
+int __clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = 0;
+	struct clk *top, *fail_clk;
+
+	/* bail early if nothing to do */
+	if (rate == clk->rate)
+		return 0;
+
+	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
+		return -EBUSY;
+	}
+
+	/* calculate new rates and get the topmost changed clock */
+	top = clk_calc_new_rates(clk, rate);
+	if (!top)
+		return -EINVAL;
+
+	/* notify that we are about to change rates */
+	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+	if (fail_clk) {
+		pr_warn("%s: failed to set %s rate\n", __func__,
+				fail_clk->name);
+		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+		return -EBUSY;
+	}
+
+	/* change the rates */
+	clk_change_rate(top);
+
+	return ret;
+}
+
 /**
  * clk_set_rate - specify a new rate for clk
  * @clk: the clk whose rate is being changed
@@ -1096,44 +1229,18 @@ static void clk_change_rate(struct clk *clk)
  */
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-	struct clk *top, *fail_clk;
 	int ret = 0;
 
-	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
-
-	/* bail early if nothing to do */
-	if (rate == clk->rate)
-		goto out;
-
-	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	/* calculate new rates and get the topmost changed clock */
-	top = clk_calc_new_rates(clk, rate);
-	if (!top) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* notify that we are about to change rates */
-	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
-	if (fail_clk) {
-		pr_warn("%s: failed to set %s rate\n", __func__,
-				fail_clk->name);
-		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-		ret = -EBUSY;
+	if (clk_is_reentrant()) {
+		ret = __clk_set_rate(clk, rate);
 		goto out;
 	}
 
-	/* change the rates */
-	clk_change_rate(top);
+	clk_fwk_lock();
+	ret = __clk_set_rate(clk, rate);
+	clk_fwk_unlock();
 
 out:
-	mutex_unlock(&prepare_lock);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_set_rate);
@@ -1148,10 +1255,16 @@ struct clk *clk_get_parent(struct clk *clk)
 {
 	struct clk *parent;
 
-	mutex_lock(&prepare_lock);
+	if (clk_is_reentrant()) {
+		parent = __clk_get_parent(clk);
+		goto out;
+	}
+
+	clk_fwk_lock();
 	parent = __clk_get_parent(clk);
-	mutex_unlock(&prepare_lock);
+	clk_fwk_unlock();
 
+out:
 	return parent;
 }
 EXPORT_SYMBOL_GPL(clk_get_parent);
@@ -1330,6 +1443,7 @@ out:
 int clk_set_parent(struct clk *clk, struct clk *parent)
 {
 	int ret = 0;
+	bool reenter;
 
 	if (!clk || !clk->ops)
 		return -EINVAL;
@@ -1337,8 +1451,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 	if (!clk->ops->set_parent)
 		return -ENOSYS;
 
-	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	reenter = clk_is_reentrant();
+
+	if (!reenter)
+		clk_fwk_lock();
 
 	if (clk->parent == parent)
 		goto out;
@@ -1367,7 +1483,8 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 	__clk_reparent(clk, parent);
 
 out:
-	mutex_unlock(&prepare_lock);
+	if (!reenter)
+		clk_fwk_unlock();
 
 	return ret;
 }
-- 
1.7.10.4


* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  7:09 ` Mike Turquette
@ 2013-03-27  9:08   ` Laurent Pinchart
  -1 siblings, 0 replies; 51+ messages in thread
From: Laurent Pinchart @ 2013-03-27  9:08 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linux-arm-kernel, patches, linaro-kernel,
	Rajagopal Venkat, David Brown, Ulf Hansson

Hi Mike,

Thanks for the patch.

Please see below for a couple of comments.

On Wednesday 27 March 2013 00:09:43 Mike Turquette wrote:
> Reentrancy into the clock framework from the clk.h api is necessary for
> clocks that are prepared and unprepared via i2c_transfer (which includes
> many PMICs and discrete audio chips) as well as for several other use
> cases.
> 
> This patch implements reentrancy by adding two global atomic_t variables
> that track the context of the current caller.  Context in this case is
> the return value from get_current().  One context variable is for slow
> operations protected by the prepare_lock mutex and the other is for fast
> operations protected by the enable_lock spinlock.
> 
> The clk.h api implementations are modified to first see if the relevant
> global lock is already held and, if so, compare the global context (set
> by whoever is holding the lock) against their own context (via a call to
> get_current()).  If the two match then this function is a nested call
> from the task already holding the lock, and we proceed.  If the contexts
> do not match then we call mutex_lock (or busy-wait on the spinlock) until
> the existing task completes.
> 
> This patch does not increase concurrency for unrelated calls into the
> clock framework.  Instead it simply allows reentrancy by the single task
> which is currently holding the global clock framework lock.
> 
> Signed-off-by: Mike Turquette <mturquette@linaro.org>
> Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
> Cc: David Brown <davidb@codeaurora.org>
> Cc: Ulf Hansson <ulf.hansson@linaro.org>
> Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
> ---
>  drivers/clk/clk.c |  255 +++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 186 insertions(+), 69 deletions(-)
> 
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 5e8ffff..17432a5 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -19,9 +19,12 @@
>  #include <linux/of.h>
>  #include <linux/device.h>
>  #include <linux/init.h>
> +#include <linux/sched.h>
> 
>  static DEFINE_SPINLOCK(enable_lock);
>  static DEFINE_MUTEX(prepare_lock);
> +static atomic_t prepare_context;
> +static atomic_t enable_context;
> 
>  static HLIST_HEAD(clk_root_list);
>  static HLIST_HEAD(clk_orphan_list);

[snip]

> @@ -566,6 +548,35 @@ struct clk *__clk_lookup(const char *name)
>  	return NULL;
>  }
> 
> +/***  locking & reentrancy ***/
> +
> +static void clk_fwk_lock(void)
> +{
> +	/* hold the framework-wide lock, context == NULL */
> +	mutex_lock(&prepare_lock);
> +
> +	/* set context for any reentrant calls */
> +	atomic_set(&prepare_context, (int) get_current());

Won't that break on 64-bit architectures with sizeof(void *) != sizeof(int)?

I wonder if it would make sense to abstract these operations into a generic
recursive mutex. Given that it would delay this patch past v3.10, I won't
push for that.

> +}
> +
> +static void clk_fwk_unlock(void)
> +{
> +	/* clear the context */
> +	atomic_set(&prepare_context, 0);
> +
> +	/* release the framework-wide lock, context == NULL */
> +	mutex_unlock(&prepare_lock);
> +}
> +
> +static bool clk_is_reentrant(void)
> +{
> +	if (mutex_is_locked(&prepare_lock))
> +		if ((void *) atomic_read(&prepare_context) == get_current())
> +			return true;
> +
> +	return false;
> +}
> +
>  /***        clk api        ***/
> 
>  void __clk_unprepare(struct clk *clk)

[snip]

> @@ -877,6 +945,30 @@ static void __clk_recalc_rates(struct clk *clk,
> unsigned long msg) __clk_recalc_rates(child, msg);
>  }
> 
> +unsigned long __clk_get_rate(struct clk *clk)
> +{
> +	unsigned long ret;
> +
> +	if (!clk) {
> +		ret = 0;
> +		goto out;

You could return 0 directly here.

> +	}
> +
> +	if (clk->flags & CLK_GET_RATE_NOCACHE)
> +		__clk_recalc_rates(clk, 0);
> +
> +	ret = clk->rate;
> +
> +	if (clk->flags & CLK_IS_ROOT)
> +		goto out;

And return ret here.

> +
> +	if (!clk->parent)
> +		ret = 0;
> +
> +out:
> +	return ret;
> +}
> +
>  /**
>   * clk_get_rate - return the rate of clk
>   * @clk: the clk whose rate is being returned
> @@ -889,14 +981,22 @@ unsigned long clk_get_rate(struct clk *clk)
>  {
>  	unsigned long rate;
> 
> -	mutex_lock(&prepare_lock);
> +	/*
> +	 * FIXME - any locking here seems heavy weight
> +	 * can clk->rate be replaced with an atomic_t?
> +	 * same logic can likely be applied to prepare_count & enable_count
> +	 */
> 
> -	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
> -		__clk_recalc_rates(clk, 0);
> +	if (clk_is_reentrant()) {
> +		rate = __clk_get_rate(clk);
> +		goto out;

You can return directly here as well.

> +	}
> 
> +	clk_fwk_lock();
>  	rate = __clk_get_rate(clk);
> -	mutex_unlock(&prepare_lock);
> +	clk_fwk_unlock();
> 
> +out:
>  	return rate;
>  }
>  EXPORT_SYMBOL_GPL(clk_get_rate);
> @@ -1073,6 +1173,39 @@ static void clk_change_rate(struct clk *clk)
>  		clk_change_rate(child);
>  }
> 
> +int __clk_set_rate(struct clk *clk, unsigned long rate)
> +{
> +	int ret = 0;
> +	struct clk *top, *fail_clk;
> +
> +	/* bail early if nothing to do */
> +	if (rate == clk->rate)
> +		return 0;
> +
> +	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
> +		return -EBUSY;
> +	}

Braces are not necessary.

> +	/* calculate new rates and get the topmost changed clock */
> +	top = clk_calc_new_rates(clk, rate);
> +	if (!top)
> +		return -EINVAL;
> +
> +	/* notify that we are about to change rates */
> +	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
> +	if (fail_clk) {
> +		pr_warn("%s: failed to set %s rate\n", __func__,
> +				fail_clk->name);
> +		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
> +		return -EBUSY;
> +	}
> +
> +	/* change the rates */
> +	clk_change_rate(top);
> +
> +	return ret;
> +}
> +
>  /**
>   * clk_set_rate - specify a new rate for clk
>   * @clk: the clk whose rate is being changed

-- 
Regards,

Laurent Pinchart


* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  7:09 ` Mike Turquette
@ 2013-03-27  9:40   ` Thomas Gleixner
  -1 siblings, 0 replies; 51+ messages in thread
From: Thomas Gleixner @ 2013-03-27  9:40 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linaro-kernel, patches, Laurent Pinchart,
	David Brown, linux-arm-kernel

On Wed, 27 Mar 2013, Mike Turquette wrote:

> Reentrancy into the clock framework from the clk.h api is necessary
> for clocks that are prepared and unprepared via i2c_transfer (which
> includes many PMICs and discrete audio chips) as well as for several
> other use cases.

That explanation sucks.

Why does an i2c clock need reentrancy? Just because it's i2c or what?

What exactly are the "several other use cases"?

Why does the spinlock side need to be reentrant? If a clock is handled
by i2c it can hardly hold the spinlock over an i2c transfer.

A few pointers to code which needs this would be nice as well.

I'm refraining from reviewing the horrible implementation until I get
proper answers to the above questions.

Thanks,

	tglx

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  9:40   ` Thomas Gleixner
@ 2013-03-27  9:55     ` Viresh Kumar
  -1 siblings, 0 replies; 51+ messages in thread
From: Viresh Kumar @ 2013-03-27  9:55 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: Mike Turquette, linaro-kernel, patches, linux-kernel,
	Laurent Pinchart, David Brown, linux-arm-kernel

On 27 March 2013 15:10, Thomas Gleixner <tglx@linutronix.de> wrote:
> On Wed, 27 Mar 2013, Mike Turquette wrote:
>
>> Reentrancy into the clock framework from the clk.h api is necessary
>> for clocks that are prepared and unprepared via i2c_transfer (which
>> includes many PMICs and discrete audio chips) as well as for several
>> other use cases.
>
> That explanation sucks.
>
> Why does an i2c clock need reentrancy? Just because it's i2c or what?

I am in no way connected to this development but was just going through
your mail and I think I might know why this is required.

Consider an example where an external chip has a clock controller with
bits that can be programmed to enable/disable a clock, and this chip is
connected to the SoC via spi/i2c.

clk_prepare(peripheral on external chip)
  -> i2c_xfer(to write to external chips register)
      -> clk_enable(i2c controller)
          ->controller-xfer-routine.. and finally we enable clk here...
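
A minimal sketch of such a prepare handler, just to make the nesting
concrete (all names here are made up, not from a real driver):

	static int extchip_clk_prepare(struct clk_hw *hw)
	{
		struct extchip *chip = to_extchip(hw);
		s32 val;

		/* The i2c transfer path ends up preparing/enabling the
		 * i2c controller's own clocks, so this call re-enters
		 * the clock framework from inside clk_prepare(). */
		val = i2c_smbus_read_byte_data(chip->client, EXTCHIP_CLK_REG);
		if (val < 0)
			return val;

		return i2c_smbus_write_byte_data(chip->client, EXTCHIP_CLK_REG,
						 val | EXTCHIP_CLK_EN);
	}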


Sorry if i am on the wrong side :)

--
viresh

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  9:40   ` Thomas Gleixner
@ 2013-03-27  9:59     ` Laurent Pinchart
  -1 siblings, 0 replies; 51+ messages in thread
From: Laurent Pinchart @ 2013-03-27  9:59 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: Mike Turquette, linux-kernel, linaro-kernel, patches,
	David Brown, linux-arm-kernel

Hi Thomas,

On Wednesday 27 March 2013 10:40:28 Thomas Gleixner wrote:
> On Wed, 27 Mar 2013, Mike Turquette wrote:
> > Reentrancy into the clock framework from the clk.h api is necessary
> > for clocks that are prepared and unprepared via i2c_transfer (which
> > includes many PMICs and discrete audio chips) as well as for several
> > other use cases.
> 
> That explanation sucks.
> 
> Why does an i2c clock need reentrancy? Just because it's i2c or what?

That's just an example, other clocks need reentrancy as well.

> What exactly are the "several other use cases"?

Please see below.

> Why does the spinlock side need to be reentrant? If a clock is handled
> by i2c it can hardly hold the spinlock over an i2c transfer.
> 
> A few pointers to code which needs this would be nice as well.

See for instance 
http://git.linuxtv.org/pinchartl/media.git/commitdiff/c4abb868a4cb6ad33fde76a6383543f3bd6699b0

That commit exposes the XCLKA and XCLKB clock outputs of the OMAP3 ISP (camera 
interface) as clock framework objects. The issue here is that the ISP has 
several inputs clocks. One of them is a parent of XCLK[AB], and another one 
controls access to the ISP registers (L4 bus clock). The ISP driver thus needs 
to enable/disable and prepare/unprepare the L4 clock in the XCLK[AB] 
enable/disable and prepare/unprepare operation handlers.
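
Roughly like this (a simplified sketch of that commit, not its exact
code; the names are approximate):

	static int isp_xclk_enable(struct clk_hw *hw)
	{
		struct isp_xclk *xclk = to_isp_xclk(hw);
		int ret;

		/* The ISP registers are only reachable while the L4 bus
		 * clock runs, so the XCLK handler has to re-enter the
		 * framework for that clock. */
		ret = clk_enable(xclk->isp->l4_clk);
		if (ret < 0)
			return ret;

		isp_xclk_update(xclk, true);	/* set the XCLK*_EN bit */

		return 0;
	}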

> I'm refraining from reviewing the horrible implementation until I get
> proper answers to the above questions.

-- 
Regards,

Laurent Pinchart


* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  9:55     ` Viresh Kumar
@ 2013-03-27 10:03       ` Ulf Hansson
  -1 siblings, 0 replies; 51+ messages in thread
From: Ulf Hansson @ 2013-03-27 10:03 UTC (permalink / raw)
  To: Viresh Kumar
  Cc: Thomas Gleixner, Mike Turquette, linaro-kernel, patches,
	linux-kernel, Laurent Pinchart, David Brown, linux-arm-kernel

On 27 March 2013 10:55, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> On 27 March 2013 15:10, Thomas Gleixner <tglx@linutronix.de> wrote:
>> On Wed, 27 Mar 2013, Mike Turquette wrote:
>>
>>> Reentrancy into the clock framework from the clk.h api is necessary
>>> for clocks that are prepared and unprepared via i2c_transfer (which
>>> includes many PMICs and discrete audio chips) as well as for several
>>> other use cases.
>>
>> That explanation sucks.
>>
>> Why does an i2c clock need reentrancy? Just because it's i2c or what?
>
> I am in no way connected to this development but was just going through
> your mail and I think I might know why this is required.
>
> Consider an example where an external chip has a clock controller with
> bits that can be programmed to enable/disable a clock, and this chip is
> connected to the SoC via spi/i2c.
>
> clk_prepare(peripheral on external chip)
>   -> i2c_xfer(to write to external chips register)
>       -> clk_enable(i2c controller)
>           ->controller-xfer-routine.. and finally we enable clk here...
>
>
> Sorry if i am on the wrong side :)

I agree with you, Viresh. I guess Mike should update the commit message.

I would also like to add another reason why this is needed. For some
clks you would like to do pinctrl operations from a clk hw. But since
a pinctrl driver likely requires a clk to be prepared|enabled, we run
into a clk reentrancy issue.
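
A hypothetical sketch (made-up names) of a clk whose output pad has to
be muxed from its prepare handler:

	static int mclk_prepare(struct clk_hw *hw)
	{
		struct mclk *mclk = to_mclk(hw);

		/* Selecting the pinctrl state may force the pinctrl
		 * block's own clock to be prepared, which re-enters
		 * clk_prepare() from inside clk_prepare(). */
		return pinctrl_select_state(mclk->pctl, mclk->pins_on);
	}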

Kind regards
Ulf Hansson

>
> --
> viresh

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27 10:03       ` Ulf Hansson
@ 2013-03-27 11:09         ` Thomas Gleixner
  -1 siblings, 0 replies; 51+ messages in thread
From: Thomas Gleixner @ 2013-03-27 11:09 UTC (permalink / raw)
  To: Ulf Hansson
  Cc: Viresh Kumar, Mike Turquette, linaro-kernel, patches,
	linux-kernel, Laurent Pinchart, David Brown, linux-arm-kernel

On Wed, 27 Mar 2013, Ulf Hansson wrote:
> On 27 March 2013 10:55, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> > On 27 March 2013 15:10, Thomas Gleixner <tglx@linutronix.de> wrote:
> >> On Wed, 27 Mar 2013, Mike Turquette wrote:
> >>
> >>> Reentrancy into the clock framework from the clk.h api is necessary
> >>> for clocks that are prepared and unprepared via i2c_transfer (which
> >>> includes many PMICs and discrete audio chips) as well as for several
> >>> other use cases.
> >>
> >> That explanation sucks.
> >>
> >> Why does an i2c clock need reentrancy? Just because it's i2c or what?
> >
> > I am in no way connected to this development but was just going through
> > your mail and I think I might know why this is required.
> >
> > Consider an example where an external chip has a clock controller with
> > bits that can be programmed to enable/disable a clock, and this chip is
> > connected to the SoC via spi/i2c.
> >
> > clk_prepare(peripheral on external chip)
> >   -> i2c_xfer(to write to external chips register)
> >       -> clk_enable(i2c controller)
> >           ->controller-xfer-routine.. and finally we enable clk here...

Which does not explain the whole issue:

    clk_prepare() takes the mutex
    clk_enable() takes the spinlock

That works today.

The issue arises if you need to call clk_prepare(i2c) in the xfer
routine.
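
Spelled out, that nesting self-deadlocks with today's code:

    clk_prepare(external clk)      /* mutex_lock(&prepare_lock) */
      -> i2c_xfer()
          -> clk_prepare(i2c)      /* mutex_lock(&prepare_lock) again:
                                      the same task blocks on itself */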

> >
> > Sorry if i am on the wrong side :)

Only slightly :)

> I agree with you, Viresh. I guess Mike should update the commit message.
>
> I would also like to add another reason why this is needed. For some
> clks you would like to do pinctrl operations from a clk hw. But since
> a pinctrl driver likely requires a clk to be prepared|enabled, we run
> into a clk reentrancy issue.

Fair enough. This all wants to go into the changelog, so we can
understand why we have this business.

Thanks,

	tglx

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  7:09 ` Mike Turquette
@ 2013-03-27 11:24   ` Thomas Gleixner
  -1 siblings, 0 replies; 51+ messages in thread
From: Thomas Gleixner @ 2013-03-27 11:24 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linaro-kernel, patches, Laurent Pinchart,
	David Brown, linux-arm-kernel

On Wed, 27 Mar 2013, Mike Turquette wrote:
> +/***  locking & reentrancy ***/
> +
> +static void clk_fwk_lock(void)

This function name sucks as much as the whole implementation does.

> +{
> +	/* hold the framework-wide lock, context == NULL */
> +	mutex_lock(&prepare_lock);
> +
> +	/* set context for any reentrant calls */
> +	atomic_set(&prepare_context, (int) get_current());

And what's the point of the atomic here? There is no need for an
atomic if you hold the lock. Neither here nor on the reader side.

Aside from that, the cast to (int) and the one below to (void *) are
blatantly wrong on 64 bit.

> +}
> +
> +static void clk_fwk_unlock(void)
> +{
> +	/* clear the context */
> +	atomic_set(&prepare_context, 0);
> +
> +	/* release the framework-wide lock, context == NULL */
> +	mutex_unlock(&prepare_lock);
> +}
> +
> +static bool clk_is_reentrant(void)
> +{
> +	if (mutex_is_locked(&prepare_lock))
> +		if ((void *) atomic_read(&prepare_context) == get_current())

Mooo.

> +			return true;
> +
> +	return false;
> +}

Why the heck do you need this function?

Just to sprinkle all these ugly constructs into the code:

> -	mutex_lock(&prepare_lock);
> +	/* re-enter if call is from the same context */
> +	if (clk_is_reentrant()) {
> +		__clk_unprepare(clk);
> +		return;
> +	}

Sigh. Why not do the obvious?

Step 1/2: Wrap locking in helper functions

+static void clk_prepare_lock(void)
+{
+	mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+	mutex_unlock(&prepare_lock);
+}

That way the whole change in the existing code boils down to:

-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
...
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();

Ditto for the spinlock.

And there is no pointless reshuffling of functions required.


Step 2/2: Implement reentrancy

+static struct task_struct *prepare_owner;
+static int prepare_refcnt;

static void clk_prepare_lock(void)
{
-	mutex_lock(&prepare_lock);
+	if (!mutex_trylock(&prepare_lock)) {
+		if (prepare_owner == current) {
+			prepare_refcnt++;
+			return;
+		}
+		mutex_lock(&prepare_lock);
+	}
+	WARN_ON_ONCE(prepare_owner != NULL);
+	WARN_ON_ONCE(prepare_refcnt != 0);
+	prepare_owner = current;
+	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
-	mutex_unlock(&prepare_lock);
+	WARN_ON_ONCE(prepare_owner != current);
+	WARN_ON_ONCE(prepare_refcnt == 0);
+
+	if (--prepare_refcnt)
+		return;
+	prepare_owner = NULL;
+	mutex_unlock(&prepare_lock);
}

Ditto for the spinlock.
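
For the spinlock the same pattern might look roughly like this (a
sketch along the same lines; enable_owner/enable_refcnt are the
analogous variables, and the saved flags are handed back to the caller):

static struct task_struct *enable_owner;
static int enable_refcnt;

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}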

That step requires ZERO change to the functions. They simply work and
you don't need all this ugly reentrancy hackery.

Thanks,

	tglx

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27 11:09         ` Thomas Gleixner
@ 2013-03-27 14:25           ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-27 14:25 UTC (permalink / raw)
  To: Thomas Gleixner, Ulf Hansson
  Cc: Viresh Kumar, linaro-kernel, patches, linux-kernel,
	Laurent Pinchart, David Brown, linux-arm-kernel

Quoting Thomas Gleixner (2013-03-27 04:09:17)
> On Wed, 27 Mar 2013, Ulf Hansson wrote:
> > On 27 March 2013 10:55, Viresh Kumar <viresh.kumar@linaro.org> wrote:
> > > On 27 March 2013 15:10, Thomas Gleixner <tglx@linutronix.de> wrote:
> > >> On Wed, 27 Mar 2013, Mike Turquette wrote:
> > >>
> > >>> Reentrancy into the clock framework from the clk.h api is necessary
> > >>> for clocks that are prepared and unprepared via i2c_transfer (which
> > >>> includes many PMICs and discrete audio chips) as well as for several
> > >>> other use cases.
> > >>
> > >> That explanation sucks.
> > >>
> > >> Why does an i2c clock need reentrancy? Just because it's i2c or what?
> > >
> > > I am in no way connected to this development but was just going
> > > through your mail and I think I might know the answer to why this
> > > is required.
> > >
> > > Consider an example where an external chip has a clock controller
> > > with bits which can be programmed to enable/disable a clock. And
> > > this chip is connected via spi/i2c to the SoC.
> > >
> > > clk_prepare(peripheral on external chip)
> > >   -> i2c_xfer(to write to the external chip's register)
> > >       -> clk_enable(i2c controller)
> > >           -> controller-xfer-routine.. and finally we enable the clk here...
> 
> Which does not explain the whole issue:
> 
>     clk_prepare() takes the mutex
>     clk_enable() takes the spinlock
> 
> That works today.
> 
> The issue arises if you need to call clk_prepare(i2c) in the xfer
> routine.
> 

The issue arises any time a clk_ops callback calls a function that
unwittingly re-enters the clock framework.  I think the easiest example
to understand and perhaps the most common in practice is a clock which
is controlled via an i2c transfer.

Viresh's example makes the mistake of only calling
clk_enable(i2c_controller); it must also call
clk_prepare(i2c_controller), which is missing from the call graph
above.  That nested call to clk_prepare is where the reentrancy comes
from.

This has nothing to do with the prepare/enable locking split and leaves
that relationship intact.
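
To make the failure concrete, here is a minimal sketch of such a
clock's .prepare callback (every name in it is hypothetical; this is
not code from the patch):

/* hypothetical audio PLL whose enable bit lives behind i2c */
static int audio_pll_prepare(struct clk_hw *hw)
{
        struct audio_pll *pll = to_audio_pll(hw);
        u8 buf[2] = { PLL_CTRL_REG, PLL_ENABLE };
        struct i2c_msg msg = {
                .addr   = pll->client->addr,
                .len    = sizeof(buf),
                .buf    = buf,
        };

        /*
         * The i2c controller driver calls clk_prepare() on its own
         * clock somewhere down this path, re-entering the framework
         * while we already hold prepare_lock.
         */
        return i2c_transfer(pll->client->adapter, &msg, 1) == 1 ? 0 : -EIO;
}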

> > >
> > > Sorry if i am on the wrong side :)
> 
> Only slightly :)
> 
> > I agree with you, Viresh. I guess Mike should update the commit message.
> > 
> > I would also like to add another reason why this is needed. For some
> > clks you would like to do pinctrl operations from a clk hw. But since
> > a pinctrl driver likely requires a clk to be prepared|enabled, we run
> > into a clk reentrancy issue.
> 
> Fair enough. This all wants to go into the changelog, so we can
> understand why we have this business.
> 

I'll submit a v5 which I hope will end the pain and suffering this patch
has caused you.

Regards,
Mike

> Thanks,
> 
>         tglx

^ permalink raw reply	[flat|nested] 51+ messages in thread

* [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  9:08   ` Laurent Pinchart
  (?)
@ 2013-03-27 15:06   ` Mike Turquette
  2013-03-27 17:12       ` Thomas Gleixner
  -1 siblings, 1 reply; 51+ messages in thread
From: Mike Turquette @ 2013-03-27 15:06 UTC (permalink / raw)
  To: linux-arm-kernel

Quoting Laurent Pinchart (2013-03-27 02:08:15)
> Hi Mike,
> 
> Thanks for the patch.
> 
> Please see below for a couple of comments.
> 
> On Wednesday 27 March 2013 00:09:43 Mike Turquette wrote:
> > Reentrancy into the clock framework from the clk.h api is necessary for
> > clocks that are prepared and unprepared via i2c_transfer (which includes
> > many PMICs and discrete audio chips) as well as for several other use
> > cases.
> > 
> > This patch implements reentrancy by adding two global atomic_t's which
> > track the context of the current caller.  Context in this case is the
> > return value from get_current().  One context variable is for slow
> > operations protected by the prepare_mutex and the other is for fast
> > operations protected by the enable_lock spinlock.
> > 
> > The clk.h api implementations are modified to first see if the relevant
> > global lock is already held and if so compare the global context (set by
> > whoever is holding the lock) against their own context (via a call to
> > get_current()).  If the two match then this function is a nested call
> > from the one already holding the lock and we procede.  If the context
> > does not match then procede to call mutex_lock and busy-wait for the
> > existing task to complete.
> > 
> > This patch does not increase concurrency for unrelated calls into the
> > clock framework.  Instead it simply allows reentrancy by the single task
> > which is currently holding the global clock framework lock.
> > 
> > Signed-off-by: Mike Turquette <mturquette@linaro.org>
> > Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
> > Cc: David Brown <davidb@codeaurora.org>
> > Cc: Ulf Hansson <ulf.hansson@linaro.org>
> > Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
> > ---
> >  drivers/clk/clk.c |  255 +++++++++++++++++++++++++++++++++++---------------
> >  1 file changed, 186 insertions(+), 69 deletions(-)
> > 
> > diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> > index 5e8ffff..17432a5 100644
> > --- a/drivers/clk/clk.c
> > +++ b/drivers/clk/clk.c
> > @@ -19,9 +19,12 @@
> >  #include <linux/of.h>
> >  #include <linux/device.h>
> >  #include <linux/init.h>
> > +#include <linux/sched.h>
> > 
> >  static DEFINE_SPINLOCK(enable_lock);
> >  static DEFINE_MUTEX(prepare_lock);
> > +static atomic_t prepare_context;
> > +static atomic_t enable_context;
> > 
> >  static HLIST_HEAD(clk_root_list);
> >  static HLIST_HEAD(clk_orphan_list);
> 
> [snip]
> 
> > @@ -566,6 +548,35 @@ struct clk *__clk_lookup(const char *name)
> >       return NULL;
> >  }
> > 
> > +/***  locking & reentrancy ***/
> > +
> > +static void clk_fwk_lock(void)
> > +{
> > +     /* hold the framework-wide lock, context == NULL */
> > +     mutex_lock(&prepare_lock);
> > +
> > +     /* set context for any reentrant calls */
> > +     atomic_set(&prepare_context, (int) get_current());
> 
> Won't that break on 64-bit architectures with sizeof(void *) != sizeof(int)?
> 

Ok, I can use atomic64_t in the next version.  Good catch.

> I wonder if it would make sense to abstract these operations in a generic 
> recursive mutex. Given that it would delay this patch past v3.10 I won't push 
> for that.
> 

Having a nice implementation of recursive mutexes would have saved me
some time.
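
For the record, such a generic recursive mutex could be little more
than a mutex plus an owner pointer and a refcount (a hypothetical
sketch; no such type exists in the kernel today):

struct rmutex {
        struct mutex            lock;
        struct task_struct      *owner;
        int                     refcnt;
};

static void rmutex_lock(struct rmutex *rm)
{
        if (!mutex_trylock(&rm->lock)) {
                if (rm->owner == current) {
                        rm->refcnt++;
                        return;
                }
                mutex_lock(&rm->lock);
        }
        WARN_ON_ONCE(rm->owner != NULL);
        WARN_ON_ONCE(rm->refcnt != 0);
        rm->owner = current;
        rm->refcnt = 1;
}

static void rmutex_unlock(struct rmutex *rm)
{
        WARN_ON_ONCE(rm->owner != current);
        WARN_ON_ONCE(rm->refcnt == 0);

        if (--rm->refcnt)
                return;
        rm->owner = NULL;
        mutex_unlock(&rm->lock);
}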

> > +}
> > +
> > +static void clk_fwk_unlock(void)
> > +{
> > +     /* clear the context */
> > +     atomic_set(&prepare_context, 0);
> > +
> > +     /* release the framework-wide lock, context == NULL */
> > +     mutex_unlock(&prepare_lock);
> > +}
> > +
> > +static bool clk_is_reentrant(void)
> > +{
> > +     if (mutex_is_locked(&prepare_lock))
> > +             if ((void *) atomic_read(&prepare_context) == get_current())
> > +                     return true;
> > +
> > +     return false;
> > +}
> > +
> >  /***        clk api        ***/
> > 
> >  void __clk_unprepare(struct clk *clk)
> 
> [snip]
> 
> > @@ -877,6 +945,30 @@ static void __clk_recalc_rates(struct clk *clk,
> > unsigned long msg) __clk_recalc_rates(child, msg);
> >  }
> > 
> > +unsigned long __clk_get_rate(struct clk *clk)
> > +{
> > +     unsigned long ret;
> > +
> > +     if (!clk) {
> > +             ret = 0;
> > +             goto out;
> 
> You could return 0 directly here.
> 

Call me crazy, but I prefer having a single return statement in a
function if possible.  That goes for the similar review comments below.

> > +     }
> > +
> > +     if (clk->flags & CLK_GET_RATE_NOCACHE)
> > +             __clk_recalc_rates(clk, 0);
> > +
> > +     ret = clk->rate;
> > +
> > +     if (clk->flags & CLK_IS_ROOT)
> > +             goto out;
> 
> And return ret here.
> 
> > +
> > +     if (!clk->parent)
> > +             ret = 0;
> > +
> > +out:
> > +     return ret;
> > +}
> > +
> >  /**
> >   * clk_get_rate - return the rate of clk
> >   * @clk: the clk whose rate is being returned
> > @@ -889,14 +981,22 @@ unsigned long clk_get_rate(struct clk *clk)
> >  {
> >       unsigned long rate;
> > 
> > -     mutex_lock(&prepare_lock);
> > +     /*
> > +      * FIXME - any locking here seems heavy weight
> > +      * can clk->rate be replaced with an atomic_t?
> > +      * same logic can likely be applied to prepare_count & enable_count
> > +      */
> > 
> > -     if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
> > -             __clk_recalc_rates(clk, 0);
> > +     if (clk_is_reentrant()) {
> > +             rate = __clk_get_rate(clk);
> > +             goto out;
> 
> You can return directly here as well.
> 
> > +     }
> > 
> > +     clk_fwk_lock();
> >       rate = __clk_get_rate(clk);
> > -     mutex_unlock(&prepare_lock);
> > +     clk_fwk_unlock();
> > 
> > +out:
> >       return rate;
> >  }
> >  EXPORT_SYMBOL_GPL(clk_get_rate);
> > @@ -1073,6 +1173,39 @@ static void clk_change_rate(struct clk *clk)
> >               clk_change_rate(child);
> >  }
> > 
> > +int __clk_set_rate(struct clk *clk, unsigned long rate)
> > +{
> > +     int ret = 0;
> > +     struct clk *top, *fail_clk;
> > +
> > +     /* bail early if nothing to do */
> > +     if (rate == clk->rate)
> > +             return 0;
> > +
> > +     if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
> > +             return -EBUSY;
> > +     }
> 
> Braces are not necessary.
> 

No harm in having them, but I can remove them in the next version.

Thanks for the review,
Mike

> > +     /* calculate new rates and get the topmost changed clock */
> > +     top = clk_calc_new_rates(clk, rate);
> > +     if (!top)
> > +             return -EINVAL;
> > +
> > +     /* notify that we are about to change rates */
> > +     fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
> > +     if (fail_clk) {
> > +             pr_warn("%s: failed to set %s rate\n", __func__,
> > +                             fail_clk->name);
> > +             clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
> > +             return -EBUSY;
> > +     }
> > +
> > +     /* change the rates */
> > +     clk_change_rate(top);
> > +
> > +     return ret;
> > +}
> > +
> >  /**
> >   * clk_set_rate - specify a new rate for clk
> >   * @clk: the clk whose rate is being changed
> 
> -- 
> Regards,
> 
> Laurent Pinchart

^ permalink raw reply	[flat|nested] 51+ messages in thread

* [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27 11:24   ` Thomas Gleixner
  (?)
@ 2013-03-27 16:47   ` Mike Turquette
  2013-03-27 17:09       ` Thomas Gleixner
  -1 siblings, 1 reply; 51+ messages in thread
From: Mike Turquette @ 2013-03-27 16:47 UTC (permalink / raw)
  To: linux-arm-kernel

Quoting Thomas Gleixner (2013-03-27 04:24:12)
> On Wed, 27 Mar 2013, Mike Turquette wrote:
> > +/***  locking & reentrancy ***/
> > +
> > +static void clk_fwk_lock(void)
> 
> This function name sucks as much as the whole implementation does.
> 
> > +{
> > +     /* hold the framework-wide lock, context == NULL */
> > +     mutex_lock(&prepare_lock);
> > +
> > +     /* set context for any reentrant calls */
> > +     atomic_set(&prepare_context, (int) get_current());
> 
> And what's the point of the atomic here? There is no need for an
> atomic if you hold the lock. Neither here nor on the reader side.
> 

I had wondered about that.  So the barriers in mutex_lock and
spin_lock_irqsave are sufficient such that the (unprotected) read-side
will always see the correct data?  That makes sense to me since accesses
to the clock tree are still serialized.
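
The argument can be spelled out on the owner field itself (a sketch
of the reasoning, not code from the thread):

/*
 * Why a plain, non-atomic owner field is enough:
 *
 * - prepare_owner is only ever set to non-NULL by the task that
 *   holds prepare_lock, and is cleared again before the lock is
 *   released.
 * - a reader only acts on "prepare_owner == current".  If we do not
 *   hold the lock, the owner is NULL or some other task, so even a
 *   stale read can never spuriously equal current.
 * - if we do hold the lock, we wrote the field ourselves, and a task
 *   always observes its own prior writes; the implicit barriers in
 *   mutex_lock()/mutex_unlock() cover the handover between tasks.
 */
static struct task_struct *prepare_owner;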

> Aside of that, the cast to (int) and the one below to (void *) are
> blatantly wrong on 64 bit.
> 

Since the atomic type is no longer required (based on the above
assumption), this problem goes away.  Each context is just a global
pointer.

> > +}
> > +
> > +static void clk_fwk_unlock(void)
> > +{
> > +     /* clear the context */
> > +     atomic_set(&prepare_context, 0);
> > +
> > +     /* release the framework-wide lock, context == NULL */
> > +     mutex_unlock(&prepare_lock);
> > +}
> > +
> > +static bool clk_is_reentrant(void)
> > +{
> > +     if (mutex_is_locked(&prepare_lock))
> > +             if ((void *) atomic_read(&prepare_context) == get_current())
> 
> Mooo.

Woof?

> 
> > +                     return true;
> > +
> > +     return false;
> > +}
> 
> Why the heck do you need this function?
> 
> Just to sprinkle all these ugly constructs into the code:
> 
> > -     mutex_lock(&prepare_lock);
> > +     /* re-enter if call is from the same context */
> > +     if (clk_is_reentrant()) {
> > +             __clk_unprepare(clk);
> > +             return;
> > +     }
> 
> Sigh. Why not doing the obvious?
> 
> Step 1/2: Wrap locking in helper functions
> 
> +static void clk_prepare_lock(void)
> +{
> +       mutex_lock(&prepare_lock);
> +}
> +
> +static void clk_prepare_unlock(void)
> +{
> +       mutex_unlock(&prepare_lock);
> +}
> 
> That way the whole change in the existing code boils down to:
> 
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
> ...
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
> 
> Ditto for the spinlock.
> 
> And there is no pointless reshuffling of functions required.
> 
> 
> Step 2/2: Implement reentrancy
> 
> +static struct task_struct *prepare_owner;
> +static int prepare_refcnt;
> 
> static void clk_prepare_lock()
> {
> -       mutex_lock(&prepare_lock);
> +       if (!mutex_trylock(&prepare_lock)) {
> +               if (prepare_owner == current) {
> +                       prepare_refcnt++;
> +                       return;
> +               }
> +               mutex_lock(&prepare_lock);
> +       }
> +       WARN_ON_ONCE(prepare_owner != NULL);
> +       WARN_ON_ONCE(prepare_refcnt != 0);
> +       prepare_owner = current;
> +       prepare_refcnt = 1;
> }
> 
> static void clk_prepare_unlock(void)
> {
> -       mutex_unlock(&prepare_lock);
> +       WARN_ON_ONCE(prepare_owner != current);
> +       WARN_ON_ONCE(prepare_refcnt == 0);
> +
> +       if (--prepare_refcnt)
> +               return;
> +       prepare_owner = NULL;
> +       mutex_unlock(&prepare_lock);
> }
> 
> Ditto for the spinlock.
> 
> That step requires ZERO change to the functions. They simply work and
> you don't need all this ugly reentrancy hackery.
> 

Thanks for the review Thomas.  I will steal your code and call it my own
in the next version.  In particular getting rid of the atomics makes
things much nicer.

Regards,
Mike

> Thanks,
> 
>         tglx

^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27 16:47   ` Mike Turquette
@ 2013-03-27 17:09       ` Thomas Gleixner
  0 siblings, 0 replies; 51+ messages in thread
From: Thomas Gleixner @ 2013-03-27 17:09 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linaro-kernel, patches, Laurent Pinchart,
	David Brown, linux-arm-kernel

On Wed, 27 Mar 2013, Mike Turquette wrote:
> Thanks for the review Thomas.  I will steal your code and call it my own
> in the next version.

Sure.

> In particular getting rid of the atomics makes things much nicer.

I'd say using the helper functions and not having all these
conditionals makes it really nice :)

Thanks,

	tglx

^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27 15:06   ` Mike Turquette
@ 2013-03-27 17:12       ` Thomas Gleixner
  0 siblings, 0 replies; 51+ messages in thread
From: Thomas Gleixner @ 2013-03-27 17:12 UTC (permalink / raw)
  To: Mike Turquette
  Cc: Laurent Pinchart, linaro-kernel, patches, linux-kernel,
	David Brown, linux-arm-kernel

On Wed, 27 Mar 2013, Mike Turquette wrote:
> Quoting Laurent Pinchart (2013-03-27 02:08:15)
> > I wonder if it would make sense to abstract these operations in a generic 
> > recursive mutex. Given that it would delay this patch past v3.10 I won't push 
> > for that.
> > 
> 
> Having a nice implementation of recursive mutexes would have saved me
> some time.

If you encapsulate stuff nicely like I suggested, then switching to a
generic version later on is a no-brainer.
 
Thanks,

	tglx

^ permalink raw reply	[flat|nested] 51+ messages in thread

* Re: [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27  7:09 ` Mike Turquette
@ 2013-03-27 22:56   ` Russell King - ARM Linux
  -1 siblings, 0 replies; 51+ messages in thread
From: Russell King - ARM Linux @ 2013-03-27 22:56 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, Ulf Hansson, linaro-kernel, patches,
	Laurent Pinchart, Rajagopal Venkat, David Brown,
	linux-arm-kernel

On Wed, Mar 27, 2013 at 12:09:43AM -0700, Mike Turquette wrote:
> +	/* set context for any reentrant calls */
> +	atomic_set(&prepare_context, (int) get_current());
...
> +	if (mutex_is_locked(&prepare_lock))
> +		if ((void *) atomic_read(&prepare_context) == get_current())
> +			return true;

If you really want to do it like this, then the _correct_ way to do it
is:

		if (atomic_read(&prepare_context) == (int)get_current())

So that any effects from the cast are the same in both parts.  Otherwise,
you will be running into the possibility that a cast could do something
like truncate the returned value, resulting in the test condition using
pointers always being false.

It's not that much of a problem on ARM, but it's a matter of good
programming discipline that such issues are avoided.

Even better would be to cast it to unsigned long - this is the value
which the atomic types use, and that is less likely to be truncated.
It also helps to avoid the possibility of compilers complaining about
a narrowing cast too - especially as the entire Linux kernel assumes
that pointers can be cast to unsigned long and back again with no loss.
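
Put as a sketch (plain variables here; the patch's atomics are
dropped for clarity):

static unsigned long prepare_context;

static void set_context(void)
{
        /* unsigned long is pointer-sized in the kernel: lossless */
        prepare_context = (unsigned long)get_current();
}

static bool is_reentrant_context(void)
{
        /* an (int) on either side could truncate on 64-bit */
        return prepare_context == (unsigned long)get_current();
}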

^ permalink raw reply	[flat|nested] 51+ messages in thread

* [PATCH v4] clk: allow reentrant calls into the clk framework
  2013-03-27 22:56   ` Russell King - ARM Linux
  (?)
@ 2013-03-28  3:00   ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28  3:00 UTC (permalink / raw)
  To: linux-arm-kernel

Quoting Russell King - ARM Linux (2013-03-27 15:56:10)
> On Wed, Mar 27, 2013 at 12:09:43AM -0700, Mike Turquette wrote:
> > +     /* set context for any reentrant calls */
> > +     atomic_set(&prepare_context, (int) get_current());
> ...
> > +     if (mutex_is_locked(&prepare_lock))
> > +             if ((void *) atomic_read(&prepare_context) == get_current())
> > +                     return true;
> 
> If you really want to do it like this, then the _correct_ way to do it
> is:
> 
>                 if (atomic_read(&prepare_context) == (int)get_current())
> 
> So that any effects from the cast are the same in both parts.  Otherwise,
> you will be running into the possibility that a cast could do something
> like truncate the returned value, resulting in the test condition using
> pointers always being false.
> 
> It's not that much of a problem on ARM, but it's a matter of good
> programming discpline that such issues are avoided.
> 
> Even better would be to cast it to unsigned long - this is the value
> which the atomic types use, and that is less likely to be truncated.
> It also helps to avoid the possibility of compilers complaining about
> a narrowing cast too - especially as the entire Linux kernel assumes
> that pointers can be cast to unsigned long and back again with no loss.

The atomics and casts will go away in the next version, but the insights
are useful.  Thanks for the review.

Regards,
Mike

^ permalink raw reply	[flat|nested] 51+ messages in thread

* [PATCH v5 0/2] reentrancy in the common clk framework
  2013-03-27  7:09 ` Mike Turquette
@ 2013-03-28  4:45   ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28  4:45 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-arm-kernel, patches, linaro-kernel, rajagopal.venkat,
	davidb, ulf.hansson, laurent.pinchart, tglx, Mike Turquette

This fifth attempt at allowing calls to the clk api to reenter splits
the last patch into two parts.  The first patch abstracts out the
locking details into some helper functions and converts all of the
direct calls to the mutex and spinlock api to use these helpers.

The second patch introduces the reentrancy logic into these helper
functions.  Fundamentally the reentrancy logic hasn't changed since v4,
but fixed casts, removal of the unnecessary barriers and a cleaner
overall design separate this approach from the last one.

Changes tested on top of the latest clk-next branch with an OMAP4430
Panda board.

Mike Turquette (2):
  clk: abstract locking out into helper functions
  clk: allow reentrant calls into the clk framework

 drivers/clk/clk.c |  136 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 98 insertions(+), 38 deletions(-)

-- 
1.7.10.4


^ permalink raw reply	[flat|nested] 51+ messages in thread

* [PATCH 1/2] clk: abstract locking out into helper functions
  2013-03-28  4:45   ` Mike Turquette
@ 2013-03-28  4:45     ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28  4:45 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-arm-kernel, patches, linaro-kernel, rajagopal.venkat,
	davidb, ulf.hansson, laurent.pinchart, tglx, Mike Turquette

Create locking helpers for the global mutex and global spinlock.  The
definitions of these helpers will be expanded upon in the next patch,
which introduces reentrancy into the locking scheme.

Signed-off-by: Mike Turquette <mturquette@linaro.org>
Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Cc: David Brown <davidb@codeaurora.org>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
---
 drivers/clk/clk.c |   97 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 59 insertions(+), 38 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff..bea47d5 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,27 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/***           locking             ***/
+static void clk_prepare_lock(void)
+{
+	mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+	mutex_unlock(&prepare_lock);
+}
+
+static void clk_enable_lock(unsigned long *flags)
+{
+	spin_lock_irqsave(&enable_lock, *flags);
+}
+
+static void clk_enable_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&enable_lock, *flags);
+}
+
 /***        debugfs support        ***/
 
 #ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +90,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
 	seq_printf(s, "---------------------------------------------------------------------\n");
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(c, &clk_root_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
@@ -77,7 +98,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	hlist_for_each_entry(c, &clk_orphan_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
@@ -130,7 +151,7 @@ static int clk_dump(struct seq_file *s, void *data)
 
 	seq_printf(s, "{");
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(c, &clk_root_list, child_node) {
 		if (!first_node)
@@ -144,7 +165,7 @@ static int clk_dump(struct seq_file *s, void *data)
 		clk_dump_subtree(s, c, 0);
 	}
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	seq_printf(s, "}");
 	return 0;
@@ -316,7 +337,7 @@ static int __init clk_debug_init(void)
 	if (!orphandir)
 		return -ENOMEM;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +347,7 @@ static int __init clk_debug_init(void)
 
 	inited = 1;
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
@@ -372,7 +393,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
 	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_disable_unused_subtree(child);
 
-	spin_lock_irqsave(&enable_lock, flags);
+	clk_enable_lock(&flags);
 
 	if (clk->enable_count)
 		goto unlock_out;
@@ -393,7 +414,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
 	}
 
 unlock_out:
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(&flags);
 
 out:
 	return;
@@ -403,7 +424,7 @@ static int clk_disable_unused(void)
 {
 	struct clk *clk;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_disable_unused_subtree(clk);
@@ -417,7 +438,7 @@ static int clk_disable_unused(void)
 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_unprepare_unused_subtree(clk);
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
@@ -600,9 +621,9 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	__clk_unprepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -648,9 +669,9 @@ int clk_prepare(struct clk *clk)
 {
 	int ret;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	ret = __clk_prepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -692,9 +713,9 @@ void clk_disable(struct clk *clk)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&enable_lock, flags);
+	clk_enable_lock(&flags);
 	__clk_disable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(&flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
@@ -745,9 +766,9 @@ int clk_enable(struct clk *clk)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&enable_lock, flags);
+	clk_enable_lock(&flags);
 	ret = __clk_enable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(&flags);
 
 	return ret;
 }
@@ -792,9 +813,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long ret;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	ret = __clk_round_rate(clk, rate);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -889,13 +910,13 @@ unsigned long clk_get_rate(struct clk *clk)
 {
 	unsigned long rate;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
 		__clk_recalc_rates(clk, 0);
 
 	rate = __clk_get_rate(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return rate;
 }
@@ -1100,7 +1121,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	int ret = 0;
 
 	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* bail early if nothing to do */
 	if (rate == clk->rate)
@@ -1132,7 +1153,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	clk_change_rate(top);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1148,9 +1169,9 @@ struct clk *clk_get_parent(struct clk *clk)
 {
 	struct clk *parent;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	parent = __clk_get_parent(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return parent;
 }
@@ -1294,19 +1315,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 		__clk_prepare(parent);
 
 	/* FIXME replace with clk_is_enabled(clk) someday */
-	spin_lock_irqsave(&enable_lock, flags);
+	clk_enable_lock(&flags);
 	if (clk->enable_count)
 		__clk_enable(parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(&flags);
 
 	/* change clock input source */
 	ret = clk->ops->set_parent(clk->hw, i);
 
 	/* clean up old prepare and enable */
-	spin_lock_irqsave(&enable_lock, flags);
+	clk_enable_lock(&flags);
 	if (clk->enable_count)
 		__clk_disable(old_parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(&flags);
 
 	if (clk->prepare_count)
 		__clk_unprepare(old_parent);
@@ -1338,7 +1359,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 		return -ENOSYS;
 
 	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	if (clk->parent == parent)
 		goto out;
@@ -1367,7 +1388,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 	__clk_reparent(clk, parent);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1390,7 +1411,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	if (!clk)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* check to see if a clock with this name is already registered */
 	if (__clk_lookup(clk->name)) {
@@ -1514,7 +1535,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	clk_debug_register(clk);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1748,7 +1769,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	if (!clk || !nb)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* search the list of notifiers for this clk */
 	list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1772,7 +1793,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	clk->notifier_count++;
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1797,7 +1818,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 	if (!clk || !nb)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	list_for_each_entry(cn, &clk_notifier_list, node)
 		if (cn->clk == clk)
@@ -1818,7 +1839,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 		ret = -ENOENT;
 	}
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 51+ messages in thread

* [PATCH 2/2] clk: allow reentrant calls into the clk framework
  2013-03-28  4:45   ` Mike Turquette
@ 2013-03-28  4:45     ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28  4:45 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-arm-kernel, patches, linaro-kernel, rajagopal.venkat,
	davidb, ulf.hansson, laurent.pinchart, tglx, Mike Turquette

Reentrancy into the clock framework is necessary for clock operations
that result in nested calls to the clk api.  A common example is a clock
that is prepared via an i2c transaction, such as a clock inside of a
discrete audio chip or a power management IC.  The i2c subsystem itself
will use the clk api, resulting in a deadlock:

clk_prepare(audio_clk)
	i2c_transfer(..)
		clk_prepare(i2c_controller_clk)

The ability to reenter the clock framework prevents this deadlock.

Other use cases exist such as allowing .set_rate callbacks to call
clk_set_parent to achieve the best rate, or to save power in certain
configurations.  Yet another example is performing pinctrl operations
from a clk_ops callback.  Calls into the pinctrl subsystem may call
clk_{un}prepare on an unrelated clock.  Allowing for nested calls to
reenter the clock framework enables both of these use cases.

Reentrancy is implemented by two global pointers that track the owner
currently holding a global lock.  One pointer tracks the owner during
sleepable, mutex-protected operations and the other one tracks the owner
during non-interruptible, spinlock-protected operations.

When the clk framework is entered we try to hold the global lock.  If it
is held we compare the current task id against the current owner; a
match implies a nested call and we reenter.  If the values do not match
then we block on the lock until it is released.

Signed-off-by: Mike Turquette <mturquette@linaro.org>
Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Cc: David Brown <davidb@codeaurora.org>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
---
Changes since v4:
 * remove unnecessary atomic operations
 * remove casting bugs
 * place reentrancy logic into locking helper functions
 * improve debugging with reference counting and WARNs

 drivers/clk/clk.c |   43 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 41 insertions(+), 2 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index bea47d5..fe7c054 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,10 +19,17 @@
 #include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
 static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
@@ -30,21 +37,53 @@ static LIST_HEAD(clk_notifier_list);
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
-	mutex_lock(&prepare_lock);
+	if (!mutex_trylock(&prepare_lock)) {
+		if (prepare_owner == current) {
+			prepare_refcnt++;
+			return;
+		}
+		mutex_lock(&prepare_lock);
+	}
+	WARN_ON_ONCE(prepare_owner != NULL);
+	WARN_ON_ONCE(prepare_refcnt != 0);
+	prepare_owner = current;
+	prepare_refcnt = 1;
 }
 
 static void clk_prepare_unlock(void)
 {
+	WARN_ON_ONCE(prepare_owner != current);
+	WARN_ON_ONCE(prepare_refcnt == 0);
+
+	if (--prepare_refcnt)
+		return;
+	prepare_owner = NULL;
 	mutex_unlock(&prepare_lock);
 }
 
 static void clk_enable_lock(unsigned long *flags)
 {
-	spin_lock_irqsave(&enable_lock, *flags);
+	if (!spin_trylock_irqsave(&enable_lock, *flags)) {
+		if (enable_owner == current) {
+			enable_refcnt++;
+			return;
+		}
+		spin_lock_irqsave(&enable_lock, *flags);
+	}
+	WARN_ON_ONCE(enable_owner != NULL);
+	WARN_ON_ONCE(enable_refcnt != 0);
+	enable_owner = current;
+	enable_refcnt = 1;
 }
 
 static void clk_enable_unlock(unsigned long *flags)
 {
+	WARN_ON_ONCE(enable_owner != current);
+	WARN_ON_ONCE(enable_refcnt == 0);
+
+	if (--enable_refcnt)
+		return;
+	enable_owner = NULL;
 	spin_unlock_irqrestore(&enable_lock, *flags);
 }
 
-- 
1.7.10.4


^ permalink raw reply related	[flat|nested] 51+ messages in thread

* Re: [PATCH 1/2] clk: abstract locking out into helper functions
  2013-03-28  4:45     ` Mike Turquette
@ 2013-03-28  9:31       ` Thomas Gleixner
  -1 siblings, 0 replies; 51+ messages in thread
From: Thomas Gleixner @ 2013-03-28  9:31 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linux-arm-kernel, patches, linaro-kernel,
	rajagopal.venkat, davidb, ulf.hansson, laurent.pinchart

On Wed, 27 Mar 2013, Mike Turquette wrote:

> Create locking helpers for the global mutex and global spinlock.  The
> definitions of these helpers will be expanded upon in the next patch
> which introduces reentrancy into the locking scheme.

This looks way better. Nitpick below.
 
> +static void clk_enable_lock(unsigned long *flags)
> +{
> +	spin_lock_irqsave(&enable_lock, *flags);
> +}

> +static void clk_enable_unlock(unsigned long *flags)

Please just hand in the flags, no need for indirection.

> +{
> +	spin_unlock_irqrestore(&enable_lock, *flags);
> +}
> +
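
A sketch of that suggestion (assuming the lock side then returns the
flags, since spin_lock_irqsave() has to write them somewhere):

static unsigned long clk_enable_lock(void)
{
        unsigned long flags;

        spin_lock_irqsave(&enable_lock, flags);
        return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
        spin_unlock_irqrestore(&enable_lock, flags);
}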

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>


* Re: [PATCH 2/2] clk: allow reentrant calls into the clk framework
  2013-03-28  4:45     ` Mike Turquette
@ 2013-03-28  9:33       ` Thomas Gleixner
  -1 siblings, 0 replies; 51+ messages in thread
From: Thomas Gleixner @ 2013-03-28  9:33 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linux-arm-kernel, patches, linaro-kernel,
	rajagopal.venkat, davidb, ulf.hansson, laurent.pinchart

On Wed, 27 Mar 2013, Mike Turquette wrote:

> Reentrancy into the clock framework is necessary for clock operations
> that result in nested calls to the clk api.  A common example is a clock
> that is prepared via an i2c transaction, such as a clock inside of a
> discrete audio chip or a power management IC.  The i2c subsystem itself
> will use the clk api resulting in a deadlock:
> 
> clk_prepare(audio_clk)
> 	i2c_transfer(..)
> 		clk_prepare(i2c_controller_clk)
> 
> The ability to reenter the clock framework prevents this deadlock.
> 
> Other use cases exist such as allowing .set_rate callbacks to call
> clk_set_parent to achieve the best rate, or to save power in certain
> configurations.  Yet another example is performing pinctrl operations
> from a clk_ops callback.  Calls into the pinctrl subsystem may call
> clk_{un}prepare on an unrelated clock.  Allowing for nested calls to
> reenter the clock framework enables both of these use cases.
> 
> Reentrancy is implemented by two global pointers that track the owner
> currently holding a global lock.  One pointer tracks the owner during
> sleepable, mutex-protected operations and the other one tracks the owner
> during non-interruptible, spinlock-protected operations.
> 
> When the clk framework is entered we try to hold the global lock.  If it
> is held we compare the current task id against the current owner; a

s/task id/task/. We store the task pointer in the owner variable.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

* Re: [PATCH v5 0/2] reentrancy in the common clk framework
  2013-03-28  4:45   ` Mike Turquette
@ 2013-03-28 10:44     ` Laurent Pinchart
  -1 siblings, 0 replies; 51+ messages in thread
From: Laurent Pinchart @ 2013-03-28 10:44 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linux-arm-kernel, patches, linaro-kernel,
	rajagopal.venkat, davidb, ulf.hansson, tglx

Hi Mike,

On Wednesday 27 March 2013 21:45:56 Mike Turquette wrote:
> This fifth attempt at allowing calls to the clk api to reenter splits
> the last patch into two parts.  The first patch abstracts out the
> locking details into some helper functions and converts all of the
> direct calls to the mutex and spinlock api to use these helpers.
> 
> The second patch introduces the reentrancy logic into these helper
> functions.  Fundamentally the reentrancy logic hasn't changed since v4,
> but fixes for casting bugs, the removal of unnecessary barriers, and a
> cleaner overall design separate this approach from the last one.
> 
> Changes tested on top of the latest clk-next branch with an OMAP4430
> Panda board.
> 
> Mike Turquette (2):
>   clk: abstract locking out into helper functions
>   clk: allow reentrant calls into the clk framework
> 
>  drivers/clk/clk.c |  136 +++++++++++++++++++++++++++++++++++---------------
>  1 file changed, 98 insertions(+), 38 deletions(-)

For the whole patch set,

Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>

on a DM3730.

-- 
Regards,

Laurent Pinchart


* Re: [PATCH 2/2] clk: allow reentrant calls into the clk framework
  2013-03-28  9:33       ` Thomas Gleixner
@ 2013-03-28 15:23         ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28 15:23 UTC (permalink / raw)
  To: Thomas Gleixner
  Cc: linux-kernel, linux-arm-kernel, Patch Tracking, linaro-kernel,
	Rajagopal Venkat, David Brown, Ulf Hansson, laurent.pinchart

On Thu, Mar 28, 2013 at 2:33 AM, Thomas Gleixner <tglx@linutronix.de> wrote:
> On Wed, 27 Mar 2013, Mike Turquette wrote:
>
>> Reentrancy into the clock framework is necessary for clock operations
>> that result in nested calls to the clk api.  A common example is a clock
>> that is prepared via an i2c transaction, such as a clock inside of a
>> discrete audio chip or a power management IC.  The i2c subsystem itself
>> will use the clk api resulting in a deadlock:
>>
>> clk_prepare(audio_clk)
>>       i2c_transfer(..)
>>               clk_prepare(i2c_controller_clk)
>>
>> The ability to reenter the clock framework prevents this deadlock.
>>
>> Other use cases exist such as allowing .set_rate callbacks to call
>> clk_set_parent to achieve the best rate, or to save power in certain
>> configurations.  Yet another example is performing pinctrl operations
>> from a clk_ops callback.  Calls into the pinctrl subsystem may call
>> clk_{un}prepare on an unrelated clock.  Allowing for nested calls to
>> reenter the clock framework enables both of these use cases.
>>
>> Reentrancy is implemented by two global pointers that track the owner
>> currently holding a global lock.  One pointer tracks the owner during
>> sleepable, mutex-protected operations and the other one tracks the owner
>> during non-interruptible, spinlock-protected operations.
>>
>> When the clk framework is entered we try to hold the global lock.  If it
>> is held we compare the current task id against the current owner; a
>
> s/task id/task/. We store the task pointer in the owner variable.
>
> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

Will fix the typo and add your reviewed-by.

Thanks for the review,
Mike

* [PATCH v6 0/2] reentrancy in the common clk framework
  2013-03-28  4:45   ` Mike Turquette
@ 2013-03-28 20:59     ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28 20:59 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-arm-kernel, patches, linaro-kernel, rajagopal.venkat,
	davidb, ulf.hansson, laurent.pinchart, tglx, Mike Turquette

This sixth version of the series fixes two minor cosmetic issues and
adds the Reviewed-by and Tested-by tags.  If there are no more comments to
this version then I'll merge these patches into clk-next so that it can
get some cycles in linux-next.

Changes tested on top of the latest clk-next branch with an OMAP4430
Panda board.

Mike Turquette (2):
  clk: abstract locking out into helper functions
  clk: allow reentrant calls into the clk framework

 drivers/clk/clk.c |  139 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 101 insertions(+), 38 deletions(-)

-- 
1.7.10.4


* [PATCH 1/2] clk: abstract locking out into helper functions
  2013-03-28 20:59     ` Mike Turquette
@ 2013-03-28 20:59       ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28 20:59 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-arm-kernel, patches, linaro-kernel, rajagopal.venkat,
	davidb, ulf.hansson, laurent.pinchart, tglx, Mike Turquette

Create locking helpers for the global mutex and global spinlock.  The
definitions of these helpers will be expanded upon in the next patch
which introduces reentrancy into the locking scheme.

Signed-off-by: Mike Turquette <mturquette@linaro.org>
Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Cc: David Brown <davidb@codeaurora.org>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
---
Changes since v5:
 * pass flags by value instead of by reference in clk_enable_{un}lock

 drivers/clk/clk.c |   99 +++++++++++++++++++++++++++++++++--------------------
 1 file changed, 61 insertions(+), 38 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff..0b5d612 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/***           locking             ***/
+static void clk_prepare_lock(void)
+{
+	mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+	mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&enable_lock, flags);
+	return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+	spin_unlock_irqrestore(&enable_lock, flags);
+}
+
 /***        debugfs support        ***/
 
 #ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
 	seq_printf(s, "---------------------------------------------------------------------\n");
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(c, &clk_root_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
@@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	hlist_for_each_entry(c, &clk_orphan_list, child_node)
 		clk_summary_show_subtree(s, c, 0);
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
@@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
 
 	seq_printf(s, "{");
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(c, &clk_root_list, child_node) {
 		if (!first_node)
@@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
 		clk_dump_subtree(s, c, 0);
 	}
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	seq_printf(s, "}");
 	return 0;
@@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
 	if (!orphandir)
 		return -ENOMEM;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
 
 	inited = 1;
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
@@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
 	hlist_for_each_entry(child, &clk->children, child_node)
 		clk_disable_unused_subtree(child);
 
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 
 	if (clk->enable_count)
 		goto unlock_out;
@@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
 	}
 
 unlock_out:
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 
 out:
 	return;
@@ -403,7 +426,7 @@ static int clk_disable_unused(void)
 {
 	struct clk *clk;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	hlist_for_each_entry(clk, &clk_root_list, child_node)
 		clk_disable_unused_subtree(clk);
@@ -417,7 +440,7 @@ static int clk_disable_unused(void)
 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
 		clk_unprepare_unused_subtree(clk);
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return 0;
 }
@@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
  */
 void clk_unprepare(struct clk *clk)
 {
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	__clk_unprepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
@@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
 {
 	int ret;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	ret = __clk_prepare(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 	__clk_disable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
@@ -745,9 +768,9 @@ int clk_enable(struct clk *clk)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 	ret = __clk_enable(clk);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 
 	return ret;
 }
@@ -792,9 +815,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long ret;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	ret = __clk_round_rate(clk, rate);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -889,13 +912,13 @@ unsigned long clk_get_rate(struct clk *clk)
 {
 	unsigned long rate;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
 		__clk_recalc_rates(clk, 0);
 
 	rate = __clk_get_rate(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return rate;
 }
@@ -1100,7 +1123,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	int ret = 0;
 
 	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* bail early if nothing to do */
 	if (rate == clk->rate)
@@ -1132,7 +1155,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	clk_change_rate(top);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1148,9 +1171,9 @@ struct clk *clk_get_parent(struct clk *clk)
 {
 	struct clk *parent;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 	parent = __clk_get_parent(clk);
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return parent;
 }
@@ -1294,19 +1317,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
 		__clk_prepare(parent);
 
 	/* FIXME replace with clk_is_enabled(clk) someday */
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 	if (clk->enable_count)
 		__clk_enable(parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 
 	/* change clock input source */
 	ret = clk->ops->set_parent(clk->hw, i);
 
 	/* clean up old prepare and enable */
-	spin_lock_irqsave(&enable_lock, flags);
+	flags = clk_enable_lock();
 	if (clk->enable_count)
 		__clk_disable(old_parent);
-	spin_unlock_irqrestore(&enable_lock, flags);
+	clk_enable_unlock(flags);
 
 	if (clk->prepare_count)
 		__clk_unprepare(old_parent);
@@ -1338,7 +1361,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 		return -ENOSYS;
 
 	/* prevent racing with updates to the clock topology */
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	if (clk->parent == parent)
 		goto out;
@@ -1367,7 +1390,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
 	__clk_reparent(clk, parent);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1390,7 +1413,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	if (!clk)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* check to see if a clock with this name is already registered */
 	if (__clk_lookup(clk->name)) {
@@ -1514,7 +1537,7 @@ int __clk_init(struct device *dev, struct clk *clk)
 	clk_debug_register(clk);
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1748,7 +1771,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	if (!clk || !nb)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	/* search the list of notifiers for this clk */
 	list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1772,7 +1795,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	clk->notifier_count++;
 
 out:
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
@@ -1797,7 +1820,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 	if (!clk || !nb)
 		return -EINVAL;
 
-	mutex_lock(&prepare_lock);
+	clk_prepare_lock();
 
 	list_for_each_entry(cn, &clk_notifier_list, node)
 		if (cn->clk == clk)
@@ -1818,7 +1841,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 		ret = -ENOENT;
 	}
 
-	mutex_unlock(&prepare_lock);
+	clk_prepare_unlock();
 
 	return ret;
 }
-- 
1.7.10.4


* [PATCH 2/2] clk: allow reentrant calls into the clk framework
  2013-03-28 20:59     ` Mike Turquette
@ 2013-03-28 20:59       ` Mike Turquette
  -1 siblings, 0 replies; 51+ messages in thread
From: Mike Turquette @ 2013-03-28 20:59 UTC (permalink / raw)
  To: linux-kernel
  Cc: linux-arm-kernel, patches, linaro-kernel, rajagopal.venkat,
	davidb, ulf.hansson, laurent.pinchart, tglx, Mike Turquette

Reentrancy into the clock framework is necessary for clock operations
that result in nested calls to the clk api.  A common example is a clock
that is prepared via an i2c transaction, such as a clock inside of a
discrete audio chip or a power management IC.  The i2c subsystem itself
will use the clk api resulting in a deadlock:

clk_prepare(audio_clk)
	i2c_transfer(..)
		clk_prepare(i2c_controller_clk)

The ability to reenter the clock framework prevents this deadlock.
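
To make the nesting concrete, here is a sketch of what such a prepare
callback might look like (hypothetical driver: the audio_clk container,
to_audio_clk() helper and gate register are invented for illustration):

static int audio_clk_prepare(struct clk_hw *hw)
{
	struct audio_clk *aclk = to_audio_clk(hw);	/* invented helper */
	u8 buf[2] = { AUDIO_CLK_GATE_REG, 0x01 };	/* invented register */
	struct i2c_msg msg = {
		.addr = aclk->client->addr,
		.len = sizeof(buf),
		.buf = buf,
	};
	int ret;

	/* i2c_transfer() calls clk_prepare() on the i2c controller's
	 * clock while this task already holds prepare_lock; the
	 * reentrancy below is what keeps this from deadlocking. */
	ret = i2c_transfer(aclk->client->adapter, &msg, 1);
	return ret == 1 ? 0 : -EIO;
}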

Other use cases exist such as allowing .set_rate callbacks to call
clk_set_parent to achieve the best rate, or to save power in certain
configurations.  Yet another example is performing pinctrl operations
from a clk_ops callback.  Calls into the pinctrl subsystem may call
clk_{un}prepare on an unrelated clock.  Allowing for nested calls to
reenter the clock framework enables both of these use cases.

Reentrancy is implemented by two global pointers that track the owner
currently holding a global lock.  One pointer tracks the owner during
sleepable, mutex-protected operations and the other one tracks the owner
during non-interruptible, spinlock-protected operations.

When the clk framework is entered we try to hold the global lock.  If it
is held we compare the current task against the current owner; a match
implies a nested call and we reenter.  If the values do not match then
we block on the lock until it is released.
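
Concretely, for the i2c example above the lock state (prepare_owner and
prepare_refcnt, introduced below) evolves as:

clk_prepare(audio_clk)
	clk_prepare_lock()	/* owner = current, refcnt = 1 */
	i2c_transfer(..)
		clk_prepare(i2c_controller_clk)
			clk_prepare_lock()	/* owner == current, refcnt = 2 */
			clk_prepare_unlock()	/* refcnt = 1 */
	clk_prepare_unlock()	/* refcnt = 0, owner = NULL, mutex released */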

Signed-off-by: Mike Turquette <mturquette@linaro.org>
Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Cc: David Brown <davidb@codeaurora.org>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
---
Changes since v5:
 * fixed up typo in changelog

Changes since v4:
 * remove unnecessary atomic operations
 * remove casting bugs
 * place reentrancy logic into locking helper functions
 * improve debugging with reference counting and WARNs

 drivers/clk/clk.c |   44 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 42 insertions(+), 2 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0b5d612..0230c9d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,10 +19,17 @@
 #include <linux/of.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 
 static DEFINE_SPINLOCK(enable_lock);
 static DEFINE_MUTEX(prepare_lock);
 
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
 static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
@@ -30,23 +37,56 @@ static LIST_HEAD(clk_notifier_list);
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
-	mutex_lock(&prepare_lock);
+	if (!mutex_trylock(&prepare_lock)) {
+		if (prepare_owner == current) {
+			prepare_refcnt++;
+			return;
+		}
+		mutex_lock(&prepare_lock);
+	}
+	WARN_ON_ONCE(prepare_owner != NULL);
+	WARN_ON_ONCE(prepare_refcnt != 0);
+	prepare_owner = current;
+	prepare_refcnt = 1;
 }
 
 static void clk_prepare_unlock(void)
 {
+	WARN_ON_ONCE(prepare_owner != current);
+	WARN_ON_ONCE(prepare_refcnt == 0);
+
+	if (--prepare_refcnt)
+		return;
+	prepare_owner = NULL;
 	mutex_unlock(&prepare_lock);
 }
 
 static unsigned long clk_enable_lock(void)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&enable_lock, flags);
+
+	if (!spin_trylock_irqsave(&enable_lock, flags)) {
+		if (enable_owner == current) {
+			enable_refcnt++;
+			return flags;
+		}
+		spin_lock_irqsave(&enable_lock, flags);
+	}
+	WARN_ON_ONCE(enable_owner != NULL);
+	WARN_ON_ONCE(enable_refcnt != 0);
+	enable_owner = current;
+	enable_refcnt = 1;
 	return flags;
 }
 
 static void clk_enable_unlock(unsigned long flags)
 {
+	WARN_ON_ONCE(enable_owner != current);
+	WARN_ON_ONCE(enable_refcnt == 0);
+
+	if (--enable_refcnt)
+		return;
+	enable_owner = NULL;
 	spin_unlock_irqrestore(&enable_lock, flags);
 }
 
-- 
1.7.10.4

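With both helpers reentrant, a clk_ops callback may now call back into
the clk api from the same task.  A sketch of a hypothetical .set_rate
implementation (the "pll_bypass" clock name is invented for
illustration):

static int pll_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct clk *bypass = __clk_lookup("pll_bypass");	/* invented */
	int ret;

	/* .set_rate is called with prepare_lock already held; the
	 * nested clk_prepare() sees prepare_owner == current and
	 * bumps prepare_refcnt to 2 instead of blocking. */
	ret = clk_prepare(bypass);
	if (ret)
		return ret;

	/* ... switch to the bypass source and reprogram the PLL ... */

	clk_unprepare(bypass);	/* refcnt drops back to 1 */
	return 0;
}

The enable_lock side works the same way for clk_enable()/clk_disable()
called from .enable or .disable callbacks.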

* Re: [PATCH 1/2] clk: abstract locking out into helper functions
  2013-03-28 20:59       ` Mike Turquette
@ 2013-04-02  9:23         ` Ulf Hansson
  -1 siblings, 0 replies; 51+ messages in thread
From: Ulf Hansson @ 2013-04-02  9:23 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linux-arm-kernel, patches, linaro-kernel,
	rajagopal.venkat, davidb, laurent.pinchart, tglx

On 28 March 2013 21:59, Mike Turquette <mturquette@linaro.org> wrote:
> Create locking helpers for the global mutex and global spinlock.  The
> definitions of these helpers will be expanded upon in the next patch
> which introduces reentrancy into the locking scheme.
>
> Signed-off-by: Mike Turquette <mturquette@linaro.org>
> Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
> Cc: David Brown <davidb@codeaurora.org>
> Cc: Ulf Hansson <ulf.hansson@linaro.org>
> Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> Changes since v5:
>  * pass flags by value instead of by reference in clk_enable_{un}lock
>
>  drivers/clk/clk.c |   99 +++++++++++++++++++++++++++++++++--------------------
>  1 file changed, 61 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 5e8ffff..0b5d612 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
>  static HLIST_HEAD(clk_orphan_list);
>  static LIST_HEAD(clk_notifier_list);
>
> +/***           locking             ***/
> +static void clk_prepare_lock(void)
> +{
> +       mutex_lock(&prepare_lock);
> +}
> +
> +static void clk_prepare_unlock(void)
> +{
> +       mutex_unlock(&prepare_lock);
> +}
> +
> +static unsigned long clk_enable_lock(void)
> +{
> +       unsigned long flags;
> +       spin_lock_irqsave(&enable_lock, flags);
> +       return flags;
> +}
> +
> +static void clk_enable_unlock(unsigned long flags)
> +{
> +       spin_unlock_irqrestore(&enable_lock, flags);
> +}
> +
>  /***        debugfs support        ***/
>
>  #ifdef CONFIG_COMMON_CLK_DEBUG
> @@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
>         seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
>         seq_printf(s, "---------------------------------------------------------------------\n");
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(c, &clk_root_list, child_node)
>                 clk_summary_show_subtree(s, c, 0);
> @@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
>         hlist_for_each_entry(c, &clk_orphan_list, child_node)
>                 clk_summary_show_subtree(s, c, 0);
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return 0;
>  }
> @@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
>
>         seq_printf(s, "{");
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(c, &clk_root_list, child_node) {
>                 if (!first_node)
> @@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
>                 clk_dump_subtree(s, c, 0);
>         }
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         seq_printf(s, "}");
>         return 0;
> @@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
>         if (!orphandir)
>                 return -ENOMEM;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(clk, &clk_root_list, child_node)
>                 clk_debug_create_subtree(clk, rootdir);
> @@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
>
>         inited = 1;
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return 0;
>  }
> @@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
>         hlist_for_each_entry(child, &clk->children, child_node)
>                 clk_disable_unused_subtree(child);
>
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>
>         if (clk->enable_count)
>                 goto unlock_out;
> @@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
>         }
>
>  unlock_out:
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>  out:
>         return;
> @@ -403,7 +426,7 @@ static int clk_disable_unused(void)
>  {
>         struct clk *clk;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(clk, &clk_root_list, child_node)
>                 clk_disable_unused_subtree(clk);
> @@ -417,7 +440,7 @@ static int clk_disable_unused(void)
>         hlist_for_each_entry(clk, &clk_orphan_list, child_node)
>                 clk_unprepare_unused_subtree(clk);
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return 0;
>  }
> @@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
>   */
>  void clk_unprepare(struct clk *clk)
>  {
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         __clk_unprepare(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>  }
>  EXPORT_SYMBOL_GPL(clk_unprepare);
>
> @@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
>  {
>         int ret;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         ret = __clk_prepare(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
>  {
>         unsigned long flags;
>
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         __clk_disable(clk);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>  }
>  EXPORT_SYMBOL_GPL(clk_disable);
>
> @@ -745,9 +768,9 @@ int clk_enable(struct clk *clk)
>         unsigned long flags;
>         int ret;
>
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         ret = __clk_enable(clk);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>         return ret;
>  }
> @@ -792,9 +815,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
>  {
>         unsigned long ret;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         ret = __clk_round_rate(clk, rate);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -889,13 +912,13 @@ unsigned long clk_get_rate(struct clk *clk)
>  {
>         unsigned long rate;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
>                 __clk_recalc_rates(clk, 0);
>
>         rate = __clk_get_rate(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return rate;
>  }
> @@ -1100,7 +1123,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
>         int ret = 0;
>
>         /* prevent racing with updates to the clock topology */
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         /* bail early if nothing to do */
>         if (rate == clk->rate)
> @@ -1132,7 +1155,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
>         clk_change_rate(top);
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1148,9 +1171,9 @@ struct clk *clk_get_parent(struct clk *clk)
>  {
>         struct clk *parent;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         parent = __clk_get_parent(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return parent;
>  }
> @@ -1294,19 +1317,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
>                 __clk_prepare(parent);
>
>         /* FIXME replace with clk_is_enabled(clk) someday */
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         if (clk->enable_count)
>                 __clk_enable(parent);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>         /* change clock input source */
>         ret = clk->ops->set_parent(clk->hw, i);
>
>         /* clean up old prepare and enable */
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         if (clk->enable_count)
>                 __clk_disable(old_parent);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>         if (clk->prepare_count)
>                 __clk_unprepare(old_parent);
> @@ -1338,7 +1361,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
>                 return -ENOSYS;
>
>         /* prevent racing with updates to the clock topology */
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         if (clk->parent == parent)
>                 goto out;
> @@ -1367,7 +1390,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
>         __clk_reparent(clk, parent);
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1390,7 +1413,7 @@ int __clk_init(struct device *dev, struct clk *clk)
>         if (!clk)
>                 return -EINVAL;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         /* check to see if a clock with this name is already registered */
>         if (__clk_lookup(clk->name)) {
> @@ -1514,7 +1537,7 @@ int __clk_init(struct device *dev, struct clk *clk)
>         clk_debug_register(clk);
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1748,7 +1771,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
>         if (!clk || !nb)
>                 return -EINVAL;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         /* search the list of notifiers for this clk */
>         list_for_each_entry(cn, &clk_notifier_list, node)
> @@ -1772,7 +1795,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
>         clk->notifier_count++;
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1797,7 +1820,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
>         if (!clk || !nb)
>                 return -EINVAL;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         list_for_each_entry(cn, &clk_notifier_list, node)
>                 if (cn->clk == clk)
> @@ -1818,7 +1841,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
>                 ret = -ENOENT;
>         }
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> --
> 1.7.10.4
>

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>


* [PATCH 1/2] clk: abstract locking out into helper functions
@ 2013-04-02  9:23         ` Ulf Hansson
  0 siblings, 0 replies; 51+ messages in thread
From: Ulf Hansson @ 2013-04-02  9:23 UTC (permalink / raw)
  To: linux-arm-kernel

On 28 March 2013 21:59, Mike Turquette <mturquette@linaro.org> wrote:
> Create locking helpers for the global mutex and global spinlock.  The
> definitions of these helpers will be expanded upon in the next patch
> which introduces reentrancy into the locking scheme.
>
> Signed-off-by: Mike Turquette <mturquette@linaro.org>
> Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
> Cc: David Brown <davidb@codeaurora.org>
> Cc: Ulf Hansson <ulf.hansson@linaro.org>
> Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> Changes since v5:
>  * pass flags by value instead of by reference in clk_enable_{un}lock
>
>  drivers/clk/clk.c |   99 +++++++++++++++++++++++++++++++++--------------------
>  1 file changed, 61 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 5e8ffff..0b5d612 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
>  static HLIST_HEAD(clk_orphan_list);
>  static LIST_HEAD(clk_notifier_list);
>
> +/***           locking             ***/
> +static void clk_prepare_lock(void)
> +{
> +       mutex_lock(&prepare_lock);
> +}
> +
> +static void clk_prepare_unlock(void)
> +{
> +       mutex_unlock(&prepare_lock);
> +}
> +
> +static unsigned long clk_enable_lock(void)
> +{
> +       unsigned long flags;
> +       spin_lock_irqsave(&enable_lock, flags);
> +       return flags;
> +}
> +
> +static void clk_enable_unlock(unsigned long flags)
> +{
> +       spin_unlock_irqrestore(&enable_lock, flags);
> +}
> +
>  /***        debugfs support        ***/
>
>  #ifdef CONFIG_COMMON_CLK_DEBUG
> @@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
>         seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
>         seq_printf(s, "---------------------------------------------------------------------\n");
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(c, &clk_root_list, child_node)
>                 clk_summary_show_subtree(s, c, 0);
> @@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
>         hlist_for_each_entry(c, &clk_orphan_list, child_node)
>                 clk_summary_show_subtree(s, c, 0);
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return 0;
>  }
> @@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
>
>         seq_printf(s, "{");
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(c, &clk_root_list, child_node) {
>                 if (!first_node)
> @@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
>                 clk_dump_subtree(s, c, 0);
>         }
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         seq_printf(s, "}");
>         return 0;
> @@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
>         if (!orphandir)
>                 return -ENOMEM;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(clk, &clk_root_list, child_node)
>                 clk_debug_create_subtree(clk, rootdir);
> @@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
>
>         inited = 1;
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return 0;
>  }
> @@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
>         hlist_for_each_entry(child, &clk->children, child_node)
>                 clk_disable_unused_subtree(child);
>
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>
>         if (clk->enable_count)
>                 goto unlock_out;
> @@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
>         }
>
>  unlock_out:
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>  out:
>         return;
> @@ -403,7 +426,7 @@ static int clk_disable_unused(void)
>  {
>         struct clk *clk;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         hlist_for_each_entry(clk, &clk_root_list, child_node)
>                 clk_disable_unused_subtree(clk);
> @@ -417,7 +440,7 @@ static int clk_disable_unused(void)
>         hlist_for_each_entry(clk, &clk_orphan_list, child_node)
>                 clk_unprepare_unused_subtree(clk);
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return 0;
>  }
> @@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
>   */
>  void clk_unprepare(struct clk *clk)
>  {
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         __clk_unprepare(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>  }
>  EXPORT_SYMBOL_GPL(clk_unprepare);
>
> @@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
>  {
>         int ret;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         ret = __clk_prepare(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
>  {
>         unsigned long flags;
>
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         __clk_disable(clk);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>  }
>  EXPORT_SYMBOL_GPL(clk_disable);
>
> @@ -745,9 +768,9 @@ int clk_enable(struct clk *clk)
>         unsigned long flags;
>         int ret;
>
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         ret = __clk_enable(clk);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>         return ret;
>  }
> @@ -792,9 +815,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
>  {
>         unsigned long ret;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         ret = __clk_round_rate(clk, rate);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -889,13 +912,13 @@ unsigned long clk_get_rate(struct clk *clk)
>  {
>         unsigned long rate;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
>                 __clk_recalc_rates(clk, 0);
>
>         rate = __clk_get_rate(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return rate;
>  }
> @@ -1100,7 +1123,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
>         int ret = 0;
>
>         /* prevent racing with updates to the clock topology */
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         /* bail early if nothing to do */
>         if (rate == clk->rate)
> @@ -1132,7 +1155,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
>         clk_change_rate(top);
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1148,9 +1171,9 @@ struct clk *clk_get_parent(struct clk *clk)
>  {
>         struct clk *parent;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>         parent = __clk_get_parent(clk);
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return parent;
>  }
> @@ -1294,19 +1317,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
>                 __clk_prepare(parent);
>
>         /* FIXME replace with clk_is_enabled(clk) someday */
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         if (clk->enable_count)
>                 __clk_enable(parent);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>         /* change clock input source */
>         ret = clk->ops->set_parent(clk->hw, i);
>
>         /* clean up old prepare and enable */
> -       spin_lock_irqsave(&enable_lock, flags);
> +       flags = clk_enable_lock();
>         if (clk->enable_count)
>                 __clk_disable(old_parent);
> -       spin_unlock_irqrestore(&enable_lock, flags);
> +       clk_enable_unlock(flags);
>
>         if (clk->prepare_count)
>                 __clk_unprepare(old_parent);
> @@ -1338,7 +1361,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
>                 return -ENOSYS;
>
>         /* prevent racing with updates to the clock topology */
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         if (clk->parent == parent)
>                 goto out;
> @@ -1367,7 +1390,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
>         __clk_reparent(clk, parent);
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1390,7 +1413,7 @@ int __clk_init(struct device *dev, struct clk *clk)
>         if (!clk)
>                 return -EINVAL;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         /* check to see if a clock with this name is already registered */
>         if (__clk_lookup(clk->name)) {
> @@ -1514,7 +1537,7 @@ int __clk_init(struct device *dev, struct clk *clk)
>         clk_debug_register(clk);
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1748,7 +1771,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
>         if (!clk || !nb)
>                 return -EINVAL;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         /* search the list of notifiers for this clk */
>         list_for_each_entry(cn, &clk_notifier_list, node)
> @@ -1772,7 +1795,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
>         clk->notifier_count++;
>
>  out:
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> @@ -1797,7 +1820,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
>         if (!clk || !nb)
>                 return -EINVAL;
>
> -       mutex_lock(&prepare_lock);
> +       clk_prepare_lock();
>
>         list_for_each_entry(cn, &clk_notifier_list, node)
>                 if (cn->clk == clk)
> @@ -1818,7 +1841,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
>                 ret = -ENOENT;
>         }
>
> -       mutex_unlock(&prepare_lock);
> +       clk_prepare_unlock();
>
>         return ret;
>  }
> --
> 1.7.10.4
>

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
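
With the critical sections funneled through the four helpers above, the
locking policy lives in exactly one place: the follow-up reentrancy patch
only has to touch the helper bodies, and any new sleepable entry point
follows the same recipe.  A minimal sketch with a hypothetical entry point
(clk_set_phase is invented here purely for illustration; it is not part of
this series):

/*
 * Hypothetical sketch: a new sleepable clk.h entry point inherits the
 * framework-wide locking policy from the helpers instead of open-coding
 * mutex_lock(&prepare_lock) itself.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = -EINVAL;

	clk_prepare_lock();

	if (clk && clk->ops->set_phase)
		ret = clk->ops->set_phase(clk->hw, degrees);

	clk_prepare_unlock();

	return ret;
}

The same split applies on the fast path: a caller of clk_enable_lock() must
carry the returned flags to the matching clk_enable_unlock(), mirroring the
spin_lock_irqsave()/spin_unlock_irqrestore() pair the helpers wrap.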


* Re: [PATCH 2/2] clk: allow reentrant calls into the clk framework
  2013-03-28 20:59       ` Mike Turquette
@ 2013-04-02  9:35         ` Ulf Hansson
  -1 siblings, 0 replies; 51+ messages in thread
From: Ulf Hansson @ 2013-04-02  9:35 UTC (permalink / raw)
  To: Mike Turquette
  Cc: linux-kernel, linux-arm-kernel, patches, linaro-kernel,
	rajagopal.venkat, davidb, laurent.pinchart, tglx

On 28 March 2013 21:59, Mike Turquette <mturquette@linaro.org> wrote:
> Reentrancy into the clock framework is necessary for clock operations
> that result in nested calls to the clk api.  A common example is a clock
> that is prepared via an i2c transaction, such as a clock inside of a
> discrete audio chip or a power management IC.  The i2c subsystem itself
> will use the clk api resulting in a deadlock:
>
> clk_prepare(audio_clk)
>         i2c_transfer(..)
>                 clk_prepare(i2c_controller_clk)
>
> The ability to reenter the clock framework prevents this deadlock.
>
> Other use cases exist such as allowing .set_rate callbacks to call
> clk_set_parent to achieve the best rate, or to save power in certain
> configurations.  Yet another example is performing pinctrl operations
> from a clk_ops callback.  Calls into the pinctrl subsystem may call
> clk_{un}prepare on an unrelated clock.  Allowing for nested calls to
> reenter the clock framework enables both of these use cases.
>
> Reentrancy is implemented by two global pointers that track the owner
> currently holding a global lock.  One pointer tracks the owner during
> sleepable, mutex-protected operations and the other one tracks the owner
> during non-interruptible, spinlock-protected operations.
>
> When the clk framework is entered we try to hold the global lock.  If it
> is held we compare the current task against the current owner; a match
> implies a nested call and we reenter.  If the values do not match then
> we block on the lock until it is released.
>
> Signed-off-by: Mike Turquette <mturquette@linaro.org>
> Cc: Rajagopal Venkat <rajagopal.venkat@linaro.org>
> Cc: David Brown <davidb@codeaurora.org>
> Cc: Ulf Hansson <ulf.hansson@linaro.org>
> Tested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
> Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> Changes since v5:
>  * fixed up typo in changelog
>
> Changes since v4:
>  * remove unnecessary atomic operations
>  * remove casting bugs
>  * place reentrancy logic into locking helper functions
>  * improve debugging with reference counting and WARNs
>
>  drivers/clk/clk.c |   44 ++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 42 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 0b5d612..0230c9d 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -19,10 +19,17 @@
>  #include <linux/of.h>
>  #include <linux/device.h>
>  #include <linux/init.h>
> +#include <linux/sched.h>
>
>  static DEFINE_SPINLOCK(enable_lock);
>  static DEFINE_MUTEX(prepare_lock);
>
> +static struct task_struct *prepare_owner;
> +static struct task_struct *enable_owner;
> +
> +static int prepare_refcnt;
> +static int enable_refcnt;
> +
>  static HLIST_HEAD(clk_root_list);
>  static HLIST_HEAD(clk_orphan_list);
>  static LIST_HEAD(clk_notifier_list);
> @@ -30,23 +37,56 @@ static LIST_HEAD(clk_notifier_list);
>  /***           locking             ***/
>  static void clk_prepare_lock(void)
>  {
> -       mutex_lock(&prepare_lock);
> +       if (!mutex_trylock(&prepare_lock)) {
> +               if (prepare_owner == current) {
> +                       prepare_refcnt++;
> +                       return;
> +               }
> +               mutex_lock(&prepare_lock);
> +       }
> +       WARN_ON_ONCE(prepare_owner != NULL);
> +       WARN_ON_ONCE(prepare_refcnt != 0);
> +       prepare_owner = current;
> +       prepare_refcnt = 1;
>  }
>
>  static void clk_prepare_unlock(void)
>  {
> +       WARN_ON_ONCE(prepare_owner != current);
> +       WARN_ON_ONCE(prepare_refcnt == 0);
> +
> +       if (--prepare_refcnt)
> +               return;
> +       prepare_owner = NULL;
>         mutex_unlock(&prepare_lock);
>  }
>
>  static unsigned long clk_enable_lock(void)
>  {
>         unsigned long flags;
> -       spin_lock_irqsave(&enable_lock, flags);
> +
> +       if (!spin_trylock_irqsave(&enable_lock, flags)) {
> +               if (enable_owner == current) {
> +                       enable_refcnt++;
> +                       return flags;
> +               }
> +               spin_lock_irqsave(&enable_lock, flags);
> +       }
> +       WARN_ON_ONCE(enable_owner != NULL);
> +       WARN_ON_ONCE(enable_refcnt != 0);
> +       enable_owner = current;
> +       enable_refcnt = 1;
>         return flags;
>  }
>
>  static void clk_enable_unlock(unsigned long flags)
>  {
> +       WARN_ON_ONCE(enable_owner != current);
> +       WARN_ON_ONCE(enable_refcnt == 0);
> +
> +       if (--enable_refcnt)
> +               return;
> +       enable_owner = NULL;
>         spin_unlock_irqrestore(&enable_lock, flags);
>  }
>
> --
> 1.7.10.4
>

Great piece of work!

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
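
To make the changelog's deadlock concrete, a .prepare callback for an
i2c-attached clock chip might look like the sketch below (a hypothetical
driver; every identifier in it is invented for illustration).  The outer
clk_prepare() holds prepare_lock while i2c_transfer() sleeps, and the i2c
bus driver's own nested clk_prepare() call is now recognized by the owner
check in clk_prepare_lock() and simply bumps prepare_refcnt instead of
deadlocking:

/* hypothetical i2c-attached clock chip; all names are invented */
struct audioclk {
	struct clk_hw hw;
	struct i2c_client *client;
};

static int audioclk_prepare(struct clk_hw *hw)
{
	struct audioclk *ac = container_of(hw, struct audioclk, hw);
	u8 buf[2] = { 0x01 /* hypothetical enable register */, 0x01 };
	struct i2c_msg msg = {
		.addr	= ac->client->addr,
		.len	= sizeof(buf),
		.buf	= buf,
	};

	/*
	 * i2c_transfer() ends up in the bus driver, which calls
	 * clk_prepare() on the i2c controller clock.  Without the
	 * reentrancy support above, that nested call would block
	 * forever on the prepare_lock already held by the task
	 * executing this callback.
	 */
	return i2c_transfer(ac->client->adapter, &msg, 1) == 1 ? 0 : -EIO;
}

The owner comparison is what keeps this safe: an unrelated task that loses
the trylock and fails the owner check still blocks in mutex_lock() as
before, so reentrancy is granted only to the task already holding the lock.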
