* [PATCH v4] pm_qos: make update_request non blocking
@ 2010-06-09 15:29 florian
  2010-06-09 15:37 ` James Bottomley
  2010-06-09 15:37 ` James Bottomley
  0 siblings, 2 replies; 40+ messages in thread
From: florian @ 2010-06-09 15:29 UTC (permalink / raw)
  To: pm list
  Cc: james.bottomley, markgross, mgross, linville, Florian Mickler,
	Frederic Weisbecker, Jonathan Corbet, Thomas Gleixner,
	linux-kernel

In order to allow drivers to use pm_qos_update_request() from interrupt
context, we call the notifiers via schedule_work().

Signed-off-by: Florian Mickler <florian@mickler.org>
---

Well, this would be the schedule_work() alternative. 
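
To illustrate the intended use, a driver could then do something like the
following (untested sketch; "foo" is a made-up example driver using the
request-handle API of this tree):

#include <linux/interrupt.h>
#include <linux/pm_qos_params.h>

static struct pm_qos_request_list *foo_qos_req;

static int foo_probe(void)
{
	/* start out with no constraint */
	foo_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
					 PM_QOS_DEFAULT_VALUE);
	return foo_qos_req ? 0 : -ENOMEM;
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	/*
	 * With this patch the notifiers run from a work item, so updating
	 * the request from hard interrupt context is fine.
	 */
	pm_qos_update_request(foo_qos_req, 100 /* usec */);
	return IRQ_HANDLED;
}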

 kernel/pm_qos_params.c |   47 ++++++++++++++++++++++++++++++-----------------
 1 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index f42d3f7..296343a 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -40,6 +40,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/init.h>
+#include <linux/workqueue.h>
 
 #include <linux/uaccess.h>
 
@@ -60,11 +61,13 @@ struct pm_qos_request_list {
 
 static s32 max_compare(s32 v1, s32 v2);
 static s32 min_compare(s32 v1, s32 v2);
+static void update_notify(struct work_struct *work);
 
 struct pm_qos_object {
 	struct pm_qos_request_list requests;
 	struct blocking_notifier_head *notifiers;
 	struct miscdevice pm_qos_power_miscdev;
+	struct work_struct notify;
 	char *name;
 	s32 default_value;
 	atomic_t target_value;
@@ -72,10 +75,12 @@ struct pm_qos_object {
 };
 
 static struct pm_qos_object null_pm_qos;
+
 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
 static struct pm_qos_object cpu_dma_pm_qos = {
 	.requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
 	.notifiers = &cpu_dma_lat_notifier,
+	.notify = __WORK_INITIALIZER(cpu_dma_pm_qos.notify, update_notify),
 	.name = "cpu_dma_latency",
 	.default_value = 2000 * USEC_PER_SEC,
 	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
@@ -86,6 +91,7 @@ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
 static struct pm_qos_object network_lat_pm_qos = {
 	.requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
 	.notifiers = &network_lat_notifier,
+	.notify = __WORK_INITIALIZER(network_lat_pm_qos.notify, update_notify),
 	.name = "network_latency",
 	.default_value = 2000 * USEC_PER_SEC,
 	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
@@ -97,13 +103,14 @@ static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_object network_throughput_pm_qos = {
 	.requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
 	.notifiers = &network_throughput_notifier,
+	.notify = __WORK_INITIALIZER(network_throughput_pm_qos.notify,
+			update_notify),
 	.name = "network_throughput",
 	.default_value = 0,
 	.target_value = ATOMIC_INIT(0),
 	.comparitor = max_compare
 };
 
-
 static struct pm_qos_object *pm_qos_array[] = {
 	&null_pm_qos,
 	&cpu_dma_pm_qos,
@@ -135,35 +142,40 @@ static s32 min_compare(s32 v1, s32 v2)
 	return min(v1, v2);
 }
 
+static void update_notify(struct work_struct *work)
+{
+	struct pm_qos_object *obj =
+		container_of(work, struct pm_qos_object, notify);
+
+	s32 extreme_value = atomic_read(&obj->target_value);
+	blocking_notifier_call_chain(
+		obj->notifiers,
+			(unsigned long) extreme_value, NULL);
+}
 
 static void update_target(int pm_qos_class)
 {
 	s32 extreme_value;
 	struct pm_qos_request_list *node;
+	struct pm_qos_object *obj = pm_qos_array[pm_qos_class];
 	unsigned long flags;
 	int call_notifier = 0;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	extreme_value = pm_qos_array[pm_qos_class]->default_value;
-	list_for_each_entry(node,
-			&pm_qos_array[pm_qos_class]->requests.list, list) {
-		extreme_value = pm_qos_array[pm_qos_class]->comparitor(
-				extreme_value, node->value);
-	}
-	if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
-			extreme_value) {
+	extreme_value = obj->default_value;
+	list_for_each_entry(node, &obj->requests.list, list)
+		extreme_value = obj->comparitor(extreme_value, node->value);
+
+	if (atomic_read(&obj->target_value) != extreme_value) {
 		call_notifier = 1;
-		atomic_set(&pm_qos_array[pm_qos_class]->target_value,
-				extreme_value);
+		atomic_set(&obj->target_value, extreme_value);
 		pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class,
-			atomic_read(&pm_qos_array[pm_qos_class]->target_value));
+			atomic_read(&obj->target_value));
 	}
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	if (call_notifier)
-		blocking_notifier_call_chain(
-				pm_qos_array[pm_qos_class]->notifiers,
-					(unsigned long) extreme_value, NULL);
+		schedule_work(&obj->notify);
 }
 
 static int register_pm_qos_misc(struct pm_qos_object *qos)
@@ -301,7 +313,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_request);
  * @pm_qos_class: identifies which qos target changes should be notified.
  * @notifier: notifier block managed by caller.
  *
- * will register the notifier into a notification chain that gets called
+ * Will register the notifier into a notification chain that gets called
  * upon changes to the pm_qos_class target value.
  */
 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
@@ -320,7 +332,7 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
  * @pm_qos_class: identifies which qos target changes are notified.
  * @notifier: notifier block to be removed.
  *
- * will remove the notifier from the notification chain that gets called
+ * Will remove the notifier from the notification chain that gets called
  * upon changes to the pm_qos_class target value.
  */
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
@@ -392,6 +404,7 @@ static int __init pm_qos_power_init(void)
 {
 	int ret = 0;
 
+
 	ret = register_pm_qos_misc(&cpu_dma_pm_qos);
 	if (ret < 0) {
 		printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
-- 
1.7.1


^ permalink raw reply related	[flat|nested] 40+ messages in thread

* Re: [PATCH v4] pm_qos: make update_request non blocking
  2010-06-09 15:29 [PATCH v4] pm_qos: make update_request non blocking florian
  2010-06-09 15:37 ` James Bottomley
@ 2010-06-09 15:37 ` James Bottomley
  2010-06-09 16:00   ` Florian Mickler
  2010-06-09 16:00   ` Florian Mickler
  1 sibling, 2 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-09 15:37 UTC (permalink / raw)
  To: florian
  Cc: pm list, markgross, mgross, linville, Frederic Weisbecker,
	Jonathan Corbet, Thomas Gleixner, linux-kernel

On Wed, 2010-06-09 at 17:29 +0200, florian@mickler.org wrote:
> In order to allow drivers to use pm_qos_update_request from interrupt
> context we call the notifiers via schedule_work().
> 
> Signed-off-by: Florian Mickler <florian@mickler.org>
> ---
> 
> Well, this would be the schedule_work() alternative. 
> 
>  kernel/pm_qos_params.c |   47 ++++++++++++++++++++++++++++++-----------------
>  1 files changed, 30 insertions(+), 17 deletions(-)
> 
> diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
> index f42d3f7..296343a 100644
> --- a/kernel/pm_qos_params.c
> +++ b/kernel/pm_qos_params.c
> @@ -40,6 +40,7 @@
>  #include <linux/string.h>
>  #include <linux/platform_device.h>
>  #include <linux/init.h>
> +#include <linux/workqueue.h>
>  
>  #include <linux/uaccess.h>
>  
> @@ -60,11 +61,13 @@ struct pm_qos_request_list {
>  
>  static s32 max_compare(s32 v1, s32 v2);
>  static s32 min_compare(s32 v1, s32 v2);
> +static void update_notify(struct work_struct *work);
>  
>  struct pm_qos_object {
>  	struct pm_qos_request_list requests;
>  	struct blocking_notifier_head *notifiers;
>  	struct miscdevice pm_qos_power_miscdev;
> +	struct work_struct notify;
>  	char *name;
>  	s32 default_value;
>  	atomic_t target_value;
> @@ -72,10 +75,12 @@ struct pm_qos_object {
>  };
>  
>  static struct pm_qos_object null_pm_qos;
> +
>  static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
>  static struct pm_qos_object cpu_dma_pm_qos = {
>  	.requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
>  	.notifiers = &cpu_dma_lat_notifier,
> +	.notify = __WORK_INITIALIZER(cpu_dma_pm_qos.notify, update_notify),
>  	.name = "cpu_dma_latency",
>  	.default_value = 2000 * USEC_PER_SEC,
>  	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
> @@ -86,6 +91,7 @@ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
>  static struct pm_qos_object network_lat_pm_qos = {
>  	.requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
>  	.notifiers = &network_lat_notifier,
> +	.notify = __WORK_INITIALIZER(network_lat_pm_qos.notify, update_notify),
>  	.name = "network_latency",
>  	.default_value = 2000 * USEC_PER_SEC,
>  	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
> @@ -97,13 +103,14 @@ static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
>  static struct pm_qos_object network_throughput_pm_qos = {
>  	.requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
>  	.notifiers = &network_throughput_notifier,
> +	.notify = __WORK_INITIALIZER(network_throughput_pm_qos.notify,
> +			update_notify),
>  	.name = "network_throughput",
>  	.default_value = 0,
>  	.target_value = ATOMIC_INIT(0),
>  	.comparitor = max_compare
>  };
>  
> -
>  static struct pm_qos_object *pm_qos_array[] = {
>  	&null_pm_qos,
>  	&cpu_dma_pm_qos,
> @@ -135,35 +142,40 @@ static s32 min_compare(s32 v1, s32 v2)
>  	return min(v1, v2);
>  }
>  
> +static void update_notify(struct work_struct *work)
> +{
> +	struct pm_qos_object *obj =
> +		container_of(work, struct pm_qos_object, notify);
> +
> +	s32 extreme_value = atomic_read(&obj->target_value);
> +	blocking_notifier_call_chain(
> +		obj->notifiers,
> +			(unsigned long) extreme_value, NULL);
> +}
>  
>  static void update_target(int pm_qos_class)
>  {
>  	s32 extreme_value;
>  	struct pm_qos_request_list *node;
> +	struct pm_qos_object *obj = pm_qos_array[pm_qos_class];
>  	unsigned long flags;
>  	int call_notifier = 0;
>  
>  	spin_lock_irqsave(&pm_qos_lock, flags);
> -	extreme_value = pm_qos_array[pm_qos_class]->default_value;
> -	list_for_each_entry(node,
> -			&pm_qos_array[pm_qos_class]->requests.list, list) {
> -		extreme_value = pm_qos_array[pm_qos_class]->comparitor(
> -				extreme_value, node->value);
> -	}
> -	if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
> -			extreme_value) {
> +	extreme_value = obj->default_value;
> +	list_for_each_entry(node, &obj->requests.list, list)
> +		extreme_value = obj->comparitor(extreme_value, node->value);
> +
> +	if (atomic_read(&obj->target_value) != extreme_value) {
>  		call_notifier = 1;
> -		atomic_set(&pm_qos_array[pm_qos_class]->target_value,
> -				extreme_value);
> +		atomic_set(&obj->target_value, extreme_value);
>  		pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class,
> -			atomic_read(&pm_qos_array[pm_qos_class]->target_value));
> +			atomic_read(&obj->target_value));
>  	}
>  	spin_unlock_irqrestore(&pm_qos_lock, flags);
>  
>  	if (call_notifier)
> -		blocking_notifier_call_chain(
> -				pm_qos_array[pm_qos_class]->notifiers,
> -					(unsigned long) extreme_value, NULL);
> +		schedule_work(&obj->notify);
>  }

This still isn't resilient against two successive calls to update.  If
the second one gets to schedule_work() before the work of the first one
has finished, you'll corrupt the workqueue.

The mechanisms to solve this race (refcounting a pool of structures) are
so heavyweight that I concluded it was simpler just to have atomically
updated pm_qos objects and enforce it ... rather than trying to allow
blocking chains to be added to atomic sites via a workqueue.

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v4] pm_qos: make update_request non blocking
  2010-06-09 15:37 ` James Bottomley
  2010-06-09 16:00   ` Florian Mickler
@ 2010-06-09 16:00   ` Florian Mickler
  2010-06-09 16:07     ` James Bottomley
  2010-06-09 16:07     ` James Bottomley
  1 sibling, 2 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-09 16:00 UTC (permalink / raw)
  To: James Bottomley
  Cc: pm list, markgross, mgross, linville, Frederic Weisbecker,
	Jonathan Corbet, Thomas Gleixner, linux-kernel

On Wed, 09 Jun 2010 11:37:12 -0400
James Bottomley <James.Bottomley@suse.de> wrote:


> This still isn't resilient against two successive calls to update.  If
> the second one gets to schedule_work() before the work of the first one
> has finished, you'll corrupt the workqueue.

Sorry, I don't see it. Can you elaborate?

In "run_workqueue(" we do a list_del_init() which resets the
list-pointers of the workitem and only after that reset the
WORK_STRUCT_PENDING member of said structure. 


schedule_work() does a queue_work_on(), which does a test_and_set_bit() on
the WORK_STRUCT_PENDING bit of the work and only queues the work
via list_add_tail() in insert_work() afterwards.
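
Roughly, the relevant logic looks like this (a simplified paraphrase of
kernel/workqueue.c, not the verbatim code):

/* queueing side: schedule_work() -> queue_work_on() */
if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
	/* pending bit was clear, so we own the queueing */
	list_add_tail(&work->entry, &cwq->worklist);	/* insert_work() */
}
/* else: already pending, the second request is silently merged */

/* execution side: run_workqueue() */
list_del_init(&work->entry);
work_clear_pending(work);	/* from here on the work can be queued again */
f(work);			/* here update_notify() runs */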

Where is my thinko? Or was this fixed while you weren't looking?

So what _can_ happen is that we miss a new notification while the old
notification is still in the queue. But I don't think this is a problem.

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v4] pm_qos: make update_request non blocking
  2010-06-09 16:00   ` Florian Mickler
  2010-06-09 16:07     ` James Bottomley
@ 2010-06-09 16:07     ` James Bottomley
  2010-06-09 16:32       ` Florian Mickler
  2010-06-09 16:32       ` Florian Mickler
  1 sibling, 2 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-09 16:07 UTC (permalink / raw)
  To: Florian Mickler
  Cc: pm list, markgross, mgross, linville, Frederic Weisbecker,
	Jonathan Corbet, Thomas Gleixner, linux-kernel

On Wed, 2010-06-09 at 18:00 +0200, Florian Mickler wrote:
> On Wed, 09 Jun 2010 11:37:12 -0400
> James Bottomley <James.Bottomley@suse.de> wrote:
> 
> 
> > This still isn't resilient against two successive calls to update.  If
> > the second one gets to schedule_work() before the work of the first one
> > has finished, you'll corrupt the workqueue.
> 
> Sorry, I don't see it. Can you elaborate?
> 
> In "run_workqueue(" we do a list_del_init() which resets the
> list-pointers of the workitem and only after that reset the
> WORK_STRUCT_PENDING member of said structure. 
> 
> 
> schedule_work does a queue_work_on which does a test_and_set_bit on
> the WORK_STRUCT_PENDING member of the work and only queues the work
> via list_add_tail in insert_work afterwards.
> 
> Where is my think'o? Or was this fixed while you didn't look?
> 
> So what _can_ happen, is that we miss a new notfication while the old
> notification is still in the queue. But I don't think this is a problem.

OK, so the expression of the race is that the latest notification gets
lost.  If something is tracking values, you'd really like to lose the
previous one (which is now irrelevant), not the latest one.  The point is
there's still a race.

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v4] pm_qos: make update_request non blocking
  2010-06-09 16:07     ` James Bottomley
@ 2010-06-09 16:32       ` Florian Mickler
  2010-06-09 17:05         ` James Bottomley
  2010-06-09 17:05         ` James Bottomley
  2010-06-09 16:32       ` Florian Mickler
  1 sibling, 2 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-09 16:32 UTC (permalink / raw)
  To: James Bottomley
  Cc: pm list, markgross, mgross, linville, Frederic Weisbecker,
	Jonathan Corbet, Thomas Gleixner, linux-kernel

On Wed, 09 Jun 2010 12:07:25 -0400
James Bottomley <James.Bottomley@suse.de> wrote:

> On Wed, 2010-06-09 at 18:00 +0200, Florian Mickler wrote:
> > On Wed, 09 Jun 2010 11:37:12 -0400
> > James Bottomley <James.Bottomley@suse.de> wrote:
> > 
> > 
> > > This still isn't resilient against two successive calls to update.  If
> > > the second one gets to schedule_work() before the work of the first one
> > > has finished, you'll corrupt the workqueue.
> > 
> > Sorry, I don't see it. Can you elaborate?
> > 
> > In "run_workqueue(" we do a list_del_init() which resets the
> > list-pointers of the workitem and only after that reset the
> > WORK_STRUCT_PENDING member of said structure. 
> > 
> > 
> > schedule_work does a queue_work_on which does a test_and_set_bit on
> > the WORK_STRUCT_PENDING member of the work and only queues the work
> > via list_add_tail in insert_work afterwards.
> > 
> > Where is my think'o? Or was this fixed while you didn't look?
> > 
> > So what _can_ happen, is that we miss a new notfication while the old
> > notification is still in the queue. But I don't think this is a problem.
> 
> OK, so the expression of the race is that the latest notification gets
> lost.  If something is tracking values, you'd really like to lose the
> previous one (which is now irrelevant) not the latest one.  The point is
> there's still a race.
> 
> James
> 

Yeah, but for blocking notification it is not that bad. 

Doesn't using blocking notifiers mean that you always need to check
via pm_qos_request() to get the latest value anyway? I.e. the
notification is just a hint that something changed, so you can act on
it. But if you act (be it because of a notification or because of
something else), then you have to get the current value anyway.
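
Something like this is what I mean by treating the notification as a hint
(untested sketch; the foo_* callback and consumer are hypothetical):

#include <linux/notifier.h>
#include <linux/pm_qos_params.h>

static int foo_lat_notify(struct notifier_block *nb, unsigned long value,
			  void *data)
{
	/*
	 * Ignore the value that was passed in and re-read the current
	 * target, so we never act on a possibly stale notification.
	 */
	int cur = pm_qos_request(PM_QOS_NETWORK_LATENCY);

	foo_recalculate(cur);		/* hypothetical consumer */
	return NOTIFY_OK;
}

static struct notifier_block foo_lat_nb = {
	.notifier_call = foo_lat_notify,
};

/* in init: pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, &foo_lat_nb); */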

I think there are two schemes of operation: one which needs
atomic notifiers, where it would be bad if we lost any notification,
and another where the notification is just a way of doing some work in
a timely fashion, but it isn't critical that it is done right away.

pm_qos was the second kind of operation up until now, because the
notifiers may have gotten scheduled away while blocked.

I think we should allow for both kinds of operation simultaneously. But
since I got pushback on the change from John Linville when I touched his
code, I got a bit reluctant and just did the simple variant. :)

(For example, if we want to implement some sort of debugging stats, we
could use blocking notifications, but if we, for example, block some
C-states due to a latency requirement, we probably need atomic
notifications.)

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v4] pm_qos: make update_request non blocking
  2010-06-09 16:32       ` Florian Mickler
  2010-06-09 17:05         ` James Bottomley
@ 2010-06-09 17:05         ` James Bottomley
  2010-06-09 17:31           ` Florian Mickler
                             ` (3 more replies)
  1 sibling, 4 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-09 17:05 UTC (permalink / raw)
  To: Florian Mickler
  Cc: pm list, markgross, mgross, linville, Frederic Weisbecker,
	Jonathan Corbet, Thomas Gleixner, linux-kernel

On Wed, 2010-06-09 at 18:32 +0200, Florian Mickler wrote:
> On Wed, 09 Jun 2010 12:07:25 -0400
> James Bottomley <James.Bottomley@suse.de> wrote:
> 
> > On Wed, 2010-06-09 at 18:00 +0200, Florian Mickler wrote:
> > > On Wed, 09 Jun 2010 11:37:12 -0400
> > > James Bottomley <James.Bottomley@suse.de> wrote:
> > > 
> > > 
> > > > This still isn't resilient against two successive calls to update.  If
> > > > the second one gets to schedule_work() before the work of the first one
> > > > has finished, you'll corrupt the workqueue.
> > > 
> > > Sorry, I don't see it. Can you elaborate?
> > > 
> > > In "run_workqueue(" we do a list_del_init() which resets the
> > > list-pointers of the workitem and only after that reset the
> > > WORK_STRUCT_PENDING member of said structure. 
> > > 
> > > 
> > > schedule_work does a queue_work_on which does a test_and_set_bit on
> > > the WORK_STRUCT_PENDING member of the work and only queues the work
> > > via list_add_tail in insert_work afterwards.
> > > 
> > > Where is my think'o? Or was this fixed while you didn't look?
> > > 
> > > So what _can_ happen, is that we miss a new notfication while the old
> > > notification is still in the queue. But I don't think this is a problem.
> > 
> > OK, so the expression of the race is that the latest notification gets
> > lost.  If something is tracking values, you'd really like to lose the
> > previous one (which is now irrelevant) not the latest one.  The point is
> > there's still a race.
> > 
> > James
> > 
> 
> Yeah, but for blocking notification it is not that bad. 

The network latency notifier uses the value to recalculate something.
Losing the last value will mean it's using stale data.

> Doesn't using blocking notifiers mean that you need to always check
> via pm_qos_request() to get the latest value anyways? I.e. the
> notification is just a hint, that something changed so you can act on
> it. But if you act (may it because of notification or because of
> something other) then you have to get the current value anyways.

Well, the network notifier assumes the notifier value is the latest ... 

> I think there are 2 schemes of operation. The one which needs
> atomic notifiers and where it would be bad if we lost any notification
> and the other where it is just a way of doing some work in a timely
> fashion but it isn't critical that it is done right away.
> 
> pm_qos was the second kind of operation up until now, because the
> notifiers may have got scheduled away while blocked. 

Actually, no ... the current API preserves ordering semantics.  If a
notifier sleeps and another change comes along, the update callsite will
sleep on the notifier's mutex ... so ordering is always preserved.

> I think we should allow for both kinds of operations simultaneous. But
> as I got that pushback to the change from John Linville as I touched his
> code, I got a bit reluctant and just have done the simple variant. :)

The code I proposed does ... but it relies on the callsite supplying the
necessary context.

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v4] pm_qos: make update_request non blocking
  2010-06-09 17:05         ` James Bottomley
  2010-06-09 17:31           ` Florian Mickler
@ 2010-06-09 17:31           ` Florian Mickler
  2010-06-10  7:45           ` Florian Mickler
  2010-06-10  7:45           ` Florian Mickler
  3 siblings, 0 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-09 17:31 UTC (permalink / raw)
  To: James Bottomley
  Cc: pm list, markgross, mgross, linville, Frederic Weisbecker,
	Jonathan Corbet, Thomas Gleixner, linux-kernel

On Wed, 09 Jun 2010 13:05:49 -0400
James Bottomley <James.Bottomley@suse.de> wrote:

> On Wed, 2010-06-09 at 18:32 +0200, Florian Mickler wrote:
> > Yeah, but for blocking notification it is not that bad. 
> 
> The network latency notifier uses the value to recalculate something.
> Losing the last value will mean it's using stale data.

Ah. Indeed. I didn't do my homework there. That sucks. (Btw, I know why
you said "recalculate _something_" :) )

It even fetches the value via pm_qos_get_request() in the middle. But
obviously, if we do not report the last value but the value before that,
then this is bad.

We could fix it, though, for all current (two) in-tree users by always
passing a negative value to the notification. Then the network
notifier fetches the value via pm_qos_get_request(),
and cpuidle ignores the value anyway.
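
I.e., roughly (untested sketch of the framework side):

	/* in update_notify(), instead of passing the computed extreme value: */
	blocking_notifier_call_chain(obj->notifiers,
				     (unsigned long) -1 /* "look it up yourself" */,
				     NULL);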

> > 
> > pm_qos was the second kind of operation up until now, because the
> > notifiers may have got scheduled away while blocked. 
> 
> Actually, no ... the current API preserves ordering semantics.  If a
> notifier sleeps and another change comes along, the update callsite will
> sleep on the notifier's mutex ... so ordering is always preserved.

Ah! Ordering. Something I hadn't thought of. But still no luck for me. 

> 
> > I think we should allow for both kinds of operations simultaneous. But
> > as I got that pushback to the change from John Linville as I touched his
> > code, I got a bit reluctant and just have done the simple variant. :)
> 
> The code I proposed does ... but it relies on the callsite supplying the
> necessary context.
> 

Yes, I know. It's just that if we want to have both kinds of notifiers,
we need to use an atomic notifier chain for that constraint. And then we
have the issue that John objected to, namely pushing all the
schedule_work() code onto each of the API users.

Anyway, I think your patch is better in that it is correct.

It just doesn't solve the issue for constraints where drivers want to
update from interrupt context but notifiers want to be able to work from
process context.

> James
> 

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v4] pm_qos: make update_request non blocking
  2010-06-09 17:05         ` James Bottomley
  2010-06-09 17:31           ` Florian Mickler
  2010-06-09 17:31           ` Florian Mickler
@ 2010-06-10  7:45           ` Florian Mickler
  2010-06-10 13:39             ` James Bottomley
  2010-06-10 13:39             ` [linux-pm] " James Bottomley
  2010-06-10  7:45           ` Florian Mickler
  3 siblings, 2 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-10  7:45 UTC (permalink / raw)
  To: James Bottomley
  Cc: pm list, markgross, mgross, linville, Frederic Weisbecker,
	Jonathan Corbet, Thomas Gleixner, linux-kernel

On Wed, 09 Jun 2010 13:05:49 -0400
James Bottomley <James.Bottomley@suse.de> wrote:
> On Wed, 2010-06-09 at 18:32 +0200, Florian Mickler wrote:
> > On Wed, 09 Jun 2010 12:07:25 -0400
> > James Bottomley <James.Bottomley@suse.de> wrote:
> > > OK, so the expression of the race is that the latest notification gets
> > > lost.  If something is tracking values, you'd really like to lose the
> > > previous one (which is now irrelevant) not the latest one.  The point is
> > > there's still a race.
> > > 
> > > James
> > > 
> > 
> > Yeah, but for blocking notification it is not that bad. 
> 
> The network latency notifier uses the value to recalculate something.
> Losing the last value will mean it's using stale data.

Actually after pondering a bit, it is not stale data that gets
delivered: (Correct me if I'm wrong)

The update_notify() function determines the extreme value and then
calls the blocking notifier chain.

But just before the update_notify() function gets called, the
work structure is reset and re-queueable. So it is possible to queue it
again even before update_notify() has read the extreme value.

So the notified value is always the latest or there is another
notification underway.

> 
> > Doesn't using blocking notifiers mean that you need to always check
> > via pm_qos_request() to get the latest value anyways? I.e. the
> > notification is just a hint, that something changed so you can act on
> > it. But if you act (may it because of notification or because of
> > something other) then you have to get the current value anyways.
> 
> Well, the network notifier assumes the notifier value is the latest ... 
> 
> > I think there are 2 schemes of operation. The one which needs
> > atomic notifiers and where it would be bad if we lost any notification
> > and the other where it is just a way of doing some work in a timely
> > fashion but it isn't critical that it is done right away.
> > 
> > pm_qos was the second kind of operation up until now, because the
> > notifiers may have got scheduled away while blocked. 
> 
> Actually, no ... the current API preserves ordering semantics.  If a
> notifier sleeps and another change comes along, the update callsite will
> sleep on the notifier's mutex ... so ordering is always preserved.

If my reasoning above is right, then the only change in semantics is
that you can no longer determine the number of update_request() calls
from the number of notifications.

> 
> James
> 

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-10  7:45           ` Florian Mickler
  2010-06-10 13:39             ` James Bottomley
@ 2010-06-10 13:39             ` James Bottomley
  2010-06-10 14:41               ` Florian Mickler
  2010-06-10 14:41               ` [PATCH v4] pm_qos: make update_request non blocking Florian Mickler
  1 sibling, 2 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-10 13:39 UTC (permalink / raw)
  To: Florian Mickler
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Thu, 2010-06-10 at 09:45 +0200, Florian Mickler wrote:
> On Wed, 09 Jun 2010 13:05:49 -0400
> James Bottomley <James.Bottomley@suse.de> wrote:
> > On Wed, 2010-06-09 at 18:32 +0200, Florian Mickler wrote:
> > > On Wed, 09 Jun 2010 12:07:25 -0400
> > > James Bottomley <James.Bottomley@suse.de> wrote:
> > > > OK, so the expression of the race is that the latest notification gets
> > > > lost.  If something is tracking values, you'd really like to lose the
> > > > previous one (which is now irrelevant) not the latest one.  The point is
> > > > there's still a race.
> > > > 
> > > > James
> > > > 
> > > 
> > > Yeah, but for blocking notification it is not that bad. 
> > 
> > The network latency notifier uses the value to recalculate something.
> > Losing the last value will mean it's using stale data.
> 
> Actually after pondering a bit, it is not stale data that gets
> delivered: (Correct me if I'm wrong)
> 
> The update_notify() function determines the extreme value and then
> calls the blocking_notifier_chain. 
> 
> But just before the update_notify() function get's called, the
> work-structure is reset and re-queue-able. So it is possible to queue it
> already even before the extreme_value in update_notify get's
> determined. 
> 
> So the notified value is always the latest or there is another
> notification underway.

Well, no ... it's a race, and like all good races the winner is
non-deterministic.

If the update comes in before the work queue is run, then everyone
eventually sees the new value.  If it comes in as the chain is running,
some notifiers see the old value and some the new.  If it comes in
during back end processing, no-one sees the new value.

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-10 13:39             ` [linux-pm] " James Bottomley
@ 2010-06-10 14:41               ` Florian Mickler
  2010-06-11 14:25                 ` James Bottomley
  2010-06-11 14:25                 ` [linux-pm] " James Bottomley
  2010-06-10 14:41               ` [PATCH v4] pm_qos: make update_request non blocking Florian Mickler
  1 sibling, 2 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-10 14:41 UTC (permalink / raw)
  To: James Bottomley
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Thu, 10 Jun 2010 08:39:04 -0500
James Bottomley <James.Bottomley@suse.de> wrote:

> On Thu, 2010-06-10 at 09:45 +0200, Florian Mickler wrote:
> > On Wed, 09 Jun 2010 13:05:49 -0400
> > James Bottomley <James.Bottomley@suse.de> wrote:
> > > On Wed, 2010-06-09 at 18:32 +0200, Florian Mickler wrote:
> > > > On Wed, 09 Jun 2010 12:07:25 -0400
> > > > James Bottomley <James.Bottomley@suse.de> wrote:
> > > > > OK, so the expression of the race is that the latest notification gets
> > > > > lost.  If something is tracking values, you'd really like to lose the
> > > > > previous one (which is now irrelevant) not the latest one.  The point is
> > > > > there's still a race.
> > > > > 
> > > > > James
> > > > > 
> > > > 
> > > > Yeah, but for blocking notification it is not that bad. 
> > > 
> > > The network latency notifier uses the value to recalculate something.
> > > Losing the last value will mean it's using stale data.
> > 
> > Actually after pondering a bit, it is not stale data that gets
> > delivered: (Correct me if I'm wrong)
> > 
> > The update_notify() function determines the extreme value and then
> > calls the blocking_notifier_chain. 
> > 
> > But just before the update_notify() function get's called, the
> > work-structure is reset and re-queue-able. So it is possible to queue it
> > already even before the extreme_value in update_notify get's
> > determined. 
> > 
> > So the notified value is always the latest or there is another
> > notification underway.
> 
> Well, no ... it's a race, and like all good races the winner is non
> deterministic.

Can you point out where I'm wrong?

U1. update_request() gets called
U2. 	new extreme value gets calculated under the spinlock
U3.	notify gets queued if its WORK_STRUCT_PENDING bit is not set.

run_workqueue() does the following:
R1. clears the WORK_STRUCT_PENDING bit
R2. calls update_notify()
R3. 	reads the current extreme value
R4. 	notification gets called with that value		


If another update_request() gets to schedule_work() before
run_workqueue() has cleared the WORK_STRUCT_PENDING bit, the work will not
be requeued, but R3 hasn't been executed yet. So the notifiers will still
get the latest value.

If another update_request() gets to schedule_work() after
run_workqueue() has cleared the WORK_STRUCT_PENDING bit, the work will be
requeued and another update_notify() will be executed later.

> 
> If the update comes in before the work queue is run, then everyone
> eventually sees the new value.  If it comes in as the chain is running,
> some notifiers see the old value and some the new.  If it comes in
> during back end processing, no-one sees the new value.
> 
> James
> 

No, I think that is not correct. There will always be another
update_notify() call after the last update_request() recalculation, due to
the fact that run_workqueue() calls work_clear_pending() before calling
the callback function of that work.

(We could even requeue the work from within that callback function if we
wanted to keep the number of notifications the same. But then we would
need to queue the extreme_values in the framework. I think this
isn't needed.)

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-10 14:41               ` Florian Mickler
  2010-06-11 14:25                 ` James Bottomley
@ 2010-06-11 14:25                 ` James Bottomley
  2010-06-11 15:49                   ` Florian Mickler
                                     ` (6 more replies)
  1 sibling, 7 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-11 14:25 UTC (permalink / raw)
  To: Florian Mickler
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Thu, 2010-06-10 at 16:41 +0200, Florian Mickler wrote:
> > > So the notified value is always the latest or there is another
> > > notification underway.
> > 
> > Well, no ... it's a race, and like all good races the winner is non
> > deterministic.
> 
> Can you point out where I'm wrong?
> 
> U1. update_request gets called
> U2. 	new extreme value gets calculated under spinlock
> U3.	notify gets queued if its WORK_PENDING_BIT is not set.
> 
> run_workqueue() does the following:
> R1. clears the WORK_PENDING_BIT
> R2. calls update_notify()
> R3. 	reads the current extreme value
> R4. 	notification gets called with that value		
> 
> 
> If another update_request comes to schedule_work before
> run_workqueue() has cleared the WORK_PENDING_BIT, the work will not be
> requeued, but R3 isn't yet executed. So the notifiers will get the last
> value.

So the race now only causes lost older notifications ... as long as the
consumers are OK with that (it is an API change) then this should work.
You're still not taking advantage of the user context passed in, though,
so this does needlessly delay notifications for that case.

Actually, pm_qos_remove now needs a flush_scheduled_work() since you don't
want to return until the list is clear (since the next action may be to
free the object).
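
For illustration only, a teardown along these lines might look like the
following sketch; the function is hypothetical (it is not part of the
posted patches) and it only relies on the notify work item the patch
adds to struct pm_qos_object:

	/* hypothetical teardown path; not in the posted patches */
	static void demo_pm_qos_teardown(struct pm_qos_object *obj)
	{
		/*
		 * Wait until a notification queued via schedule_work()
		 * has finished before the caller frees or unloads
		 * anything the notifier callbacks may still touch.
		 */
		cancel_work_sync(&obj->notify);
		/* or, more coarsely: flush_scheduled_work(); */
	}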

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-11 14:25                 ` [linux-pm] " James Bottomley
  2010-06-11 15:49                   ` Florian Mickler
@ 2010-06-11 15:49                   ` Florian Mickler
  2010-06-14 14:33                   ` Florian Mickler
                                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-11 15:49 UTC (permalink / raw)
  To: James Bottomley
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Fri, 11 Jun 2010 09:25:52 -0500
James Bottomley <James.Bottomley@suse.de> wrote:

> On Thu, 2010-06-10 at 16:41 +0200, Florian Mickler wrote:
> > > > So the notified value is always the latest or there is another
> > > > notification underway.
> > > 
> > > Well, no ... it's a race, and like all good races the winner is non
> > > deterministic.
> > 
> > Can you point out where I'm wrong?
> > 
> > U1. update_request gets called
> > U2. 	new extreme value gets calculated under spinlock
> > U3.	notify gets queued if its WORK_PENDING_BIT is not set.
> > 
> > run_workqueue() does the following:
> > R1. clears the WORK_PENDING_BIT
> > R2. calls update_notify()
> > R3. 	reads the current extreme value
> > R4. 	notification gets called with that value		
> > 
> > 
> > If another update_request comes to schedule_work before
> > run_workqueue() has cleared the WORK_PENDING_BIT, the work will not be
> > requeued, but R3 isn't yet executed. So the notifiers will get the last
> > value.
> 
> So the race now only causes lost older notifications ... as long as the
> consumers are OK with that (it is an API change) then this should work.
> You're still not taking advantage of the user context passed in, though,
> so this does needlessly delay notifications for that case.

Right. We can use execute_in_process_context. 
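
As a rough, self-contained sketch of what execute_in_process_context()
buys here (names are made up and this is not from the posted patches):
the callback runs immediately when the caller already has process
context and is deferred to the shared workqueue only otherwise.

	#include <linux/workqueue.h>

	static struct execute_work demo_ew;

	static void demo_notify(struct work_struct *work)
	{
		/* call the blocking notifier chain here, as update_notify() does */
	}

	static void demo_call_notifiers(void)
	{
		/*
		 * Runs demo_notify() right away if we have process
		 * context, otherwise queues it on the shared workqueue.
		 */
		execute_in_process_context(demo_notify, &demo_ew);
	}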
 
> Actually, pm_qos_remove now needs a flush_scheduled_work() since you don't
> want to return until the list is clear (since the next action may be to
> free the object).

Yes. Good point, will fix. 

> James
> 

Cheers,
Flo


^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-11 14:25                 ` [linux-pm] " James Bottomley
                                     ` (2 preceding siblings ...)
  2010-06-14 14:33                   ` Florian Mickler
@ 2010-06-14 14:33                   ` Florian Mickler
  2010-06-14 14:44                     ` James Bottomley
  2010-06-14 14:44                     ` [linux-pm] " James Bottomley
  2010-06-14 14:46                   ` [PATCH 1/3] " florian
                                     ` (2 subsequent siblings)
  6 siblings, 2 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-14 14:33 UTC (permalink / raw)
  To: James Bottomley
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Fri, 11 Jun 2010 09:25:52 -0500
James Bottomley <James.Bottomley@suse.de> wrote:

> 
> Actually, pm_qos_remove now needs a flush_scheduled_work() since you don't
> want to return until the list is clear (since the next action may be to
> free the object).

The work-items are allocated in the pm_qos objects (which never get
freed), so we should be fine there.

> 
> James
> 

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-14 14:33                   ` [linux-pm] " Florian Mickler
  2010-06-14 14:44                     ` James Bottomley
@ 2010-06-14 14:44                     ` James Bottomley
  2010-06-14 14:49                       ` Florian Mickler
  2010-06-14 14:49                       ` [linux-pm] " Florian Mickler
  1 sibling, 2 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-14 14:44 UTC (permalink / raw)
  To: Florian Mickler
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Mon, 2010-06-14 at 16:33 +0200, Florian Mickler wrote:
> On Fri, 11 Jun 2010 09:25:52 -0500
> James Bottomley <James.Bottomley@suse.de> wrote:
> 
> > 
> > Actually, pm_qos_remove now needs a flush_scheduled_work() since you don't
> > want to return until the list is clear (since the next action may be to
> > free the object).
> 
> The work-items are allocated in the pm_qos objects (which never get
> freed), so we should be fine there.

That's not a safe assumption.  Once we get into drivers, timers and cpu
idle states, I can see these things being in modules.

Regardless, it's bad programming practice to be using something after
the final remove is called; it certainly violates the principle of least
surprise and would usually eventually cause problems.

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH 1/3] pm_qos: make update_request non blocking
  2010-06-11 14:25                 ` [linux-pm] " James Bottomley
                                     ` (3 preceding siblings ...)
  2010-06-14 14:33                   ` [linux-pm] " Florian Mickler
@ 2010-06-14 14:46                   ` florian
  2010-06-14 14:46                   ` [PATCH 2/3] pm_qos: add atomic notifier chain florian
  2010-06-14 14:46                   ` [PATCH 3/3] pm_qos: only schedule work when in interrupt context florian
  6 siblings, 0 replies; 40+ messages in thread
From: florian @ 2010-06-14 14:46 UTC (permalink / raw)
  To: James.Bottomley
  Cc: markgross, Frederic Weisbecker, Jonathan Corbet, linville,
	linux-kernel, Florian Mickler, pm list, Thomas Gleixner

In order to allow drivers to use pm_qos_update_request from interrupt
context we call the notifiers via schedule_work().

Signed-off-by: Florian Mickler <florian@mickler.org>
---

No changes compared to "[PATCH v4] pm_qos: make update_request non blocking".

In the 3rd patch of this series we change the logic to use in_interrupt() to determine whether it
is necessary to schedule a work item or whether we can just execute the notifiers right away.

As the pm_qos objects are never destroyed, a flush_scheduled_work() is not needed.


 kernel/pm_qos_params.c |   47 ++++++++++++++++++++++++++++++-----------------
 1 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index f42d3f7..296343a 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -40,6 +40,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/init.h>
+#include <linux/workqueue.h>
 
 #include <linux/uaccess.h>
 
@@ -60,11 +61,13 @@ struct pm_qos_request_list {
 
 static s32 max_compare(s32 v1, s32 v2);
 static s32 min_compare(s32 v1, s32 v2);
+static void update_notify(struct work_struct *work);
 
 struct pm_qos_object {
 	struct pm_qos_request_list requests;
 	struct blocking_notifier_head *notifiers;
 	struct miscdevice pm_qos_power_miscdev;
+	struct work_struct notify;
 	char *name;
 	s32 default_value;
 	atomic_t target_value;
@@ -72,10 +75,12 @@ struct pm_qos_object {
 };
 
 static struct pm_qos_object null_pm_qos;
+
 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
 static struct pm_qos_object cpu_dma_pm_qos = {
 	.requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
 	.notifiers = &cpu_dma_lat_notifier,
+	.notify = __WORK_INITIALIZER(cpu_dma_pm_qos.notify, update_notify),
 	.name = "cpu_dma_latency",
 	.default_value = 2000 * USEC_PER_SEC,
 	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
@@ -86,6 +91,7 @@ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
 static struct pm_qos_object network_lat_pm_qos = {
 	.requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
 	.notifiers = &network_lat_notifier,
+	.notify = __WORK_INITIALIZER(network_lat_pm_qos.notify, update_notify),
 	.name = "network_latency",
 	.default_value = 2000 * USEC_PER_SEC,
 	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
@@ -97,13 +103,14 @@ static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_object network_throughput_pm_qos = {
 	.requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
 	.notifiers = &network_throughput_notifier,
+	.notify = __WORK_INITIALIZER(network_throughput_pm_qos.notify,
+			update_notify),
 	.name = "network_throughput",
 	.default_value = 0,
 	.target_value = ATOMIC_INIT(0),
 	.comparitor = max_compare
 };
 
-
 static struct pm_qos_object *pm_qos_array[] = {
 	&null_pm_qos,
 	&cpu_dma_pm_qos,
@@ -135,35 +142,40 @@ static s32 min_compare(s32 v1, s32 v2)
 	return min(v1, v2);
 }
 
+static void update_notify(struct work_struct *work)
+{
+	struct pm_qos_object *obj =
+		container_of(work, struct pm_qos_object, notify);
+
+	s32 extreme_value = atomic_read(&obj->target_value);
+	blocking_notifier_call_chain(
+		obj->notifiers,
+			(unsigned long) extreme_value, NULL);
+}
 
 static void update_target(int pm_qos_class)
 {
 	s32 extreme_value;
 	struct pm_qos_request_list *node;
+	struct pm_qos_object *obj = pm_qos_array[pm_qos_class];
 	unsigned long flags;
 	int call_notifier = 0;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	extreme_value = pm_qos_array[pm_qos_class]->default_value;
-	list_for_each_entry(node,
-			&pm_qos_array[pm_qos_class]->requests.list, list) {
-		extreme_value = pm_qos_array[pm_qos_class]->comparitor(
-				extreme_value, node->value);
-	}
-	if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
-			extreme_value) {
+	extreme_value = obj->default_value;
+	list_for_each_entry(node, &obj->requests.list, list)
+		extreme_value = obj->comparitor(extreme_value, node->value);
+
+	if (atomic_read(&obj->target_value) != extreme_value) {
 		call_notifier = 1;
-		atomic_set(&pm_qos_array[pm_qos_class]->target_value,
-				extreme_value);
+		atomic_set(&obj->target_value, extreme_value);
 		pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class,
-			atomic_read(&pm_qos_array[pm_qos_class]->target_value));
+			atomic_read(&obj->target_value));
 	}
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	if (call_notifier)
-		blocking_notifier_call_chain(
-				pm_qos_array[pm_qos_class]->notifiers,
-					(unsigned long) extreme_value, NULL);
+		schedule_work(&obj->notify);
 }
 
 static int register_pm_qos_misc(struct pm_qos_object *qos)
@@ -301,7 +313,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_request);
  * @pm_qos_class: identifies which qos target changes should be notified.
  * @notifier: notifier block managed by caller.
  *
- * will register the notifier into a notification chain that gets called
+ * Will register the notifier into a notification chain that gets called
  * upon changes to the pm_qos_class target value.
  */
 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
@@ -320,7 +332,7 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
  * @pm_qos_class: identifies which qos target changes are notified.
  * @notifier: notifier block to be removed.
  *
- * will remove the notifier from the notification chain that gets called
+ * Will remove the notifier from the notification chain that gets called
  * upon changes to the pm_qos_class target value.
  */
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
@@ -392,6 +404,7 @@ static int __init pm_qos_power_init(void)
 {
 	int ret = 0;
 
+
 	ret = register_pm_qos_misc(&cpu_dma_pm_qos);
 	if (ret < 0) {
 		printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
-- 
1.7.1

^ permalink raw reply related	[flat|nested] 40+ messages in thread

* [PATCH 2/3] pm_qos: add atomic notifier chain
  2010-06-11 14:25                 ` [linux-pm] " James Bottomley
                                     ` (4 preceding siblings ...)
  2010-06-14 14:46                   ` [PATCH 1/3] " florian
@ 2010-06-14 14:46                   ` florian
  2010-06-14 14:46                   ` [PATCH 3/3] pm_qos: only schedule work when in interrupt context florian
  6 siblings, 0 replies; 40+ messages in thread
From: florian @ 2010-06-14 14:46 UTC (permalink / raw)
  To: James.Bottomley
  Cc: markgross, Frederic Weisbecker, Jonathan Corbet, linville,
	linux-kernel, Florian Mickler, pm list, Thomas Gleixner

This allows for atomic notifications.
This may be necessary in some future scenarios,
as it eliminates any scheduling between update_request and the
notifier callback.
---

I took the naming of the interface functions James Bottomley suggested in an earlier patch.

 include/linux/pm_qos_params.h |    1 +
 kernel/pm_qos_params.c        |   76 ++++++++++++++++++++++++++++++++---------
 2 files changed, 61 insertions(+), 16 deletions(-)

diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index 8ba440e..0f41378 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -23,5 +23,6 @@ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
 
 int pm_qos_request(int pm_qos_class);
 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_add_atomic_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
 
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 296343a..9346906 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -65,7 +65,8 @@ static void update_notify(struct work_struct *work);
 
 struct pm_qos_object {
 	struct pm_qos_request_list requests;
-	struct blocking_notifier_head *notifiers;
+	struct blocking_notifier_head *blocking_notifiers;
+	struct atomic_notifier_head *atomic_notifiers;
 	struct miscdevice pm_qos_power_miscdev;
 	struct work_struct notify;
 	char *name;
@@ -76,21 +77,25 @@ struct pm_qos_object {
 
 static struct pm_qos_object null_pm_qos;
 
-static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier_blocking);
+static ATOMIC_NOTIFIER_HEAD(cpu_dma_lat_notifier_atomic);
 static struct pm_qos_object cpu_dma_pm_qos = {
 	.requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
-	.notifiers = &cpu_dma_lat_notifier,
+	.blocking_notifiers = &cpu_dma_lat_notifier_blocking,
+	.atomic_notifiers = &cpu_dma_lat_notifier_atomic,
 	.notify = __WORK_INITIALIZER(cpu_dma_pm_qos.notify, update_notify),
 	.name = "cpu_dma_latency",
 	.default_value = 2000 * USEC_PER_SEC,
 	.target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
-	.comparitor = min_compare
+	.comparitor = min_compare,
 };
 
-static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+static BLOCKING_NOTIFIER_HEAD(network_lat_notifier_blocking);
+static ATOMIC_NOTIFIER_HEAD(network_lat_notifier_atomic);
 static struct pm_qos_object network_lat_pm_qos = {
 	.requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
-	.notifiers = &network_lat_notifier,
+	.blocking_notifiers = &network_lat_notifier_blocking,
+	.atomic_notifiers = &network_lat_notifier_atomic,
 	.notify = __WORK_INITIALIZER(network_lat_pm_qos.notify, update_notify),
 	.name = "network_latency",
 	.default_value = 2000 * USEC_PER_SEC,
@@ -99,10 +104,12 @@ static struct pm_qos_object network_lat_pm_qos = {
 };
 
 
-static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier_blocking);
+static ATOMIC_NOTIFIER_HEAD(network_throughput_notifier_atomic);
 static struct pm_qos_object network_throughput_pm_qos = {
 	.requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
-	.notifiers = &network_throughput_notifier,
+	.blocking_notifiers = &network_throughput_notifier_blocking,
+	.atomic_notifiers = &network_throughput_notifier_atomic,
 	.notify = __WORK_INITIALIZER(network_throughput_pm_qos.notify,
 			update_notify),
 	.name = "network_throughput",
@@ -142,6 +149,16 @@ static s32 min_compare(s32 v1, s32 v2)
 	return min(v1, v2);
 }
 
+static void pm_qos_call_notifiers(struct pm_qos_object *o,
+				  unsigned long curr_value)
+{
+	schedule_work(&o->notify);
+
+	if (o->atomic_notifiers)
+		atomic_notifier_call_chain(o->atomic_notifiers,
+					   curr_value, NULL);
+}
+
 static void update_notify(struct work_struct *work)
 {
 	struct pm_qos_object *obj =
@@ -149,7 +166,7 @@ static void update_notify(struct work_struct *work)
 
 	s32 extreme_value = atomic_read(&obj->target_value);
 	blocking_notifier_call_chain(
-		obj->notifiers,
+		obj->blocking_notifiers,
 			(unsigned long) extreme_value, NULL);
 }
 
@@ -175,7 +192,7 @@ static void update_target(int pm_qos_class)
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	if (call_notifier)
-		schedule_work(&obj->notify);
+		pm_qos_call_notifiers(obj, extreme_value);
 }
 
 static int register_pm_qos_misc(struct pm_qos_object *qos)
@@ -310,24 +327,48 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_request);
 
 /**
  * pm_qos_add_notifier - sets notification entry for changes to target value
- * @pm_qos_class: identifies which qos target changes should be notified.
+ * @pm_qos_class: identifies which qos target changes should trigger
+ *   	notifications.
  * @notifier: notifier block managed by caller.
  *
  * Will register the notifier into a notification chain that gets called
  * upon changes to the pm_qos_class target value.
  */
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *nb)
 {
 	int retval;
 
 	retval = blocking_notifier_chain_register(
-			pm_qos_array[pm_qos_class]->notifiers, notifier);
+			pm_qos_array[pm_qos_class]->blocking_notifiers, nb);
 
 	return retval;
 }
 EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
 
 /**
+ * pm_qos_add_atomic_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should trigger
+ * 	notifications.
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets
+ * called upon changes to the pm_qos_class target value.  The notifier
+ * may be called from atomic context.  use @pm_qos_remove_notifier to
+ * unregister.
+ *
+ */
+int pm_qos_add_atomic_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+	/* guard against programming error */
+	BUG_ON(!pm_qos_array[pm_qos_class]->atomic_notifiers);
+
+	return atomic_notifier_chain_register(
+		pm_qos_array[pm_qos_class]->atomic_notifiers,
+		notifier);
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_atomic_notifier);
+
+/**
  * pm_qos_remove_notifier - deletes notification entry from chain.
  * @pm_qos_class: identifies which qos target changes are notified.
  * @notifier: notifier block to be removed.
@@ -335,13 +376,16 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
  * Will remove the notifier from the notification chain that gets called
  * upon changes to the pm_qos_class target value.
  */
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *nb)
 {
 	int retval;
 
 	retval = blocking_notifier_chain_unregister(
-			pm_qos_array[pm_qos_class]->notifiers, notifier);
-
+			pm_qos_array[pm_qos_class]->blocking_notifiers, nb);
+	if (retval) {
+		retval = atomic_notifier_chain_unregister(
+			pm_qos_array[pm_qos_class]->atomic_notifiers, nb);
+	}
 	return retval;
 }
 EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-- 
1.7.1
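
For illustration only (not part of the patch), a client of the new chain
might look roughly like this; the demo_* names are hypothetical, while
pm_qos_add_atomic_notifier() and PM_QOS_CPU_DMA_LATENCY come from the
code above:

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <linux/pm_qos_params.h>

	/* may be invoked from atomic context, so it must not sleep */
	static int demo_qos_notify(struct notifier_block *nb,
				   unsigned long new_target, void *data)
	{
		/* react to the new cpu_dma_latency target here */
		return NOTIFY_OK;
	}

	static struct notifier_block demo_qos_nb = {
		.notifier_call = demo_qos_notify,
	};

	static int __init demo_init(void)
	{
		return pm_qos_add_atomic_notifier(PM_QOS_CPU_DMA_LATENCY,
						  &demo_qos_nb);
	}
	module_init(demo_init);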

^ permalink raw reply related	[flat|nested] 40+ messages in thread

* [PATCH 3/3] pm_qos: only schedule work when in interrupt context
  2010-06-11 14:25                 ` [linux-pm] " James Bottomley
                                     ` (5 preceding siblings ...)
  2010-06-14 14:46                   ` [PATCH 2/3] pm_qos: add atomic notifier chain florian
@ 2010-06-14 14:46                   ` florian
  2010-06-15 17:23                     ` Florian Mickler
  2010-06-15 17:23                     ` Florian Mickler
  6 siblings, 2 replies; 40+ messages in thread
From: florian @ 2010-06-14 14:46 UTC (permalink / raw)
  To: James.Bottomley
  Cc: markgross, Frederic Weisbecker, Jonathan Corbet, linville,
	linux-kernel, Florian Mickler, pm list, Thomas Gleixner

With this patch we only schedule the work when in interrupt context.

Before update_request was callable from interrupt-context there was a
1:1 relation between a change in the request-value and a notification.
This patch restores that behaviour for all constraints that have update_request
never called from interrupt context.

The notifier mutex serializes calls to blocking_notifier_call_chain, so
that we are serialized against any pending or currently executing notification.

Signed-off-by: Florian Mickler <florian@mickler.org>
---
 kernel/pm_qos_params.c |   10 +++++++---
 1 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 9346906..c06cae9 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -152,11 +152,15 @@ static s32 min_compare(s32 v1, s32 v2)
 static void pm_qos_call_notifiers(struct pm_qos_object *o,
 				  unsigned long curr_value)
 {
-	schedule_work(&o->notify);
-
 	if (o->atomic_notifiers)
 		atomic_notifier_call_chain(o->atomic_notifiers,
-					   curr_value, NULL);
+				(unsigned long) curr_value, NULL);
+
+	if (in_interrupt()) 
+		schedule_work(&o->notify);
+	else 
+		blocking_notifier_call_chain(o->blocking_notifiers, 
+				(unsigned long) curr_value, NULL);
 }
 
 static void update_notify(struct work_struct *work)
-- 
1.7.1

^ permalink raw reply related	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-14 14:44                     ` [linux-pm] " James Bottomley
  2010-06-14 14:49                       ` Florian Mickler
@ 2010-06-14 14:49                       ` Florian Mickler
  2010-06-14 15:10                         ` James Bottomley
  2010-06-14 15:10                         ` James Bottomley
  1 sibling, 2 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-14 14:49 UTC (permalink / raw)
  To: James Bottomley
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Mon, 14 Jun 2010 09:44:06 -0500
James Bottomley <James.Bottomley@suse.de> wrote:

> On Mon, 2010-06-14 at 16:33 +0200, Florian Mickler wrote:
> > On Fri, 11 Jun 2010 09:25:52 -0500
> > James Bottomley <James.Bottomley@suse.de> wrote:
> > 
> > > 
> > > Actually, pm_qos_remove now needs a flush_scheduled_work() since you don't
> > > want to return until the list is clear (since the next action may be to
> > > free the object).
> > 
> > The work-items are allocated in the pm_qos objects (which never get
> > freed), so we should be fine there.
> 
> That's not a safe assumption.  Once we get into drivers, timers and cpu
> idle states, I can see these things being in modules.
> 
> Regardless, it's bad programming practice to be using something after
> the final remove is called; it certainly violates the principle of least
> surprise and would usually eventually cause problems.
> 
> James
> 

I absolutely defer to you in this question. But there is no
pm_qos_remove at the moment, as far as I see? Should I add one? When
and how would it be called?

Maybe I'm not understanding you right at the moment. 

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-14 14:49                       ` [linux-pm] " Florian Mickler
@ 2010-06-14 15:10                         ` James Bottomley
  2010-06-14 15:20                           ` Florian Mickler
  2010-06-14 15:20                           ` [linux-pm] " Florian Mickler
  2010-06-14 15:10                         ` James Bottomley
  1 sibling, 2 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-14 15:10 UTC (permalink / raw)
  To: Florian Mickler
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Mon, 2010-06-14 at 16:49 +0200, Florian Mickler wrote:
> On Mon, 14 Jun 2010 09:44:06 -0500
> James Bottomley <James.Bottomley@suse.de> wrote:
> 
> > On Mon, 2010-06-14 at 16:33 +0200, Florian Mickler wrote:
> > > On Fri, 11 Jun 2010 09:25:52 -0500
> > > James Bottomley <James.Bottomley@suse.de> wrote:
> > > 
> > > > 
> > > > Actually, pm_qos_remove now needs a flush_scheduled_work() since you don't
> > > > want to return until the list is clear (since the next action may be to
> > > > free the object).
> > > 
> > > The work-items are allocated in the pm_qos objects (which never get
> > > freed), so we should be fine there.
> > 
> > That's not a safe assumption.  Once we get into drivers, timers and cpu
> > idle states, I can see these things being in modules.
> > 
> > Regardless, it's bad programming practice to be using something after
> > the final remove is called; it certainly violates the principle of least
> > surprise and would usually eventually cause problems.
> > 
> > James
> > 
> 
> I absolutely defer to you in this question. But there is no
> pm_qos_remove at the moment, as far as I see? Should I add one? When
> and how would it be called?
> 
> Maybe I'm not understanding you right at the moment. 

You need to flush in pm_qos_remove_notifier() to make sure you aren't
racing deferred work in the removed notifier with a return from remove,
so that the outcome is always consistent (i.e. the notifier sees all
pending events before removal).  On closer inspection, it looks like the
notifier mutexes are sufficient to make sure it doesn't return while the
notifier being removed is still running ... but that does mean that add
and remove require user context.

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [linux-pm] [PATCH v4] pm_qos: make update_request non blocking
  2010-06-14 15:10                         ` James Bottomley
  2010-06-14 15:20                           ` Florian Mickler
@ 2010-06-14 15:20                           ` Florian Mickler
  1 sibling, 0 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-14 15:20 UTC (permalink / raw)
  To: James Bottomley
  Cc: Jonathan Corbet, Frederic Weisbecker, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Mon, 14 Jun 2010 10:10:06 -0500
James Bottomley <James.Bottomley@suse.de> wrote:
 
> On closer inspection, it looks like the
> notifier mutexes are sufficient to make sure it doesn't return while the
> notifier being removed is still running ... but that does mean that add
> and remove require user context.
> 
> James
> 

Yes. I don't think we need to make add and remove callable from
interrupt context, do we?

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 3/3] pm_qos: only schedule work when in interrupt context
  2010-06-14 14:46                   ` [PATCH 3/3] pm_qos: only schedule work when in interrupt context florian
  2010-06-15 17:23                     ` Florian Mickler
@ 2010-06-15 17:23                     ` Florian Mickler
  2010-06-17 23:02                       ` James Bottomley
  2010-06-17 23:02                       ` James Bottomley
  1 sibling, 2 replies; 40+ messages in thread
From: Florian Mickler @ 2010-06-15 17:23 UTC (permalink / raw)
  To: James Bottomley
  Cc: Frederic Weisbecker, Jonathan Corbet, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

Hi James!

On Mon, 14 Jun 2010 16:46:40 +0200
florian@mickler.org wrote:

> With this patch we only schedule the work when in interrupt context.
> 
> Before update_request was callable from interrupt-context there was a
> 1:1 relation between a change in the request-value and a notification.
> This patch restores that behaviour for all constraints that have update_request
> never called from interrupt context.
> 
> The notifier mutex serializes calls to blocking_notifier_call_chain, so
> that we are serialized against any pending or currently executing notification.
> 
> Signed-off-by: Florian Mickler <florian@mickler.org>
> ---
>  kernel/pm_qos_params.c |   10 +++++++---
>  1 files changed, 7 insertions(+), 3 deletions(-)
> 
> diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
> index 9346906..c06cae9 100644
> --- a/kernel/pm_qos_params.c
> +++ b/kernel/pm_qos_params.c
> @@ -152,11 +152,15 @@ static s32 min_compare(s32 v1, s32 v2)
>  static void pm_qos_call_notifiers(struct pm_qos_object *o,
>  				  unsigned long curr_value)
>  {
> -	schedule_work(&o->notify);
> -
>  	if (o->atomic_notifiers)
>  		atomic_notifier_call_chain(o->atomic_notifiers,
> -					   curr_value, NULL);
> +				(unsigned long) curr_value, NULL);
> +
> +	if (in_interrupt()) 
> +		schedule_work(&o->notify);
> +	else 
> +		blocking_notifier_call_chain(o->blocking_notifiers, 
> +				(unsigned long) curr_value, NULL);
>  }
>  
>  static void update_notify(struct work_struct *work)

What about this? Is this ok? I don't know if it is benign to use
in_interrupt() here. I took this idea from the
execute_in_process_context() implementation. 


If this is ok, should I rebase them on your two pm_qos patches (plists
and the kzalloc removal)? 

Have you already thought about some debugging facility that would satisfy
the android needs? I have been thinking about either registering some
notifier callback or using the perf/tracing infrastructure,
but I have not looked into it yet.

Cheers,
Flo

^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH 3/3] pm_qos: only schedule work when in interrupt context
  2010-06-15 17:23                     ` Florian Mickler
@ 2010-06-17 23:02                       ` James Bottomley
  2010-06-17 23:02                       ` James Bottomley
  1 sibling, 0 replies; 40+ messages in thread
From: James Bottomley @ 2010-06-17 23:02 UTC (permalink / raw)
  To: Florian Mickler
  Cc: Frederic Weisbecker, Jonathan Corbet, markgross, linville,
	linux-kernel, pm list, Thomas Gleixner

On Tue, 2010-06-15 at 19:23 +0200, Florian Mickler wrote:
> Hi James!
> 
> On Mon, 14 Jun 2010 16:46:40 +0200
> florian@mickler.org wrote:
> 
> > With this patch we only schedule the work when in interrupt context.
> > 
> > Before update_request was callable from interrupt-context there was a
> > 1:1 relation between a change in the request-value and a notification.
> > This patch restores that behaviour for all constraints that have update_request
> > never called from interrupt context.
> > 
> > The notifier mutex serializes calls to blocking_notifier_call_chain, so
> > that we are serialized against any pending or currently executing notification.
> > 
> > Signed-off-by: Florian Mickler <florian@mickler.org>
> > ---
> >  kernel/pm_qos_params.c |   10 +++++++---
> >  1 files changed, 7 insertions(+), 3 deletions(-)
> > 
> > diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
> > index 9346906..c06cae9 100644
> > --- a/kernel/pm_qos_params.c
> > +++ b/kernel/pm_qos_params.c
> > @@ -152,11 +152,15 @@ static s32 min_compare(s32 v1, s32 v2)
> >  static void pm_qos_call_notifiers(struct pm_qos_object *o,
> >  				  unsigned long curr_value)
> >  {
> > -	schedule_work(&o->notify);
> > -
> >  	if (o->atomic_notifiers)
> >  		atomic_notifier_call_chain(o->atomic_notifiers,
> > -					   curr_value, NULL);
> > +				(unsigned long) curr_value, NULL);
> > +
> > +	if (in_interrupt()) 
> > +		schedule_work(&o->notify);
> > +	else 
> > +		blocking_notifier_call_chain(o->blocking_notifiers, 
> > +				(unsigned long) curr_value, NULL);
> >  }
> >  
> >  static void update_notify(struct work_struct *work)
> 
> What about this? Is this ok? I don't know if it is benign to use
> in_interrupt() here. I took this idea from the
> execute_in_process_context() implementation. 

I think it will work ... but I still think it's overly complex given the
listed requirements (android seems to only want atomic notifiers from
atomic contexts).

> If this is ok, should I rebase them on your two pm_qos patches (plists
> and the kzalloc removal)? 

Well, I would say yes.  However, for more impartial advice, I'd wait and
see what the pm maintainers want.

> Have you already thought about some debugging facility that would satisfy
> the android needs? I have been thinking about either registering some
> notifier callback or using the perf/tracing infrastructure,
> but I have not looked into it yet.

I was just going to try the conversion when the wakelocks stuff was
finally in and see if it worked in an android kernel.

James



^ permalink raw reply	[flat|nested] 40+ messages in thread

end of thread, other threads:[~2010-06-17 23:02 UTC | newest]

Thread overview: 40+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-06-09 15:29 [PATCH v4] pm_qos: make update_request non blocking florian
2010-06-09 15:37 ` James Bottomley
2010-06-09 15:37 ` James Bottomley
2010-06-09 16:00   ` Florian Mickler
2010-06-09 16:00   ` Florian Mickler
2010-06-09 16:07     ` James Bottomley
2010-06-09 16:07     ` James Bottomley
2010-06-09 16:32       ` Florian Mickler
2010-06-09 17:05         ` James Bottomley
2010-06-09 17:05         ` James Bottomley
2010-06-09 17:31           ` Florian Mickler
2010-06-09 17:31           ` Florian Mickler
2010-06-10  7:45           ` Florian Mickler
2010-06-10 13:39             ` James Bottomley
2010-06-10 13:39             ` [linux-pm] " James Bottomley
2010-06-10 14:41               ` Florian Mickler
2010-06-11 14:25                 ` James Bottomley
2010-06-11 14:25                 ` [linux-pm] " James Bottomley
2010-06-11 15:49                   ` Florian Mickler
2010-06-11 15:49                   ` [linux-pm] " Florian Mickler
2010-06-14 14:33                   ` Florian Mickler
2010-06-14 14:33                   ` [linux-pm] " Florian Mickler
2010-06-14 14:44                     ` James Bottomley
2010-06-14 14:44                     ` [linux-pm] " James Bottomley
2010-06-14 14:49                       ` Florian Mickler
2010-06-14 14:49                       ` [linux-pm] " Florian Mickler
2010-06-14 15:10                         ` James Bottomley
2010-06-14 15:20                           ` Florian Mickler
2010-06-14 15:20                           ` [linux-pm] " Florian Mickler
2010-06-14 15:10                         ` James Bottomley
2010-06-14 14:46                   ` [PATCH 1/3] " florian
2010-06-14 14:46                   ` [PATCH 2/3] pm_qos: add atomic notifier chain florian
2010-06-14 14:46                   ` [PATCH 3/3] pm_qos: only schedule work when in interrupt context florian
2010-06-15 17:23                     ` Florian Mickler
2010-06-15 17:23                     ` Florian Mickler
2010-06-17 23:02                       ` James Bottomley
2010-06-17 23:02                       ` James Bottomley
2010-06-10 14:41               ` [PATCH v4] pm_qos: make update_request non blocking Florian Mickler
2010-06-10  7:45           ` Florian Mickler
2010-06-09 16:32       ` Florian Mickler
