* [PATCH-next v2] mm/memcg: Properly handle memcg_stock access for PREEMPT_RT
@ 2021-12-10  2:52 ` Waiman Long
  0 siblings, 0 replies; 9+ messages in thread
From: Waiman Long @ 2021-12-10  2:52 UTC (permalink / raw)
  To: Johannes Weiner, Michal Hocko, Vladimir Davydov, Andrew Morton
  Cc: linux-kernel, cgroups, linux-mm, Sebastian Andrzej Siewior,
	Thomas Gleixner, Waiman Long

Direct calls to local_irq_{save/restore}() and preempt_{enable/disable}()
are not appropriate for PREEMPT_RT. To provide better PREEMPT_RT support,
replace local_irq_save()/local_irq_restore() with
local_lock_irqsave()/local_unlock_irqrestore() and add a local_lock_t to
struct memcg_stock_pcp.

Also disable the task and interrupt context optimization for obj_stock,
as it provides no performance gain under PREEMPT_RT. In that case, the
task obj_stock is still present but remains unused.

Note that preempt_enable() and preempt_disable() in get_obj_stock() and
put_obj_stock() are not replaced by local_lock() and local_unlock(), as a
task accessing task_obj may be interrupted, with the interrupt handler
then accessing irq_obj. Taking the same local_lock for task_obj access
could therefore trigger a lockdep splat.
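
For example, with a single local_lock covering both stocks, the
following nesting would be flagged (a sketch of the scenario, not
actual code):

  Task context                          Interrupt context
  ------------                          -----------------
  get_obj_stock()
    local_lock(&memcg_stock.lock)       /* for task_obj */
    <interrupt arrives>
                                        get_obj_stock()
                                          local_lock_irqsave(&memcg_stock.lock, ...)
                                          /* same lock class, nested: splat */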

Signed-off-by: Waiman Long <longman@redhat.com>
---
 mm/memcontrol.c | 41 ++++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a09a7d2e0b1b..8bed8e2993e4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2097,6 +2097,7 @@ struct obj_stock {
 };
 
 struct memcg_stock_pcp {
+	local_lock_t lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 	struct obj_stock task_obj;
@@ -2145,7 +2146,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2153,7 +2154,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		ret = true;
 	}
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 
 	return ret;
 }
@@ -2189,7 +2190,7 @@ static void drain_local_stock(struct work_struct *dummy)
 	 * drain_stock races is that we always operate on local CPU stock
 	 * here with IRQ disabled
 	 */
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(&stock->irq_obj);
@@ -2198,7 +2199,7 @@ static void drain_local_stock(struct work_struct *dummy)
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&memcg_stock.lock, flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -2223,7 +2224,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -2779,29 +2780,28 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
  * which is cheap in non-preempt kernel. The interrupt context object stock
  * can only be accessed after disabling interrupt. User context code can
  * access interrupt object stock, but not vice versa.
+ *
+ * This task and interrupt context optimization is disabled for PREEMPT_RT
+ * as there is no performance gain in this case.
  */
 static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
 {
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
+	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
 		*pflags = 0UL;
 		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
+		return this_cpu_ptr(&memcg_stock.task_obj);
 	}
 
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
+	local_lock_irqsave(&memcg_stock.lock, *pflags);
+	return this_cpu_ptr(&memcg_stock.irq_obj);
 }
 
 static inline void put_obj_stock(unsigned long flags)
 {
-	if (likely(in_task()))
+	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT))
 		preempt_enable();
 	else
-		local_irq_restore(flags);
+		local_unlock_irqrestore(&memcg_stock.lock, flags);
 }
 
 /*
@@ -7088,9 +7088,12 @@ static int __init mem_cgroup_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
-	for_each_possible_cpu(cpu)
-		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
-			  drain_local_stock);
+	for_each_possible_cpu(cpu) {
+		struct memcg_stock_pcp *stock = per_cpu_ptr(&memcg_stock, cpu);
+
+		INIT_WORK(&stock->work, drain_local_stock);
+		local_lock_init(&stock->lock);
+	}
 
 	for_each_node(node) {
 		struct mem_cgroup_tree_per_node *rtpn;
-- 
2.27.0


* Re: [PATCH-next v2] mm/memcg: Properly handle memcg_stock access for PREEMPT_RT
@ 2021-12-10 13:01   ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2021-12-10 13:01 UTC (permalink / raw)
  To: Waiman Long
  Cc: Johannes Weiner, Michal Hocko, Vladimir Davydov, Andrew Morton,
	linux-kernel, cgroups, linux-mm, Thomas Gleixner, Peter Zijlstra

On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
…
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>  	struct memcg_stock_pcp *stock;
>  	unsigned long flags;
>  
> -	local_irq_save(flags);
> +	local_lock_irqsave(&memcg_stock.lock, flags);

Why is this one using the lock? It isn't accessing irq_obj, right?

>  	stock = this_cpu_ptr(&memcg_stock);
>  	if (stock->cached != memcg) { /* reset if necessary */
> @@ -2779,29 +2780,28 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
>   * which is cheap in non-preempt kernel. The interrupt context object stock
>   * can only be accessed after disabling interrupt. User context code can
>   * access interrupt object stock, but not vice versa.
> + *
> + * This task and interrupt context optimization is disabled for PREEMPT_RT
> + * as there is no performance gain in this case.
>   */
>  static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
>  {
> -	struct memcg_stock_pcp *stock;
> -
> -	if (likely(in_task())) {
> +	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
>  		*pflags = 0UL;
>  		preempt_disable();
> -		stock = this_cpu_ptr(&memcg_stock);
> -		return &stock->task_obj;
> +		return this_cpu_ptr(&memcg_stock.task_obj);
>  	}

We usually add the local_lock_t to the object it protects, struct
obj_stock in this case.
That would give you two different locks (instead of one) so you wouldn't
have to use preempt_disable() to avoid lockdep's complaints. Also it
would warn you if you happen to use that obj_stock in !in_task(), which
isn't possible now.
The only downside would be that drain_local_stock() needs to acquire two
locks.
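
For illustration, roughly along these lines (just a sketch; everything
besides the added lock is elided or as in the current code):

	struct obj_stock {
		local_lock_t lock;	/* protects this obj_stock only */
		struct obj_cgroup *cached_objcg;
		...
	};

	/* task context */
	local_lock(&memcg_stock.task_obj.lock);
	...
	local_unlock(&memcg_stock.task_obj.lock);

	/* interrupt context */
	local_lock_irqsave(&memcg_stock.irq_obj.lock, flags);
	...
	local_unlock_irqrestore(&memcg_stock.irq_obj.lock, flags);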

>  
> -	local_irq_save(*pflags);
> -	stock = this_cpu_ptr(&memcg_stock);
> -	return &stock->irq_obj;
> +	local_lock_irqsave(&memcg_stock.lock, *pflags);
> +	return this_cpu_ptr(&memcg_stock.irq_obj);
>  }
>  
>  static inline void put_obj_stock(unsigned long flags)
>  {
> -	if (likely(in_task()))
> +	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT))
>  		preempt_enable();
>  	else
> -		local_irq_restore(flags);
> +		local_unlock_irqrestore(&memcg_stock.lock, flags);
>  }
>  
>  /*

Sebastian

* Re: [PATCH-next v2] mm/memcg: Properly handle memcg_stock access for PREEMPT_RT
@ 2021-12-10 16:29     ` Waiman Long
  0 siblings, 0 replies; 9+ messages in thread
From: Waiman Long @ 2021-12-10 16:29 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: Johannes Weiner, Michal Hocko, Vladimir Davydov, Andrew Morton,
	linux-kernel, cgroups, linux-mm, Thomas Gleixner, Peter Zijlstra


On 12/10/21 08:01, Sebastian Andrzej Siewior wrote:
> On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
> …
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
> …
>> @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>>   	struct memcg_stock_pcp *stock;
>>   	unsigned long flags;
>>   
>> -	local_irq_save(flags);
>> +	local_lock_irqsave(&memcg_stock.lock, flags);
> Why is this one using the lock? It isn't accessing irq_obj, right?
Well, the lock isn't just for irq_obj. It protects the whole memcg_stock 
structure, which includes irq_obj. Sometimes, data in irq_obj (or 
task_obj) will get transferred to nr_pages and vice versa. So it is easier 
to use a single lock for the whole thing.
>
>>   	stock = this_cpu_ptr(&memcg_stock);
>>   	if (stock->cached != memcg) { /* reset if necessary */
>> @@ -2779,29 +2780,28 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
>>    * which is cheap in non-preempt kernel. The interrupt context object stock
>>    * can only be accessed after disabling interrupt. User context code can
>>    * access interrupt object stock, but not vice versa.
>> + *
>> + * This task and interrupt context optimization is disabled for PREEMPT_RT
>> + * as there is no performance gain in this case.
>>    */
>>   static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
>>   {
>> -	struct memcg_stock_pcp *stock;
>> -
>> -	if (likely(in_task())) {
>> +	if (likely(in_task()) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
>>   		*pflags = 0UL;
>>   		preempt_disable();
>> -		stock = this_cpu_ptr(&memcg_stock);
>> -		return &stock->task_obj;
>> +		return this_cpu_ptr(&memcg_stock.task_obj);
>>   	}
> We usually add the local_lock_t to the object it protects, struct
> obj_stock in this case.
> That would give you two different locks (instead of one) so you wouldn't
> have to use preempt_disable() to avoid lockdep's complaints. Also it
> would warn you if you happen to use that obj_stock in !in_task(), which
> isn't possible now.
> The only downside would be that drain_local_stock() needs to acquire two
> locks.
>
As said above, having separate locks will complicate the interaction 
between irq_obj and the broader memcg_stock fields. Besides, throughput 
is a less important metric for PREEMPT_RT, so I am not trying to 
optimize throughput performance for PREEMPT_RT here.

Cheers,
Longman


* Re: [PATCH-next v2] mm/memcg: Properly handle memcg_stock access for PREEMPT_RT
@ 2021-12-10 16:34       ` Sebastian Andrzej Siewior
  0 siblings, 0 replies; 9+ messages in thread
From: Sebastian Andrzej Siewior @ 2021-12-10 16:34 UTC (permalink / raw)
  To: Waiman Long
  Cc: Johannes Weiner, Michal Hocko, Vladimir Davydov, Andrew Morton,
	linux-kernel, cgroups, linux-mm, Thomas Gleixner, Peter Zijlstra

On 2021-12-10 11:29:31 [-0500], Waiman Long wrote:
> 
> On 12/10/21 08:01, Sebastian Andrzej Siewior wrote:
> > On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
> > …
> > > --- a/mm/memcontrol.c
> > > +++ b/mm/memcontrol.c
> > …
> > > @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
> > >   	struct memcg_stock_pcp *stock;
> > >   	unsigned long flags;
> > > -	local_irq_save(flags);
> > > +	local_lock_irqsave(&memcg_stock.lock, flags);
> > Why is this one using the lock? It isn't accessing irq_obj, right?
> Well, the lock isn't just for irq_obj. It protects the whole memcg_stock
> structure which include irq_obj. Sometimes, data in irq_obj (or task_obj)
> will get transfer to nr_pages and vice versa. So it is easier to use one
> single lock for the whole thing.

This needs way better documentation of what protects what and why.
I don't like a lock being quickly slapped on for RT-only usage without
any kind of explanation. Once you think you know it protects irq_obj
only, you end up here where the lock is taken again for no obvious
reason.

> 
> Cheers,
> Longman

Sebastian

* Re: [PATCH-next v2] mm/memcg: Properly handle memcg_stock access for PREEMPT_RT
@ 2021-12-10 16:37       ` Waiman Long
  -1 siblings, 0 replies; 9+ messages in thread
From: Waiman Long @ 2021-12-10 16:37 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior
  Cc: Johannes Weiner, Michal Hocko, Vladimir Davydov, Andrew Morton,
	linux-kernel, cgroups, linux-mm, Thomas Gleixner, Peter Zijlstra


On 12/10/21 11:34, Sebastian Andrzej Siewior wrote:
> On 2021-12-10 11:29:31 [-0500], Waiman Long wrote:
>> On 12/10/21 08:01, Sebastian Andrzej Siewior wrote:
>>> On 2021-12-09 21:52:28 [-0500], Waiman Long wrote:
>>> …
>>>> --- a/mm/memcontrol.c
>>>> +++ b/mm/memcontrol.c
>>> …
>>>> @@ -2210,7 +2211,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
>>>>    	struct memcg_stock_pcp *stock;
>>>>    	unsigned long flags;
>>>> -	local_irq_save(flags);
>>>> +	local_lock_irqsave(&memcg_stock.lock, flags);
>>> Why is this one using the lock? It isn't accessing irq_obj, right?
>> Well, the lock isn't just for irq_obj. It protects the whole memcg_stock
>> structure which include irq_obj. Sometimes, data in irq_obj (or task_obj)
>> will get transfer to nr_pages and vice versa. So it is easier to use one
>> single lock for the whole thing.
> This needs way better documentation what protects what any why.
> I don't like the quick slapping for RT only usage without any kind of
> explanation. Once you think you know it is irq_obj only you end up here
> where you have the lock again for no obvious reason.

Sure, I will update the patch description and add comments to document 
that. Let's see what other feedback comes in before I make the update.
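
As a first cut, the lock documentation could read something like this
(a draft sketch only, exact wording to be refined):

	struct memcg_stock_pcp {
		/*
		 * Protects all fields of this structure, not just
		 * irq_obj: charges can move between task_obj/irq_obj
		 * and nr_pages, so every field has to be updated under
		 * the same lock.
		 */
		local_lock_t lock;
		...
	};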

Cheers,
Longman

