linux-mm.kvack.org archive mirror
* [PATCH v3 1/3] mm: rework memcg kernel stack accounting
@ 2018-08-27 16:26 Roman Gushchin
  2018-08-27 16:26 ` [PATCH v3 2/3] mm: drain memcg stocks on css offlining Roman Gushchin
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Roman Gushchin @ 2018-08-27 16:26 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Andrew Morton, Roman Gushchin, Johannes Weiner, Andy Lutomirski,
	Konstantin Khlebnikov, Tejun Heo

If CONFIG_VMAP_STACK is set, kernel stacks are allocated
using __vmalloc_node_range() with __GFP_ACCOUNT. So kernel
stack pages are charged against corresponding memory cgroups
on allocation and uncharged on releasing them.

The problem is that we do cache kernel stacks in small
per-cpu caches and do reuse them for new tasks, which can
belong to different memory cgroups.

Each stack page still holds a reference to the original cgroup,
so the cgroup can't be released until the vmap area is released.

To make this happen we need more than two subsequent exits
without forks in between on the current cpu, which makes it
very unlikely in practice. As a result, I saw a significant
number of dying cgroups (in theory, up to 2 * number_of_cpu +
number_of_tasks), which can't be released even under
significant memory pressure.
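For example, on a hypothetical 80-cpu machine running 1000 tasks
that would be up to 2 * 80 + 1000 = 1160 pinned dying cgroups
(made-up numbers, just to illustrate the scale).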

As a cgroup structure can take a significant amount of memory
(first of all, per-cpu data like memcg statistics), it leads
to a noticeable waste of memory.

Fixes: ac496bf48d97 ("fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y")
Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
---
 include/linux/memcontrol.h | 13 ++++++++-
 kernel/fork.c              | 55 +++++++++++++++++++++++++++++++++-----
 2 files changed, 61 insertions(+), 7 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 652f602167df..4399cc3f00e4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1268,10 +1268,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 			    struct mem_cgroup *memcg);
+
+#ifdef CONFIG_MEMCG_KMEM
 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
 void memcg_kmem_uncharge(struct page *page, int order);
 
-#ifdef CONFIG_MEMCG_KMEM
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
 
@@ -1307,6 +1308,16 @@ extern int memcg_expand_shrinker_maps(int new_id);
 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 				   int nid, int shrinker_id);
 #else
+
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+	return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct page *page, int order)
+{
+}
+
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 6ad26f6ef456..c0fb8d00f3cb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -224,9 +224,14 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 		return s->addr;
 	}
 
+	/*
+	 * Allocated stacks are cached and later reused by new threads,
+	 * so memcg accounting is performed manually on assigning/releasing
+	 * stacks to tasks. Drop __GFP_ACCOUNT.
+	 */
 	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
 				     VMALLOC_START, VMALLOC_END,
-				     THREADINFO_GFP,
+				     THREADINFO_GFP & ~__GFP_ACCOUNT,
 				     PAGE_KERNEL,
 				     0, node, __builtin_return_address(0));
 
@@ -249,9 +254,19 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 static inline void free_thread_stack(struct task_struct *tsk)
 {
 #ifdef CONFIG_VMAP_STACK
-	if (task_stack_vm_area(tsk)) {
+	struct vm_struct *vm = task_stack_vm_area(tsk);
+
+	if (vm) {
 		int i;
 
+		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+			mod_memcg_page_state(vm->pages[i],
+					     MEMCG_KERNEL_STACK_KB,
+					     -(int)(PAGE_SIZE / 1024));
+
+			memcg_kmem_uncharge(vm->pages[i], 0);
+		}
+
 		for (i = 0; i < NR_CACHED_STACKS; i++) {
 			if (this_cpu_cmpxchg(cached_stacks[i],
 					NULL, tsk->stack_vm_area) != NULL)
@@ -352,10 +367,6 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 					    NR_KERNEL_STACK_KB,
 					    PAGE_SIZE / 1024 * account);
 		}
-
-		/* All stack pages belong to the same memcg. */
-		mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
-				     account * (THREAD_SIZE / 1024));
 	} else {
 		/*
 		 * All stack pages are in the same zone and belong to the
@@ -371,6 +382,35 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 	}
 }
 
+static int memcg_charge_kernel_stack(struct task_struct *tsk)
+{
+#ifdef CONFIG_VMAP_STACK
+	struct vm_struct *vm = task_stack_vm_area(tsk);
+	int ret;
+
+	if (vm) {
+		int i;
+
+		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
+			/*
+			 * If memcg_kmem_charge() fails, page->mem_cgroup
+			 * pointer is NULL, and both memcg_kmem_uncharge()
+			 * and mod_memcg_page_state() in free_thread_stack()
+			 * will ignore this page. So it's safe.
+			 */
+			ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
+			if (ret)
+				return ret;
+
+			mod_memcg_page_state(vm->pages[i],
+					     MEMCG_KERNEL_STACK_KB,
+					     PAGE_SIZE / 1024);
+		}
+	}
+#endif
+	return 0;
+}
+
 static void release_task_stack(struct task_struct *tsk)
 {
 	if (WARN_ON(tsk->state != TASK_DEAD))
@@ -808,6 +848,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (!stack)
 		goto free_tsk;
 
+	if (memcg_charge_kernel_stack(tsk))
+		goto free_stack;
+
 	stack_vm_area = task_stack_vm_area(tsk);
 
 	err = arch_dup_task_struct(tsk, orig);
-- 
2.17.1


* [PATCH v3 2/3] mm: drain memcg stocks on css offlining
  2018-08-27 16:26 [PATCH v3 1/3] mm: rework memcg kernel stack accounting Roman Gushchin
@ 2018-08-27 16:26 ` Roman Gushchin
  2018-08-27 16:26 ` [PATCH v3 3/3] mm: don't miss the last page because of round-off error Roman Gushchin
  2018-08-27 21:01 ` [PATCH v3 1/3] mm: rework memcg kernel stack accounting Andrew Morton
  2 siblings, 0 replies; 9+ messages in thread
From: Roman Gushchin @ 2018-08-27 16:26 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Andrew Morton, Roman Gushchin, Johannes Weiner,
	Konstantin Khlebnikov, Tejun Heo

Memcg charges are batched using per-cpu stocks, so an offline memcg
can be pinned by a cached charge until a process belonging to some
other cgroup charges some memory on the same cpu. In other words,
cached charges can prevent a memory cgroup from being reclaimed for
some time, without any clear need.

Let's optimize this by explicitly draining all stocks on css
offlining. As draining is performed asynchronously and is skipped
if a parallel drain is already in progress, it's cheap.
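
For illustration, a per-cpu stock is conceptually just a small cache
of pre-charged pages tied to a single memcg (a simplified sketch, not
the exact mm/memcontrol.c definition, which has a few more fields):

	struct memcg_stock_pcp {
		struct mem_cgroup *cached;	/* memcg the pages are pre-charged to */
		unsigned int nr_pages;		/* number of cached pre-charged pages */
	};

While nr_pages is non-zero, 'cached' keeps a reference to the memcg;
drain_all_stock() asks every cpu to give its cached pre-charge back.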

Signed-off-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
---
 mm/memcontrol.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29d9d1a69b36..17ce6f2e6caf 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4573,6 +4573,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	memcg_offline_kmem(memcg);
 	wb_memcg_offline(memcg);
 
+	drain_all_stock(memcg);
+
 	mem_cgroup_id_put(memcg);
 }
 
-- 
2.17.1


* [PATCH v3 3/3] mm: don't miss the last page because of round-off error
  2018-08-27 16:26 [PATCH v3 1/3] mm: rework memcg kernel stack accounting Roman Gushchin
  2018-08-27 16:26 ` [PATCH v3 2/3] mm: drain memcg stocks on css offlining Roman Gushchin
@ 2018-08-27 16:26 ` Roman Gushchin
  2018-08-27 21:04   ` Andrew Morton
  2018-08-27 21:01 ` [PATCH v3 1/3] mm: rework memcg kernel stack accounting Andrew Morton
  2 siblings, 1 reply; 9+ messages in thread
From: Roman Gushchin @ 2018-08-27 16:26 UTC (permalink / raw)
  To: linux-mm
  Cc: linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Andrew Morton, Roman Gushchin, Johannes Weiner, Tejun Heo,
	Rik van Riel, Konstantin Khlebnikov, Matthew Wilcox

I've noticed that dying memory cgroups are often pinned
in memory by a single pagecache page. Even under moderate
memory pressure they sometimes stayed in such a state
for a long time. That looked strange.

My investigation showed that the problem is caused by
applying the LRU pressure balancing math:

  scan = div64_u64(scan * fraction[lru], denominator),

where

  denominator = fraction[anon] + fraction[file] + 1.

Because fraction[lru] is always less than denominator,
if the initial scan size is 1, the result is always 0.

This means the last page is not scanned and has
no chance of being reclaimed.

Fix this by rounding up the result of the division.
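
For example (with made-up numbers): if scan = 1, fraction[file] = 100
and fraction[anon] = 60, then denominator = 161, and
div64_u64(1 * 100, 161) = 0, so the last file page is skipped.
Rounding up yields 1 instead, so the page gets scanned.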

In practice this change significantly improves the speed
of dying cgroups reclaim.

Signed-off-by: Roman Gushchin <guro@fb.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
---
 include/linux/math64.h | 2 ++
 mm/vmscan.c            | 6 ++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/include/linux/math64.h b/include/linux/math64.h
index 837f2f2d1d34..94af3d9c73e7 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -281,4 +281,6 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 }
 #endif /* mul_u64_u32_div */
 
+#define DIV64_U64_ROUND_UP(ll, d)	div64_u64((ll) + (d) - 1, (d))
+
 #endif /* _LINUX_MATH64_H */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d649b242b989..2c67a0121c6d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2446,9 +2446,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 			/*
 			 * Scan types proportional to swappiness and
 			 * their relative recent reclaim efficiency.
+			 * Make sure we don't miss the last page
+			 * because of a round-off error.
 			 */
-			scan = div64_u64(scan * fraction[file],
-					 denominator);
+			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+						  denominator);
 			break;
 		case SCAN_FILE:
 		case SCAN_ANON:
-- 
2.17.1


* Re: [PATCH v3 1/3] mm: rework memcg kernel stack accounting
  2018-08-27 16:26 [PATCH v3 1/3] mm: rework memcg kernel stack accounting Roman Gushchin
  2018-08-27 16:26 ` [PATCH v3 2/3] mm: drain memcg stocks on css offlining Roman Gushchin
  2018-08-27 16:26 ` [PATCH v3 3/3] mm: don't miss the last page because of round-off error Roman Gushchin
@ 2018-08-27 21:01 ` Andrew Morton
  2018-08-27 23:19   ` Roman Gushchin
  2 siblings, 1 reply; 9+ messages in thread
From: Andrew Morton @ 2018-08-27 21:01 UTC (permalink / raw)
  To: Roman Gushchin
  Cc: linux-mm, linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Johannes Weiner, Andy Lutomirski, Konstantin Khlebnikov,
	Tejun Heo

On Mon, 27 Aug 2018 09:26:19 -0700 Roman Gushchin <guro@fb.com> wrote:

> If CONFIG_VMAP_STACK is set, kernel stacks are allocated
> using __vmalloc_node_range() with __GFP_ACCOUNT. So kernel
> stack pages are charged against corresponding memory cgroups
> on allocation and uncharged on releasing them.
> 
> The problem is that we do cache kernel stacks in small
> per-cpu caches and do reuse them for new tasks, which can
> belong to different memory cgroups.
> 
> Each stack page still holds a reference to the original cgroup,
> so the cgroup can't be released until the vmap area is released.
> 
> To make this happen we need more than two subsequent exits
> without forks in between on the current cpu, which makes it
> very unlikely to happen. As a result, I saw a significant number
> of dying cgroups (in theory, up to 2 * number_of_cpu +
> number_of_tasks), which can't be released even by significant
> memory pressure.
> 
> As a cgroup structure can take a significant amount of memory
> (first of all, per-cpu data like memcg statistics), it leads
> to a noticeable waste of memory.

OK, but this doesn't describe how the patch addresses this issue?

>
> ...
>
> @@ -371,6 +382,35 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
>  	}
>  }
>  
> +static int memcg_charge_kernel_stack(struct task_struct *tsk)
> +{
> +#ifdef CONFIG_VMAP_STACK
> +	struct vm_struct *vm = task_stack_vm_area(tsk);
> +	int ret;
> +
> +	if (vm) {
> +		int i;
> +
> +		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {

Can we ever have THREAD_SIZE < PAGE_SIZE?  64k pages?

> +			/*
> +			 * If memcg_kmem_charge() fails, page->mem_cgroup
> +			 * pointer is NULL, and both memcg_kmem_uncharge()
> +			 * and mod_memcg_page_state() in free_thread_stack()
> +			 * will ignore this page. So it's safe.
> +			 */
> +			ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
> +			if (ret)
> +				return ret;
> +
> +			mod_memcg_page_state(vm->pages[i],
> +					     MEMCG_KERNEL_STACK_KB,
> +					     PAGE_SIZE / 1024);
> +		}
> +	}
> +#endif
> +	return 0;
> +}
>
> ...
>


* Re: [PATCH v3 3/3] mm: don't miss the last page because of round-off error
  2018-08-27 16:26 ` [PATCH v3 3/3] mm: don't miss the last page because of round-off error Roman Gushchin
@ 2018-08-27 21:04   ` Andrew Morton
  2018-08-27 23:24     ` Roman Gushchin
  2018-08-29 21:33     ` Roman Gushchin
  0 siblings, 2 replies; 9+ messages in thread
From: Andrew Morton @ 2018-08-27 21:04 UTC (permalink / raw)
  To: Roman Gushchin
  Cc: linux-mm, linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Johannes Weiner, Tejun Heo, Rik van Riel, Konstantin Khlebnikov,
	Matthew Wilcox

On Mon, 27 Aug 2018 09:26:21 -0700 Roman Gushchin <guro@fb.com> wrote:

> I've noticed that dying memory cgroups are often pinned
> in memory by a single pagecache page. Even under moderate
> memory pressure they sometimes stayed in such a state
> for a long time. That looked strange.
> 
> My investigation showed that the problem is caused by
> applying the LRU pressure balancing math:
> 
>   scan = div64_u64(scan * fraction[lru], denominator),
> 
> where
> 
>   denominator = fraction[anon] + fraction[file] + 1.
> 
> Because fraction[lru] is always less than denominator,
> if the initial scan size is 1, the result is always 0.
> 
> This means the last page is not scanned and has
> no chance of being reclaimed.
> 
> Fix this by rounding up the result of the division.
> 
> In practice this change significantly improves the speed
> of dying cgroups reclaim.
> 
> ...
>
> --- a/include/linux/math64.h
> +++ b/include/linux/math64.h
> @@ -281,4 +281,6 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
>  }
>  #endif /* mul_u64_u32_div */
>  
> +#define DIV64_U64_ROUND_UP(ll, d)	div64_u64((ll) + (d) - 1, (d))

This macro references arg `d' more than once.  That can cause problems
if the passed expression has side-effects and is poor practice.  Can
we please redo this with a temporary?
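
For example, a made-up call like DIV64_U64_ROUND_UP(len, step++)
would expand to two evaluations of step++.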

>  #endif /* _LINUX_MATH64_H */
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index d649b242b989..2c67a0121c6d 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2446,9 +2446,11 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
>  			/*
>  			 * Scan types proportional to swappiness and
>  			 * their relative recent reclaim efficiency.
> +			 * Make sure we don't miss the last page
> +			 * because of a round-off error.
>  			 */
> -			scan = div64_u64(scan * fraction[file],
> -					 denominator);
> +			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
> +						  denominator);
>  			break;
>  		case SCAN_FILE:
>  		case SCAN_ANON:


* Re: [PATCH v3 1/3] mm: rework memcg kernel stack accounting
  2018-08-27 21:01 ` [PATCH v3 1/3] mm: rework memcg kernel stack accounting Andrew Morton
@ 2018-08-27 23:19   ` Roman Gushchin
  0 siblings, 0 replies; 9+ messages in thread
From: Roman Gushchin @ 2018-08-27 23:19 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Johannes Weiner, Andy Lutomirski, Konstantin Khlebnikov,
	Tejun Heo

On Mon, Aug 27, 2018 at 02:01:43PM -0700, Andrew Morton wrote:
> On Mon, 27 Aug 2018 09:26:19 -0700 Roman Gushchin <guro@fb.com> wrote:
> 
> > If CONFIG_VMAP_STACK is set, kernel stacks are allocated
> > using __vmalloc_node_range() with __GFP_ACCOUNT. So kernel
> > stack pages are charged against corresponding memory cgroups
> > on allocation and uncharged on releasing them.
> > 
> > The problem is that we do cache kernel stacks in small
> > per-cpu caches and do reuse them for new tasks, which can
> > belong to different memory cgroups.
> > 
> > Each stack page still holds a reference to the original cgroup,
> > so the cgroup can't be released until the vmap area is released.
> > 
> > To make this happen we need more than two subsequent exits
> > without forks in between on the current cpu, which makes it
> > very unlikely to happen. As a result, I saw a significant number
> > of dying cgroups (in theory, up to 2 * number_of_cpu +
> > number_of_tasks), which can't be released even by significant
> > memory pressure.
> > 
> > As a cgroup structure can take a significant amount of memory
> > (first of all, per-cpu data like memcg statistics), it leads
> > to a noticeable waste of memory.
> 
> OK, but this doesn't describe how the patch addresses this issue?

Sorry, missed this part. Let's add the following paragraph to the
commit message (the full updated patch is below):

To address the issue, let's charge thread stacks when assigning
them to tasks, and uncharge them when they are released and put
into the per-cpu cache. This way, cached stacks will not be
assigned to any memcg and will not hold any memcg reference.


> 
> >
> > ...
> >
> > @@ -371,6 +382,35 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
> >  	}
> >  }
> >  
> > +static int memcg_charge_kernel_stack(struct task_struct *tsk)
> > +{
> > +#ifdef CONFIG_VMAP_STACK
> > +	struct vm_struct *vm = task_stack_vm_area(tsk);
> > +	int ret;
> > +
> > +	if (vm) {
> > +		int i;
> > +
> > +		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
> 
> Can we ever have THREAD_SIZE < PAGE_SIZE?  64k pages?

Hm, good question!
We can, but I doubt that anyone is using 64k pages AND CONFIG_VMAP_STACK,
and I *suspect* that such a config would trigger the BUG_ON() in
account_kernel_stack():

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	...

	if (vm) {
		...

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

But I don't see anything that makes such a config illegitimate.
Does it make any sense to use vmap if THREAD_SIZE < PAGE_SIZE?

> 
> > +			/*
> > +			 * If memcg_kmem_charge() fails, page->mem_cgroup
> > +			 * pointer is NULL, and both memcg_kmem_uncharge()
> > +			 * and mod_memcg_page_state() in free_thread_stack()
> > +			 * will ignore this page. So it's safe.
> > +			 */
> > +			ret = memcg_kmem_charge(vm->pages[i], GFP_KERNEL, 0);
> > +			if (ret)
> > +				return ret;
> > +
> > +			mod_memcg_page_state(vm->pages[i],
> > +					     MEMCG_KERNEL_STACK_KB,
> > +					     PAGE_SIZE / 1024);
> > +		}
> > +	}
> > +#endif
> > +	return 0;
> > +}
> >
> > ...
> >

Thanks!


--


* Re: [PATCH v3 3/3] mm: don't miss the last page because of round-off error
  2018-08-27 21:04   ` Andrew Morton
@ 2018-08-27 23:24     ` Roman Gushchin
  2018-08-29 21:33     ` Roman Gushchin
  1 sibling, 0 replies; 9+ messages in thread
From: Roman Gushchin @ 2018-08-27 23:24 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Johannes Weiner, Tejun Heo, Rik van Riel, Konstantin Khlebnikov,
	Matthew Wilcox

On Mon, Aug 27, 2018 at 02:04:32PM -0700, Andrew Morton wrote:
> On Mon, 27 Aug 2018 09:26:21 -0700 Roman Gushchin <guro@fb.com> wrote:
> 
> > I've noticed that dying memory cgroups are often pinned
> > in memory by a single pagecache page. Even under moderate
> > memory pressure they sometimes stayed in such a state
> > for a long time. That looked strange.
> > 
> > My investigation showed that the problem is caused by
> > applying the LRU pressure balancing math:
> > 
> >   scan = div64_u64(scan * fraction[lru], denominator),
> > 
> > where
> > 
> >   denominator = fraction[anon] + fraction[file] + 1.
> > 
> > Because fraction[lru] is always less than denominator,
> > if the initial scan size is 1, the result is always 0.
> > 
> > This means the last page is not scanned and has
> > no chance of being reclaimed.
> > 
> > Fix this by rounding up the result of the division.
> > 
> > In practice this change significantly improves the speed
> > of dying cgroups reclaim.
> > 
> > ...
> >
> > --- a/include/linux/math64.h
> > +++ b/include/linux/math64.h
> > @@ -281,4 +281,6 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
> >  }
> >  #endif /* mul_u64_u32_div */
> >  
> > +#define DIV64_U64_ROUND_UP(ll, d)	div64_u64((ll) + (d) - 1, (d))
> 
> This macro references arg `d' more than once.  That can cause problems
> if the passed expression has side-effects and is poor practice.  Can
> we please redo this with a temporary?

Sure. This was copy-pasted to match the existing DIV_ROUND_UP
(probably not the best idea).

So let me fix them both in a separate patch.

Thanks!


* Re: [PATCH v3 3/3] mm: don't miss the last page because of round-off error
  2018-08-27 21:04   ` Andrew Morton
  2018-08-27 23:24     ` Roman Gushchin
@ 2018-08-29 21:33     ` Roman Gushchin
  2018-09-05 21:08       ` Andrew Morton
  1 sibling, 1 reply; 9+ messages in thread
From: Roman Gushchin @ 2018-08-29 21:33 UTC (permalink / raw)
  To: Andrew Morton
  Cc: linux-mm, linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Johannes Weiner, Tejun Heo, Rik van Riel, Konstantin Khlebnikov,
	Matthew Wilcox

On Mon, Aug 27, 2018 at 02:04:32PM -0700, Andrew Morton wrote:
> On Mon, 27 Aug 2018 09:26:21 -0700 Roman Gushchin <guro@fb.com> wrote:
> 
> > I've noticed that dying memory cgroups are often pinned
> > in memory by a single pagecache page. Even under moderate
> > memory pressure they sometimes stayed in such a state
> > for a long time. That looked strange.
> > 
> > My investigation showed that the problem is caused by
> > applying the LRU pressure balancing math:
> > 
> >   scan = div64_u64(scan * fraction[lru], denominator),
> > 
> > where
> > 
> >   denominator = fraction[anon] + fraction[file] + 1.
> > 
> > Because fraction[lru] is always less than denominator,
> > if the initial scan size is 1, the result is always 0.
> > 
> > This means the last page is not scanned and has
> > no chance of being reclaimed.
> > 
> > Fix this by rounding up the result of the division.
> > 
> > In practice this change significantly improves the speed
> > of dying cgroups reclaim.
> > 
> > ...
> >
> > --- a/include/linux/math64.h
> > +++ b/include/linux/math64.h
> > @@ -281,4 +281,6 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
> >  }
> >  #endif /* mul_u64_u32_div */
> >  
> > +#define DIV64_U64_ROUND_UP(ll, d)	div64_u64((ll) + (d) - 1, (d))
> 
> This macro references arg `d' more than once.  That can cause problems
> if the passed expression has side-effects and is poor practice.  Can
> we please redo this with a temporary?

Argh, the original DIV_ROUND_UP can't be fixed this way, as it's used
in array size declarations.

So, below is the patch for the new DIV64_U64_ROUND_UP macro only.

Thanks!

--


* Re: [PATCH v3 3/3] mm: don't miss the last page because of round-off error
  2018-08-29 21:33     ` Roman Gushchin
@ 2018-09-05 21:08       ` Andrew Morton
  0 siblings, 0 replies; 9+ messages in thread
From: Andrew Morton @ 2018-09-05 21:08 UTC (permalink / raw)
  To: Roman Gushchin
  Cc: linux-mm, linux-kernel, kernel-team, Shakeel Butt, Michal Hocko,
	Johannes Weiner, Tejun Heo, Rik van Riel, Konstantin Khlebnikov,
	Matthew Wilcox

On Wed, 29 Aug 2018 14:33:19 -0700 Roman Gushchin <guro@fb.com> wrote:

> From d8237d3df222e6c5a98a74baa04bc52edf8a3677 Mon Sep 17 00:00:00 2001
> From: Roman Gushchin <guro@fb.com>
> Date: Wed, 29 Aug 2018 14:14:48 -0700
> Subject: [PATCH] math64: prevent double calculation of DIV64_U64_ROUND_UP()
>  arguments
> 
> Cause the DIV64_U64_ROUND_UP(ll, d) macro to cache
> the result of the (d) expression in a local variable to
> avoid double evaluation, which might bring unexpected
> side effects.
> 
> Signed-off-by: Roman Gushchin <guro@fb.com>
> ---
>  include/linux/math64.h | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/include/linux/math64.h b/include/linux/math64.h
> index 94af3d9c73e7..bb2c84afb80c 100644
> --- a/include/linux/math64.h
> +++ b/include/linux/math64.h
> @@ -281,6 +281,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
>  }
>  #endif /* mul_u64_u32_div */
>  
> -#define DIV64_U64_ROUND_UP(ll, d)	div64_u64((ll) + (d) - 1, (d))
> +#define DIV64_U64_ROUND_UP(ll, d)	\
> +	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
>  
>  #endif /* _LINUX_MATH64_H */

Does it have to be done as a macro?  A lot of these things are
implemented as nice inline C functions.
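
Something like this, perhaps (an untested sketch):

	static inline u64 div64_u64_round_up(u64 ll, u64 d)
	{
		return div64_u64(ll + d - 1, d);
	}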

Also, most of these functions and macros return a value whereas
DIV64_U64_ROUND_UP() does not.  Desirable?

(And we're quite pathetic about documenting what those return values
_are_, which gets frustrating for the poor schmucks who sit here
reviewing code all day).


