All of lore.kernel.org
 help / color / mirror / Atom feed
* + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree
@ 2021-07-09 22:22 akpm
  2021-07-09 23:35 ` Suren Baghdasaryan
  0 siblings, 1 reply; 6+ messages in thread
From: akpm @ 2021-07-09 22:22 UTC (permalink / raw)
  To: mm-commits, vdavydov.dev, shakeelb, mhocko, hannes, surenb


The patch titled
     Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config
has been added to the -mm tree.  Its filename is
     mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Suren Baghdasaryan <surenb@google.com>
Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config

Inline mem_cgroup_{charge/uncharge} and mem_cgroup_uncharge_list
functions to perform mem_cgroup_disabled static key check inline before
calling the main body of the function.  This minimizes the memcg overhead
in the pagefault and exit_mmap paths when memcgs are disabled using
cgroup_disable=memory command-line option.

This change results in ~0.4% overhead reduction when running PFT test
comparing {CONFIG_MEMCG=n} against {CONFIG_MEMCG=y, cgroup_disable=memory}
configuration on an 8-core ARM64 Android device.

Link: https://lkml.kernel.org/r/20210709171554.3494654-1-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/memcontrol.h |   28 +++++++++++++++++++++++++---
 mm/memcontrol.c            |   29 ++++++++++-------------------
 2 files changed, 35 insertions(+), 22 deletions(-)

--- a/include/linux/memcontrol.h~mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config
+++ a/include/linux/memcontrol.h
@@ -693,13 +693,35 @@ static inline bool mem_cgroup_below_min(
 		page_counter_read(&memcg->memory);
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
+int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+			gfp_t gfp_mask);
+static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+				    gfp_t gfp_mask)
+{
+	if (mem_cgroup_disabled())
+		return 0;
+	return __mem_cgroup_charge(page, mm, gfp_mask);
+}
+
 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+void __mem_cgroup_uncharge(struct page *page);
+static inline void mem_cgroup_uncharge(struct page *page)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge(page);
+}
+
+void __mem_cgroup_uncharge_list(struct list_head *page_list);
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge_list(page_list);
+}
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
--- a/mm/memcontrol.c~mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config
+++ a/mm/memcontrol.c
@@ -6701,8 +6701,7 @@ void mem_cgroup_calculate_protection(str
 			atomic_long_read(&parent->memory.children_low_usage)));
 }
 
-static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
-			       gfp_t gfp)
+static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
 {
 	unsigned int nr_pages = thp_nr_pages(page);
 	int ret;
@@ -6723,7 +6722,7 @@ out:
 }
 
 /**
- * mem_cgroup_charge - charge a newly allocated page to a cgroup
+ * __mem_cgroup_charge - charge a newly allocated page to a cgroup
  * @page: page to charge
  * @mm: mm context of the victim
  * @gfp_mask: reclaim mode
@@ -6736,16 +6735,14 @@ out:
  *
  * Returns 0 on success. Otherwise, an error code is returned.
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+			gfp_t gfp_mask)
 {
 	struct mem_cgroup *memcg;
 	int ret;
 
-	if (mem_cgroup_disabled())
-		return 0;
-
 	memcg = get_mem_cgroup_from_mm(mm);
-	ret = __mem_cgroup_charge(page, memcg, gfp_mask);
+	ret = charge_memcg(page, memcg, gfp_mask);
 	css_put(&memcg->css);
 
 	return ret;
@@ -6780,7 +6777,7 @@ int mem_cgroup_swapin_charge_page(struct
 		memcg = get_mem_cgroup_from_mm(mm);
 	rcu_read_unlock();
 
-	ret = __mem_cgroup_charge(page, memcg, gfp);
+	ret = charge_memcg(page, memcg, gfp);
 
 	css_put(&memcg->css);
 	return ret;
@@ -6916,18 +6913,15 @@ static void uncharge_page(struct page *p
 }
 
 /**
- * mem_cgroup_uncharge - uncharge a page
+ * __mem_cgroup_uncharge - uncharge a page
  * @page: page to uncharge
  *
  * Uncharge a page previously charged with mem_cgroup_charge().
  */
-void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct page *page)
 {
 	struct uncharge_gather ug;
 
-	if (mem_cgroup_disabled())
-		return;
-
 	/* Don't touch page->lru of any random page, pre-check: */
 	if (!page_memcg(page))
 		return;
@@ -6938,20 +6932,17 @@ void mem_cgroup_uncharge(struct page *pa
 }
 
 /**
- * mem_cgroup_uncharge_list - uncharge a list of page
+ * __mem_cgroup_uncharge_list - uncharge a list of page
  * @page_list: list of pages to uncharge
  *
  * Uncharge a list of pages previously charged with
  * mem_cgroup_charge().
  */
-void mem_cgroup_uncharge_list(struct list_head *page_list)
+void __mem_cgroup_uncharge_list(struct list_head *page_list)
 {
 	struct uncharge_gather ug;
 	struct page *page;
 
-	if (mem_cgroup_disabled())
-		return;
-
 	uncharge_gather_clear(&ug);
 	list_for_each_entry(page, page_list, lru)
 		uncharge_page(page, &ug);
_

Patches currently in -mm which might be from surenb@google.com are

mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree
  2021-07-09 22:22 + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree akpm
@ 2021-07-09 23:35 ` Suren Baghdasaryan
  2021-07-10  0:01   ` Andrew Morton
  0 siblings, 1 reply; 6+ messages in thread
From: Suren Baghdasaryan @ 2021-07-09 23:35 UTC (permalink / raw)
  To: Andrew Morton
  Cc: mm-commits, vdavydov.dev, Shakeel Butt, Michal Hocko, Johannes Weiner

On Fri, Jul 9, 2021 at 3:22 PM <akpm@linux-foundation.org> wrote:
>
>
> The patch titled
>      Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config
> has been added to the -mm tree.  Its filename is
>      mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
>
> This patch should soon appear at
>     https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> and later at
>     https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch

Hi Andrew,
Please note that this patch is the second in the series of 3 here:
https://lore.kernel.org/patchwork/project/lkml/list/?series=507567 and
the only one that needed a v2 (others seem to be fine and are Ack'ed).
If you would like me to respin the whole series please let me know.
Otherwise please don't forget to pick up the other two patches from
this series.
Thanks,
Suren.

>
> Before you just go and hit "reply", please:
>    a) Consider who else should be cc'ed
>    b) Prefer to cc a suitable mailing list as well
>    c) Ideally: find the original patch on the mailing list and do a
>       reply-to-all to that, adding suitable additional cc's
>
> *** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
>
> The -mm tree is included into linux-next and is updated
> there every 3-4 working days
>
> ------------------------------------------------------
> From: Suren Baghdasaryan <surenb@google.com>
> Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config
>
> Inline mem_cgroup_{charge/uncharge} and mem_cgroup_uncharge_list
> functions to perform mem_cgroup_disabled static key check inline before
> calling the main body of the function.  This minimizes the memcg overhead
> in the pagefault and exit_mmap paths when memcgs are disabled using
> cgroup_disable=memory command-line option.
>
> This change results in ~0.4% overhead reduction when running PFT test
> comparing {CONFIG_MEMCG=n} against {CONFIG_MEMCG=y, cgroup_disable=memory}
> configuration on an 8-core ARM64 Android device.
>
> Link: https://lkml.kernel.org/r/20210709171554.3494654-1-surenb@google.com
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Reviewed-by: Shakeel Butt <shakeelb@google.com>
> Cc: Johannes Weiner <hannes@cmpxchg.org>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
> ---
>
>  include/linux/memcontrol.h |   28 +++++++++++++++++++++++++---
>  mm/memcontrol.c            |   29 ++++++++++-------------------
>  2 files changed, 35 insertions(+), 22 deletions(-)
>
> --- a/include/linux/memcontrol.h~mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config
> +++ a/include/linux/memcontrol.h
> @@ -693,13 +693,35 @@ static inline bool mem_cgroup_below_min(
>                 page_counter_read(&memcg->memory);
>  }
>
> -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
> +int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> +                       gfp_t gfp_mask);
> +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> +                                   gfp_t gfp_mask)
> +{
> +       if (mem_cgroup_disabled())
> +               return 0;
> +       return __mem_cgroup_charge(page, mm, gfp_mask);
> +}
> +
>  int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
>                                   gfp_t gfp, swp_entry_t entry);
>  void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
>
> -void mem_cgroup_uncharge(struct page *page);
> -void mem_cgroup_uncharge_list(struct list_head *page_list);
> +void __mem_cgroup_uncharge(struct page *page);
> +static inline void mem_cgroup_uncharge(struct page *page)
> +{
> +       if (mem_cgroup_disabled())
> +               return;
> +       __mem_cgroup_uncharge(page);
> +}
> +
> +void __mem_cgroup_uncharge_list(struct list_head *page_list);
> +static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
> +{
> +       if (mem_cgroup_disabled())
> +               return;
> +       __mem_cgroup_uncharge_list(page_list);
> +}
>
>  void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
>
> --- a/mm/memcontrol.c~mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config
> +++ a/mm/memcontrol.c
> @@ -6701,8 +6701,7 @@ void mem_cgroup_calculate_protection(str
>                         atomic_long_read(&parent->memory.children_low_usage)));
>  }
>
> -static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
> -                              gfp_t gfp)
> +static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
>  {
>         unsigned int nr_pages = thp_nr_pages(page);
>         int ret;
> @@ -6723,7 +6722,7 @@ out:
>  }
>
>  /**
> - * mem_cgroup_charge - charge a newly allocated page to a cgroup
> + * __mem_cgroup_charge - charge a newly allocated page to a cgroup
>   * @page: page to charge
>   * @mm: mm context of the victim
>   * @gfp_mask: reclaim mode
> @@ -6736,16 +6735,14 @@ out:
>   *
>   * Returns 0 on success. Otherwise, an error code is returned.
>   */
> -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
> +int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
> +                       gfp_t gfp_mask)
>  {
>         struct mem_cgroup *memcg;
>         int ret;
>
> -       if (mem_cgroup_disabled())
> -               return 0;
> -
>         memcg = get_mem_cgroup_from_mm(mm);
> -       ret = __mem_cgroup_charge(page, memcg, gfp_mask);
> +       ret = charge_memcg(page, memcg, gfp_mask);
>         css_put(&memcg->css);
>
>         return ret;
> @@ -6780,7 +6777,7 @@ int mem_cgroup_swapin_charge_page(struct
>                 memcg = get_mem_cgroup_from_mm(mm);
>         rcu_read_unlock();
>
> -       ret = __mem_cgroup_charge(page, memcg, gfp);
> +       ret = charge_memcg(page, memcg, gfp);
>
>         css_put(&memcg->css);
>         return ret;
> @@ -6916,18 +6913,15 @@ static void uncharge_page(struct page *p
>  }
>
>  /**
> - * mem_cgroup_uncharge - uncharge a page
> + * __mem_cgroup_uncharge - uncharge a page
>   * @page: page to uncharge
>   *
>   * Uncharge a page previously charged with mem_cgroup_charge().
>   */
> -void mem_cgroup_uncharge(struct page *page)
> +void __mem_cgroup_uncharge(struct page *page)
>  {
>         struct uncharge_gather ug;
>
> -       if (mem_cgroup_disabled())
> -               return;
> -
>         /* Don't touch page->lru of any random page, pre-check: */
>         if (!page_memcg(page))
>                 return;
> @@ -6938,20 +6932,17 @@ void mem_cgroup_uncharge(struct page *pa
>  }
>
>  /**
> - * mem_cgroup_uncharge_list - uncharge a list of page
> + * __mem_cgroup_uncharge_list - uncharge a list of page
>   * @page_list: list of pages to uncharge
>   *
>   * Uncharge a list of pages previously charged with
>   * mem_cgroup_charge().
>   */
> -void mem_cgroup_uncharge_list(struct list_head *page_list)
> +void __mem_cgroup_uncharge_list(struct list_head *page_list)
>  {
>         struct uncharge_gather ug;
>         struct page *page;
>
> -       if (mem_cgroup_disabled())
> -               return;
> -
>         uncharge_gather_clear(&ug);
>         list_for_each_entry(page, page_list, lru)
>                 uncharge_page(page, &ug);
> _
>
> Patches currently in -mm which might be from surenb@google.com are
>
> mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree
  2021-07-09 23:35 ` Suren Baghdasaryan
@ 2021-07-10  0:01   ` Andrew Morton
  2021-07-10  0:24     ` Suren Baghdasaryan
  0 siblings, 1 reply; 6+ messages in thread
From: Andrew Morton @ 2021-07-10  0:01 UTC (permalink / raw)
  To: Suren Baghdasaryan
  Cc: mm-commits, vdavydov.dev, Shakeel Butt, Michal Hocko, Johannes Weiner

On Fri, 9 Jul 2021 16:35:18 -0700 Suren Baghdasaryan <surenb@google.com> wrote:

> On Fri, Jul 9, 2021 at 3:22 PM <akpm@linux-foundation.org> wrote:
> >
> >
> > The patch titled
> >      Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config
> > has been added to the -mm tree.  Its filename is
> >      mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> >
> > This patch should soon appear at
> >     https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> > and later at
> >     https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> 
> Hi Andrew,
> Please note that this patch is the second in the series of 3 here:

This patch had "[PATCH v2 1/1]" in the title.  Poor me.

> https://lore.kernel.org/patchwork/project/lkml/list/?series=507567 and
> the only one that needed a v2 (others seem to be fine and are Ack'ed).
> If you would like me to respin the whole series please let me know.

Yes please.



^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree
  2021-07-10  0:01   ` Andrew Morton
@ 2021-07-10  0:24     ` Suren Baghdasaryan
  2021-07-10  0:38       ` Suren Baghdasaryan
  0 siblings, 1 reply; 6+ messages in thread
From: Suren Baghdasaryan @ 2021-07-10  0:24 UTC (permalink / raw)
  To: Andrew Morton
  Cc: mm-commits, vdavydov.dev, Shakeel Butt, Michal Hocko, Johannes Weiner

On Fri, Jul 9, 2021 at 5:01 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Fri, 9 Jul 2021 16:35:18 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
>
> > On Fri, Jul 9, 2021 at 3:22 PM <akpm@linux-foundation.org> wrote:
> > >
> > >
> > > The patch titled
> > >      Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config
> > > has been added to the -mm tree.  Its filename is
> > >      mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> > >
> > > This patch should soon appear at
> > >     https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> > > and later at
> > >     https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> >
> > Hi Andrew,
> > Please note that this patch is the second in the series of 3 here:
>
> This patch had "[PATCH v2 1/1]" in the title.  Poor me.

Oops! My bad, sorry.

>
> > https://lore.kernel.org/patchwork/project/lkml/list/?series=507567 and
> > the only one that needed a v2 (others seem to be fine and are Ack'ed).
> > If you would like me to respin the whole series please let me know.
>
> Yes please.

Coming right up!

>
>

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree
  2021-07-10  0:24     ` Suren Baghdasaryan
@ 2021-07-10  0:38       ` Suren Baghdasaryan
  0 siblings, 0 replies; 6+ messages in thread
From: Suren Baghdasaryan @ 2021-07-10  0:38 UTC (permalink / raw)
  To: Andrew Morton
  Cc: mm-commits, vdavydov.dev, Shakeel Butt, Michal Hocko, Johannes Weiner

On Fri, Jul 9, 2021 at 5:24 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Fri, Jul 9, 2021 at 5:01 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >
> > On Fri, 9 Jul 2021 16:35:18 -0700 Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > > On Fri, Jul 9, 2021 at 3:22 PM <akpm@linux-foundation.org> wrote:
> > > >
> > > >
> > > > The patch titled
> > > >      Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config
> > > > has been added to the -mm tree.  Its filename is
> > > >      mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> > > >
> > > > This patch should soon appear at
> > > >     https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> > > > and later at
> > > >     https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
> > >
> > > Hi Andrew,
> > > Please note that this patch is the second in the series of 3 here:
> >
> > This patch had "[PATCH v2 1/1]" in the title.  Poor me.
>
> Oops! My bad, sorry.
>
> >
> > > https://lore.kernel.org/patchwork/project/lkml/list/?series=507567 and
> > > the only one that needed a v2 (others seem to be fine and are Ack'ed).
> > > If you would like me to respin the whole series please let me know.
> >
> > Yes please.
>
> Coming right up!

Posted v3 of the whole series here:
https://lore.kernel.org/patchwork/project/lkml/list/?series=507708
Sorry about the confusion.

>
> >
> >

^ permalink raw reply	[flat|nested] 6+ messages in thread

* + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree
@ 2021-07-14  1:36 akpm
  0 siblings, 0 replies; 6+ messages in thread
From: akpm @ 2021-07-14  1:36 UTC (permalink / raw)
  To: alexs, apopple, axboe, david, guro, hannes, iamjoonsoo.kim,
	linmiaohe, mhocko, minchan, mm-commits, richard.weiyang,
	shakeelb, shy828301, songmuchun, surenb, tj, willy


The patch titled
     Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config
has been added to the -mm tree.  Its filename is
     mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Suren Baghdasaryan <surenb@google.com>
Subject: mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config

Inline mem_cgroup_{charge/uncharge} and mem_cgroup_uncharge_list
functions to perform mem_cgroup_disabled static key check inline before
calling the main body of the function.  This minimizes the memcg overhead
in the pagefault and exit_mmap paths when memcgs are disabled using
cgroup_disable=memory command-line option.

This change results in ~0.4% overhead reduction when running PFT test [1]
comparing {CONFIG_MEMCG=n} against {CONFIG_MEMCG=y, cgroup_disable=memory}
configuration on an 8-core ARM64 Android device.

[1] https://lkml.org/lkml/2006/8/29/294 also used in mmtests suite

Link: https://lkml.kernel.org/r/20210713010934.299876-2-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Alex Shi <alexs@kernel.org>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/memcontrol.h |   28 +++++++++++++++++++++++++---
 mm/memcontrol.c            |   33 ++++++++++++---------------------
 2 files changed, 37 insertions(+), 24 deletions(-)

--- a/include/linux/memcontrol.h~mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config
+++ a/include/linux/memcontrol.h
@@ -693,13 +693,35 @@ static inline bool mem_cgroup_below_min(
 		page_counter_read(&memcg->memory);
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
+int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+			gfp_t gfp_mask);
+static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+				    gfp_t gfp_mask)
+{
+	if (mem_cgroup_disabled())
+		return 0;
+	return __mem_cgroup_charge(page, mm, gfp_mask);
+}
+
 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+void __mem_cgroup_uncharge(struct page *page);
+static inline void mem_cgroup_uncharge(struct page *page)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge(page);
+}
+
+void __mem_cgroup_uncharge_list(struct list_head *page_list);
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge_list(page_list);
+}
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
--- a/mm/memcontrol.c~mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config
+++ a/mm/memcontrol.c
@@ -6701,8 +6701,7 @@ void mem_cgroup_calculate_protection(str
 			atomic_long_read(&parent->memory.children_low_usage)));
 }
 
-static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
-			       gfp_t gfp)
+static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp)
 {
 	unsigned int nr_pages = thp_nr_pages(page);
 	int ret;
@@ -6723,7 +6722,7 @@ out:
 }
 
 /**
- * mem_cgroup_charge - charge a newly allocated page to a cgroup
+ * __mem_cgroup_charge - charge a newly allocated page to a cgroup
  * @page: page to charge
  * @mm: mm context of the victim
  * @gfp_mask: reclaim mode
@@ -6736,16 +6735,14 @@ out:
  *
  * Returns 0 on success. Otherwise, an error code is returned.
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+			gfp_t gfp_mask)
 {
 	struct mem_cgroup *memcg;
 	int ret;
 
-	if (mem_cgroup_disabled())
-		return 0;
-
 	memcg = get_mem_cgroup_from_mm(mm);
-	ret = __mem_cgroup_charge(page, memcg, gfp_mask);
+	ret = charge_memcg(page, memcg, gfp_mask);
 	css_put(&memcg->css);
 
 	return ret;
@@ -6780,7 +6777,7 @@ int mem_cgroup_swapin_charge_page(struct
 		memcg = get_mem_cgroup_from_mm(mm);
 	rcu_read_unlock();
 
-	ret = __mem_cgroup_charge(page, memcg, gfp);
+	ret = charge_memcg(page, memcg, gfp);
 
 	css_put(&memcg->css);
 	return ret;
@@ -6916,18 +6913,15 @@ static void uncharge_page(struct page *p
 }
 
 /**
- * mem_cgroup_uncharge - uncharge a page
+ * __mem_cgroup_uncharge - uncharge a page
  * @page: page to uncharge
  *
- * Uncharge a page previously charged with mem_cgroup_charge().
+ * Uncharge a page previously charged with __mem_cgroup_charge().
  */
-void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct page *page)
 {
 	struct uncharge_gather ug;
 
-	if (mem_cgroup_disabled())
-		return;
-
 	/* Don't touch page->lru of any random page, pre-check: */
 	if (!page_memcg(page))
 		return;
@@ -6938,20 +6932,17 @@ void mem_cgroup_uncharge(struct page *pa
 }
 
 /**
- * mem_cgroup_uncharge_list - uncharge a list of page
+ * __mem_cgroup_uncharge_list - uncharge a list of page
  * @page_list: list of pages to uncharge
  *
  * Uncharge a list of pages previously charged with
- * mem_cgroup_charge().
+ * __mem_cgroup_charge().
  */
-void mem_cgroup_uncharge_list(struct list_head *page_list)
+void __mem_cgroup_uncharge_list(struct list_head *page_list)
 {
 	struct uncharge_gather ug;
 	struct page *page;
 
-	if (mem_cgroup_disabled())
-		return;
-
 	uncharge_gather_clear(&ug);
 	list_for_each_entry(page, page_list, lru)
 		uncharge_page(page, &ug);
_

Patches currently in -mm which might be from surenb@google.com are

mm-memcg-add-mem_cgroup_disabled-checks-in-vmpressure-and-swap-related-functions.patch
mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch
mm-memcg-inline-swap-related-functions-to-improve-disabled-memcg-config.patch


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2021-07-14  1:36 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-09 22:22 + mm-memcg-inline-mem_cgroup_charge-uncharge-to-improve-disabled-memcg-config.patch added to -mm tree akpm
2021-07-09 23:35 ` Suren Baghdasaryan
2021-07-10  0:01   ` Andrew Morton
2021-07-10  0:24     ` Suren Baghdasaryan
2021-07-10  0:38       ` Suren Baghdasaryan
2021-07-14  1:36 akpm

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.