From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, almasrymina@google.com,
gthelen@google.com, linux-mm@kvack.org, mike.kravetz@oracle.com,
mm-commits@vger.kernel.org, rientjes@google.com,
sandipan@linux.ibm.com, shakeelb@google.com, shuah@kernel.org,
torvalds@linux-foundation.org
Subject: [patch 142/155] hugetlb_cgroup: add interface for charge/uncharge hugetlb reservations
Date: Wed, 01 Apr 2020 21:11:15 -0700
Message-ID: <20200402041115.Pg_TV1gbu%akpm@linux-foundation.org>
In-Reply-To: <20200401210155.09e3b9742e1c6e732f5a7250@linux-foundation.org>
From: Mina Almasry <almasrymina@google.com>
Subject: hugetlb_cgroup: add interface for charge/uncharge hugetlb reservations
Augments hugetlb_cgroup_charge_cgroup() so that it can charge either the
hugetlb usage counter or the new hugetlb reservation counter.

Adds a new interface, hugetlb_cgroup_uncharge_counter(), to uncharge a
hugetlb_cgroup counter directly.

Integrates the reservation counter with hugetlb_cgroup via
hugetlb_cgroup_init(), hugetlb_cgroup_have_usage(), and
hugetlb_cgroup_css_offline().
Link: http://lkml.kernel.org/r/20200211213128.73302-2-almasrymina@google.com
Signed-off-by: Mina Almasry <almasrymina@google.com>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Sandipan Das <sandipan@linux.ibm.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/hugetlb_cgroup.h | 123 ++++++++++++++++++---
mm/hugetlb.c | 2
mm/hugetlb_cgroup.c | 174 +++++++++++++++++++++++++------
3 files changed, 251 insertions(+), 48 deletions(-)
--- a/include/linux/hugetlb_cgroup.h~hugetlb_cgroup-add-interface-for-charge-uncharge-hugetlb-reservations
+++ a/include/linux/hugetlb_cgroup.h
@@ -20,32 +20,64 @@
struct hugetlb_cgroup;
/*
* Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
+ * At least 4 pages are necessary for all the tracking information.
+ * The second tail page (hpage[2]) is the fault usage cgroup.
+ * The third tail page (hpage[3]) is the reservation usage cgroup.
*/
#define HUGETLB_CGROUP_MIN_ORDER 2
#ifdef CONFIG_CGROUP_HUGETLB
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline struct hugetlb_cgroup *
+__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return NULL;
- return (struct hugetlb_cgroup *)page[2].private;
+ if (rsvd)
+ return (struct hugetlb_cgroup *)page[3].private;
+ else
+ return (struct hugetlb_cgroup *)page[2].private;
+}
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+ return __hugetlb_cgroup_from_page(page, false);
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_rsvd(struct page *page)
+{
+ return __hugetlb_cgroup_from_page(page, true);
+}
+
+static inline int __set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
return -1;
- page[2].private = (unsigned long)h_cg;
+ if (rsvd)
+ page[3].private = (unsigned long)h_cg;
+ else
+ page[2].private = (unsigned long)h_cg;
return 0;
}
+static inline int set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ return __set_hugetlb_cgroup(page, h_cg, false);
+}
+
+static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ return __set_hugetlb_cgroup(page, h_cg, true);
+}
+
static inline bool hugetlb_cgroup_disabled(void)
{
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
@@ -53,13 +85,27 @@ static inline bool hugetlb_cgroup_disabl
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
+extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page);
+extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
struct page *page);
+extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
+ struct page *page);
+
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_uncharge_counter(struct page_counter *p,
+ unsigned long nr_pages,
+ struct cgroup_subsys_state *css);
+
extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
struct page *newhpage);
@@ -70,8 +116,26 @@ static inline struct hugetlb_cgroup *hug
return NULL;
}
-static inline
-int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_resv(struct page *page)
+{
+ return NULL;
+}
+
+static inline struct hugetlb_cgroup *
+hugetlb_cgroup_from_page_rsvd(struct page *page)
+{
+ return NULL;
+}
+
+static inline int set_hugetlb_cgroup(struct page *page,
+ struct hugetlb_cgroup *h_cg)
+{
+ return 0;
+}
+
+static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+ struct hugetlb_cgroup *h_cg)
{
return 0;
}
@@ -81,28 +145,51 @@ static inline bool hugetlb_cgroup_disabl
return true;
}
-static inline int
-hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup **ptr)
+static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
{
return 0;
}
-static inline void
-hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg,
- struct page *page)
+static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
{
}
static inline void
-hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+}
+
+static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+ unsigned long nr_pages,
+ struct page *page)
+{
+}
+static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
+ unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
{
}
static inline void
-hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg)
+hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
{
}
--- a/mm/hugetlb.c~hugetlb_cgroup-add-interface-for-charge-uncharge-hugetlb-reservations
+++ a/mm/hugetlb.c
@@ -1072,6 +1072,7 @@ static void update_and_free_page(struct
1 << PG_writeback);
}
VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
+ VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
set_page_refcounted(page);
if (hstate_is_gigantic(h)) {
@@ -1257,6 +1258,7 @@ static void prep_new_huge_page(struct hs
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
spin_lock(&hugetlb_lock);
set_hugetlb_cgroup(page, NULL);
+ set_hugetlb_cgroup_rsvd(page, NULL);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
--- a/mm/hugetlb_cgroup.c~hugetlb_cgroup-add-interface-for-charge-uncharge-hugetlb-reservations
+++ a/mm/hugetlb_cgroup.c
@@ -61,14 +61,26 @@ struct hugetlb_cgroup {
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static inline struct page_counter *
-hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
- bool rsvd)
+__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
+ bool rsvd)
{
if (rsvd)
return &h_cg->rsvd_hugepage[idx];
return &h_cg->hugepage[idx];
}
+static inline struct page_counter *
+hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
+{
+ return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
+}
+
+static inline struct page_counter *
+hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
+{
+ return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
+}
+
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
@@ -97,8 +109,12 @@ static inline bool hugetlb_cgroup_have_u
int idx;
for (idx = 0; idx < hugetlb_max_hstate; idx++) {
- if (page_counter_read(&h_cg->hugepage[idx]))
+ if (page_counter_read(
+ hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) ||
+ page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd(
+ h_cg, idx))) {
return true;
+ }
}
return false;
}
@@ -109,18 +125,34 @@ static void hugetlb_cgroup_init(struct h
int idx;
for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
- struct page_counter *counter = &h_cgroup->hugepage[idx];
- struct page_counter *parent = NULL;
+ struct page_counter *fault_parent = NULL;
+ struct page_counter *rsvd_parent = NULL;
unsigned long limit;
int ret;
- if (parent_h_cgroup)
- parent = &parent_h_cgroup->hugepage[idx];
- page_counter_init(counter, parent);
+ if (parent_h_cgroup) {
+ fault_parent = hugetlb_cgroup_counter_from_cgroup(
+ parent_h_cgroup, idx);
+ rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
+ parent_h_cgroup, idx);
+ }
+ page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
+ idx),
+ fault_parent);
+ page_counter_init(
+ hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
+ rsvd_parent);
limit = round_down(PAGE_COUNTER_MAX,
1 << huge_page_order(&hstates[idx]));
- ret = page_counter_set_max(counter, limit);
+
+ ret = page_counter_set_max(
+ hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
+ limit);
+ VM_BUG_ON(ret);
+ ret = page_counter_set_max(
+ hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
+ limit);
VM_BUG_ON(ret);
}
}
@@ -150,7 +182,6 @@ static void hugetlb_cgroup_css_free(stru
kfree(h_cgroup);
}
-
/*
* Should be called with hugetlb_lock held.
* Since we are holding hugetlb_lock, pages cannot get moved from
@@ -227,8 +258,9 @@ static inline void hugetlb_event(struct
!hugetlb_cgroup_is_root(hugetlb));
}
-int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup **ptr)
+static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr,
+ bool rsvd)
{
int ret = 0;
struct page_counter *counter;
@@ -251,50 +283,103 @@ again:
}
rcu_read_unlock();
- if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
- &counter)) {
+ if (!page_counter_try_charge(
+ __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
+ nr_pages, &counter)) {
ret = -ENOMEM;
hugetlb_event(h_cg, idx, HUGETLB_MAX);
+ css_put(&h_cg->css);
+ goto done;
}
- css_put(&h_cg->css);
+ /* Reservations take a reference to the css because they do not get
+ * reparented.
+ */
+ if (!rsvd)
+ css_put(&h_cg->css);
done:
*ptr = h_cg;
return ret;
}
+int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
+}
+
+int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
+}
+
/* Should be called with hugetlb_lock held */
-void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg,
- struct page *page)
+static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page, bool rsvd)
{
if (hugetlb_cgroup_disabled() || !h_cg)
return;
- set_hugetlb_cgroup(page, h_cg);
+ __set_hugetlb_cgroup(page, h_cg, rsvd);
return;
}
+void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+ __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
+}
+
+void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+ __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
+}
+
/*
* Should be called with hugetlb_lock held
*/
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page)
+static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page, bool rsvd)
{
struct hugetlb_cgroup *h_cg;
if (hugetlb_cgroup_disabled())
return;
lockdep_assert_held(&hugetlb_lock);
- h_cg = hugetlb_cgroup_from_page(page);
+ h_cg = __hugetlb_cgroup_from_page(page, rsvd);
if (unlikely(!h_cg))
return;
- set_hugetlb_cgroup(page, NULL);
- page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
+ __set_hugetlb_cgroup(page, NULL, rsvd);
+
+ page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
+ rsvd),
+ nr_pages);
+
+ if (rsvd)
+ css_put(&h_cg->css);
+
return;
}
-void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
- struct hugetlb_cgroup *h_cg)
+void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+ __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+}
+
+void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+ __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+}
+
+static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ bool rsvd)
{
if (hugetlb_cgroup_disabled() || !h_cg)
return;
@@ -302,8 +387,35 @@ void hugetlb_cgroup_uncharge_cgroup(int
if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
return;
- page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
- return;
+ page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
+ rsvd),
+ nr_pages);
+
+ if (rsvd)
+ css_put(&h_cg->css);
+}
+
+void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+ __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
+}
+
+void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+ __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
+}
+
+void hugetlb_cgroup_uncharge_counter(struct page_counter *p,
+ unsigned long nr_pages,
+ struct cgroup_subsys_state *css)
+{
+ if (hugetlb_cgroup_disabled() || !p || !css)
+ return;
+
+ page_counter_uncharge(p, nr_pages);
+ css_put(css);
}
enum {
@@ -418,7 +530,7 @@ static ssize_t hugetlb_cgroup_write(stru
case RES_LIMIT:
mutex_lock(&hugetlb_limit_mutex);
ret = page_counter_set_max(
- hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
+ __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
nr_pages);
mutex_unlock(&hugetlb_limit_mutex);
break;
@@ -674,6 +786,7 @@ void __init hugetlb_cgroup_file_init(voi
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
struct hugetlb_cgroup *h_cg;
+ struct hugetlb_cgroup *h_cg_rsvd;
struct hstate *h = page_hstate(oldhpage);
if (hugetlb_cgroup_disabled())
@@ -682,10 +795,11 @@ void hugetlb_cgroup_migrate(struct page
VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
spin_lock(&hugetlb_lock);
h_cg = hugetlb_cgroup_from_page(oldhpage);
+ h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
set_hugetlb_cgroup(oldhpage, NULL);
/* move the h_cg details to new cgroup */
- set_hugetlb_cgroup(newhpage, h_cg);
+ set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
list_move(&newhpage->lru, &h->hugepage_activelist);
spin_unlock(&hugetlb_lock);
return;
_
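
For completeness, a minimal sketch (again hypothetical -- the real
integration with the reserve map is added by a later patch in this series)
of the contract hugetlb_cgroup_uncharge_counter() provides.  The struct and
function below are invented for illustration; only the uncharge call itself
comes from this patch:

	/* Illustrative holder for a charged reservation counter. */
	struct example_resv {
		struct page_counter *reservation_counter;	/* assumed field */
		struct cgroup_subsys_state *css;		/* assumed field */
		unsigned long nr_pages;
	};

	static void example_release_reservation(struct example_resv *resv)
	{
		/*
		 * Uncharges the counter and drops the css reference taken by
		 * hugetlb_cgroup_charge_cgroup_rsvd().  A NULL counter or css,
		 * or a disabled controller, makes this a no-op.
		 */
		hugetlb_cgroup_uncharge_counter(resv->reservation_counter,
						resv->nr_pages, resv->css);
	}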