From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, david@fromorbit.com, guro@fb.com,
hannes@cmpxchg.org, ktkhai@virtuozzo.com, linux-mm@kvack.org,
mhocko@suse.com, mm-commits@vger.kernel.org, shakeelb@google.com,
shy828301@gmail.com, torvalds@linux-foundation.org,
vbabka@suse.cz
Subject: [patch 063/143] mm: vmscan: consolidate shrinker_maps handling code
Date: Tue, 04 May 2021 18:36:11 -0700
Message-ID: <20210505013611.RFDjFmCFZ%akpm@linux-foundation.org>
In-Reply-To: <20210504183219.a3cc46aee4013d77402276c5@linux-foundation.org>
From: Yang Shi <shy828301@gmail.com>
Subject: mm: vmscan: consolidate shrinker_maps handling code

The shrinker map management is not purely memcg specific; it sits at the
intersection between memory cgroups and shrinkers.  It is the allocation
and assignment of a structure, and the only memcg-specific part is that
the map is stored in a memcg structure.  So move the shrinker_maps
handling code into vmscan.c for tighter integration with the shrinker
code, and remove the "memcg_" prefix.  There is no functional change.
Link: https://lkml.kernel.org/r/20210311190845.9708-3-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
include/linux/memcontrol.h | 11 +-
mm/huge_memory.c | 4 -
mm/list_lru.c | 6 -
mm/memcontrol.c | 130 ----------------------------------
mm/vmscan.c | 132 ++++++++++++++++++++++++++++++++++-
5 files changed, 142 insertions(+), 141 deletions(-)
--- a/include/linux/memcontrol.h~mm-vmscan-consolidate-shrinker_maps-handling-code
+++ a/include/linux/memcontrol.h
@@ -1610,10 +1610,9 @@ static inline bool mem_cgroup_under_sock
return false;
}
-extern int memcg_expand_shrinker_maps(int new_id);
-
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id);
+int alloc_shrinker_maps(struct mem_cgroup *memcg);
+void free_shrinker_maps(struct mem_cgroup *memcg);
+void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1623,8 +1622,8 @@ static inline bool mem_cgroup_under_sock
return false;
}
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
- int nid, int shrinker_id)
+static inline void set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id)
{
}
#endif
--- a/mm/huge_memory.c~mm-vmscan-consolidate-shrinker_maps-handling-code
+++ a/mm/huge_memory.c
@@ -2830,8 +2830,8 @@ void deferred_split_huge_page(struct pag
ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
if (memcg)
- memcg_set_shrinker_bit(memcg, page_to_nid(page),
- deferred_split_shrinker.id);
+ set_shrinker_bit(memcg, page_to_nid(page),
+ deferred_split_shrinker.id);
#endif
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
--- a/mm/list_lru.c~mm-vmscan-consolidate-shrinker_maps-handling-code
+++ a/mm/list_lru.c
@@ -125,8 +125,8 @@ bool list_lru_add(struct list_lru *lru,
list_add_tail(item, &l->list);
/* Set shrinker bit if the first element was added */
if (!l->nr_items++)
- memcg_set_shrinker_bit(memcg, nid,
- lru_shrinker_id(lru));
+ set_shrinker_bit(memcg, nid,
+ lru_shrinker_id(lru));
nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
@@ -540,7 +540,7 @@ static void memcg_drain_list_lru_node(st
if (src->nr_items) {
dst->nr_items += src->nr_items;
- memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
+ set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
src->nr_items = 0;
}
--- a/mm/memcontrol.c~mm-vmscan-consolidate-shrinker_maps-handling-code
+++ a/mm/memcontrol.c
@@ -400,130 +400,6 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabl
EXPORT_SYMBOL(memcg_kmem_enabled_key);
#endif
-static int memcg_shrinker_map_size;
-static DEFINE_MUTEX(memcg_shrinker_map_mutex);
-
-static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
-{
- kvfree(container_of(head, struct memcg_shrinker_map, rcu));
-}
-
-static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
- int size, int old_size)
-{
- struct memcg_shrinker_map *new, *old;
- struct mem_cgroup_per_node *pn;
- int nid;
-
- lockdep_assert_held(&memcg_shrinker_map_mutex);
-
- for_each_node(nid) {
- pn = memcg->nodeinfo[nid];
- old = rcu_dereference_protected(pn->shrinker_map, true);
- /* Not yet online memcg */
- if (!old)
- return 0;
-
- new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
- if (!new)
- return -ENOMEM;
-
- /* Set all old bits, clear all new bits */
- memset(new->map, (int)0xff, old_size);
- memset((void *)new->map + old_size, 0, size - old_size);
-
- rcu_assign_pointer(pn->shrinker_map, new);
- call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
- }
-
- return 0;
-}
-
-static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
-{
- struct mem_cgroup_per_node *pn;
- struct memcg_shrinker_map *map;
- int nid;
-
- if (mem_cgroup_is_root(memcg))
- return;
-
- for_each_node(nid) {
- pn = memcg->nodeinfo[nid];
- map = rcu_dereference_protected(pn->shrinker_map, true);
- kvfree(map);
- rcu_assign_pointer(pn->shrinker_map, NULL);
- }
-}
-
-static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
-{
- struct memcg_shrinker_map *map;
- int nid, size, ret = 0;
-
- if (mem_cgroup_is_root(memcg))
- return 0;
-
- mutex_lock(&memcg_shrinker_map_mutex);
- size = memcg_shrinker_map_size;
- for_each_node(nid) {
- map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
- if (!map) {
- memcg_free_shrinker_maps(memcg);
- ret = -ENOMEM;
- break;
- }
- rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
- }
- mutex_unlock(&memcg_shrinker_map_mutex);
-
- return ret;
-}
-
-int memcg_expand_shrinker_maps(int new_id)
-{
- int size, old_size, ret = 0;
- struct mem_cgroup *memcg;
-
- size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
- old_size = memcg_shrinker_map_size;
- if (size <= old_size)
- return 0;
-
- mutex_lock(&memcg_shrinker_map_mutex);
- if (!root_mem_cgroup)
- goto unlock;
-
- for_each_mem_cgroup(memcg) {
- if (mem_cgroup_is_root(memcg))
- continue;
- ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
- if (ret) {
- mem_cgroup_iter_break(NULL, memcg);
- goto unlock;
- }
- }
-unlock:
- if (!ret)
- memcg_shrinker_map_size = size;
- mutex_unlock(&memcg_shrinker_map_mutex);
- return ret;
-}
-
-void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
-{
- if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
- struct memcg_shrinker_map *map;
-
- rcu_read_lock();
- map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
- /* Pairs with smp mb in shrink_slab() */
- smp_mb__before_atomic();
- set_bit(shrinker_id, map->map);
- rcu_read_unlock();
- }
-}
-
/**
* mem_cgroup_css_from_page - css of the memcg associated with a page
* @page: page of interest
@@ -5242,11 +5118,11 @@ static int mem_cgroup_css_online(struct
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
/*
- * A memcg must be visible for memcg_expand_shrinker_maps()
+ * A memcg must be visible for expand_shrinker_maps()
* by the time the maps are allocated. So, we allocate maps
* here, when for_each_mem_cgroup() can't skip it.
*/
- if (memcg_alloc_shrinker_maps(memcg)) {
+ if (alloc_shrinker_maps(memcg)) {
mem_cgroup_id_remove(memcg);
return -ENOMEM;
}
@@ -5310,7 +5186,7 @@ static void mem_cgroup_css_free(struct c
vmpressure_cleanup(&memcg->vmpressure);
cancel_work_sync(&memcg->high_work);
mem_cgroup_remove_from_trees(memcg);
- memcg_free_shrinker_maps(memcg);
+ free_shrinker_maps(memcg);
memcg_free_kmem(memcg);
mem_cgroup_free(memcg);
}
--- a/mm/vmscan.c~mm-vmscan-consolidate-shrinker_maps-handling-code
+++ a/mm/vmscan.c
@@ -185,6 +185,132 @@ static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
#ifdef CONFIG_MEMCG
+
+static int memcg_shrinker_map_size;
+static DEFINE_MUTEX(memcg_shrinker_map_mutex);
+
+static void free_shrinker_map_rcu(struct rcu_head *head)
+{
+ kvfree(container_of(head, struct memcg_shrinker_map, rcu));
+}
+
+static int expand_one_shrinker_map(struct mem_cgroup *memcg,
+ int size, int old_size)
+{
+ struct memcg_shrinker_map *new, *old;
+ struct mem_cgroup_per_node *pn;
+ int nid;
+
+ lockdep_assert_held(&memcg_shrinker_map_mutex);
+
+ for_each_node(nid) {
+ pn = memcg->nodeinfo[nid];
+ old = rcu_dereference_protected(pn->shrinker_map, true);
+ /* Not yet online memcg */
+ if (!old)
+ return 0;
+
+ new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
+ if (!new)
+ return -ENOMEM;
+
+ /* Set all old bits, clear all new bits */
+ memset(new->map, (int)0xff, old_size);
+ memset((void *)new->map + old_size, 0, size - old_size);
+
+ rcu_assign_pointer(pn->shrinker_map, new);
+ call_rcu(&old->rcu, free_shrinker_map_rcu);
+ }
+
+ return 0;
+}
+
+void free_shrinker_maps(struct mem_cgroup *memcg)
+{
+ struct mem_cgroup_per_node *pn;
+ struct memcg_shrinker_map *map;
+ int nid;
+
+ if (mem_cgroup_is_root(memcg))
+ return;
+
+ for_each_node(nid) {
+ pn = memcg->nodeinfo[nid];
+ map = rcu_dereference_protected(pn->shrinker_map, true);
+ kvfree(map);
+ rcu_assign_pointer(pn->shrinker_map, NULL);
+ }
+}
+
+int alloc_shrinker_maps(struct mem_cgroup *memcg)
+{
+ struct memcg_shrinker_map *map;
+ int nid, size, ret = 0;
+
+ if (mem_cgroup_is_root(memcg))
+ return 0;
+
+ mutex_lock(&memcg_shrinker_map_mutex);
+ size = memcg_shrinker_map_size;
+ for_each_node(nid) {
+ map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
+ if (!map) {
+ free_shrinker_maps(memcg);
+ ret = -ENOMEM;
+ break;
+ }
+ rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
+ }
+ mutex_unlock(&memcg_shrinker_map_mutex);
+
+ return ret;
+}
+
+static int expand_shrinker_maps(int new_id)
+{
+ int size, old_size, ret = 0;
+ struct mem_cgroup *memcg;
+
+ size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
+ old_size = memcg_shrinker_map_size;
+ if (size <= old_size)
+ return 0;
+
+ mutex_lock(&memcg_shrinker_map_mutex);
+ if (!root_mem_cgroup)
+ goto unlock;
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ if (mem_cgroup_is_root(memcg))
+ continue;
+ ret = expand_one_shrinker_map(memcg, size, old_size);
+ if (ret) {
+ mem_cgroup_iter_break(NULL, memcg);
+ goto unlock;
+ }
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+unlock:
+ if (!ret)
+ memcg_shrinker_map_size = size;
+ mutex_unlock(&memcg_shrinker_map_mutex);
+ return ret;
+}
+
+void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
+{
+ if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
+ struct memcg_shrinker_map *map;
+
+ rcu_read_lock();
+ map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
+ /* Pairs with smp mb in shrink_slab() */
+ smp_mb__before_atomic();
+ set_bit(shrinker_id, map->map);
+ rcu_read_unlock();
+ }
+}
+
/*
* We allow subsystems to populate their shrinker-related
* LRU lists before register_shrinker_prepared() is called
@@ -212,7 +338,7 @@ static int prealloc_memcg_shrinker(struc
goto unlock;
if (id >= shrinker_nr_max) {
- if (memcg_expand_shrinker_maps(id)) {
+ if (expand_shrinker_maps(id)) {
idr_remove(&shrinker_idr, id);
goto unlock;
}
@@ -590,7 +716,7 @@ static unsigned long shrink_slab_memcg(g
* case, we invoke the shrinker one more time and reset
* the bit if it reports that it is not empty anymore.
* The memory barrier here pairs with the barrier in
- * memcg_set_shrinker_bit():
+ * set_shrinker_bit():
*
* list_lru_add() shrink_slab_memcg()
* list_add_tail() clear_bit()
@@ -602,7 +728,7 @@ static unsigned long shrink_slab_memcg(g
if (ret == SHRINK_EMPTY)
ret = 0;
else
- memcg_set_shrinker_bit(memcg, nid, i);
+ set_shrinker_bit(memcg, nid, i);
}
freed += ret;
_