mm-commits.vger.kernel.org archive mirror
From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, alex.williamson@redhat.com,
	alexander.h.duyck@linux.intel.com, corbet@lwn.net,
	dan.j.williams@intel.com, daniel.m.jordan@oracle.com,
	dave.hansen@linux.intel.com, david@redhat.com, elliott@hpe.com,
	herbert@gondor.apana.org.au, jgg@ziepe.ca, josh@joshtriplett.org,
	ktkhai@virtuozzo.com, linux-mm@kvack.org, mhocko@kernel.org,
	mm-commits@vger.kernel.org, pasha.tatashin@soleen.com,
	pavel@ucw.cz, peterz@infradead.org, rdunlap@infradead.org,
	shile.zhang@linux.alibaba.com, steffen.klassert@secunet.com,
	steven.sistare@oracle.com, tj@kernel.org,
	torvalds@linux-foundation.org, ziy@nvidia.com
Subject: [patch 054/131] padata: allocate work structures for parallel jobs from a pool
Date: Wed, 03 Jun 2020 15:59:39 -0700
Message-ID: <20200603225939.HQTPjz-NM%akpm@linux-foundation.org>
In-Reply-To: <20200603155549.e041363450869eaae4c7f05b@linux-foundation.org>

From: Daniel Jordan <daniel.m.jordan@oracle.com>
Subject: padata: allocate work structures for parallel jobs from a pool

padata allocates per-CPU, per-instance work structs for parallel jobs.  A
padata_do_parallel() call assigns each job a sequence number and hashes that
number to a CPU, where the job will eventually run using the corresponding
work struct.
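
For reference, the seq_nr-to-CPU step is essentially a modulo over the
weight of the parallel cpumask.  A simplified sketch of padata_cpu_hash()
(not the verbatim source; the diff below shows only its tail):

	static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
	{
		/* Map the sequence number onto the CPUs currently in use. */
		int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

		return padata_index_to_cpu(pd, cpu_index);
	}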

This approach fit with how padata used to bind a job to each CPU
round-robin, but it makes less sense after commit bfde23ce200e6 ("padata:
unbind parallel jobs from specific CPUs") because a work isn't bound to a
particular CPU anymore, and it isn't needed at all for multithreaded jobs
because they don't have sequence numbers.

Replace the per-CPU works with a preallocated pool, which allows sharing
them between existing padata users and the upcoming multithreaded user. 
The pool will also facilitate setting NUMA-aware concurrency limits with
later users.

The pool is sized according to the number of possible CPUs.  With this
limit, MAX_OBJ_NUM no longer makes sense, so remove it.

If the global pool is exhausted, a parallel job is run in the current task
instead to throttle a system trying to do too much in parallel.
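
A consequence worth noting for callers (an illustrative sketch, not part of
this patch): since padata_do_parallel() may now run the parallel callback
synchronously when the pool is empty, the callback must not assume it is
running from a workqueue worker:

	/* Hypothetical padata user; my_parallel() is not from this patch. */
	static void my_parallel(struct padata_priv *padata)
	{
		/* May run either in a pool work or in the submitting task. */
		do_the_work(padata);	/* placeholder for the real job */
		padata_do_serial(padata);
	}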

Link: http://lkml.kernel.org/r/20200527173608.2885243-4-daniel.m.jordan@oracle.com
Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Tested-by: Josh Triplett <josh@joshtriplett.org>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Robert Elliott <elliott@hpe.com>
Cc: Shile Zhang <shile.zhang@linux.alibaba.com>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 include/linux/padata.h |    8 --
 kernel/padata.c        |  118 +++++++++++++++++++++++++--------------
 2 files changed, 78 insertions(+), 48 deletions(-)

--- a/include/linux/padata.h~padata-allocate-work-structures-for-parallel-jobs-from-a-pool
+++ a/include/linux/padata.h
@@ -24,7 +24,6 @@
  * @list: List entry, to attach to the padata lists.
  * @pd: Pointer to the internal control structure.
  * @cb_cpu: Callback cpu for serializatioon.
- * @cpu: Cpu for parallelization.
  * @seq_nr: Sequence number of the parallelized data object.
  * @info: Used to pass information from the parallel to the serial function.
  * @parallel: Parallel execution function.
@@ -34,7 +33,6 @@ struct padata_priv {
 	struct list_head	list;
 	struct parallel_data	*pd;
 	int			cb_cpu;
-	int			cpu;
 	unsigned int		seq_nr;
 	int			info;
 	void                    (*parallel)(struct padata_priv *padata);
@@ -68,15 +66,11 @@ struct padata_serial_queue {
 /**
  * struct padata_parallel_queue - The percpu padata parallel queue
  *
- * @parallel: List to wait for parallelization.
  * @reorder: List to wait for reordering after parallel processing.
- * @work: work struct for parallelization.
  * @num_obj: Number of objects that are processed by this cpu.
  */
 struct padata_parallel_queue {
-       struct padata_list    parallel;
        struct padata_list    reorder;
-       struct work_struct    work;
        atomic_t              num_obj;
 };
 
@@ -111,7 +105,7 @@ struct parallel_data {
 	struct padata_parallel_queue	__percpu *pqueue;
 	struct padata_serial_queue	__percpu *squeue;
 	atomic_t			refcnt;
-	atomic_t			seq_nr;
+	unsigned int			seq_nr;
 	unsigned int			processed;
 	int				cpu;
 	struct padata_cpumask		cpumask;
--- a/kernel/padata.c~padata-allocate-work-structures-for-parallel-jobs-from-a-pool
+++ a/kernel/padata.c
@@ -32,7 +32,15 @@
 #include <linux/sysfs.h>
 #include <linux/rcupdate.h>
 
-#define MAX_OBJ_NUM 1000
+struct padata_work {
+	struct work_struct	pw_work;
+	struct list_head	pw_list;  /* padata_free_works linkage */
+	void			*pw_data;
+};
+
+static DEFINE_SPINLOCK(padata_works_lock);
+static struct padata_work *padata_works;
+static LIST_HEAD(padata_free_works);
 
 static void padata_free_pd(struct parallel_data *pd);
 
@@ -58,30 +66,44 @@ static int padata_cpu_hash(struct parall
 	return padata_index_to_cpu(pd, cpu_index);
 }
 
-static void padata_parallel_worker(struct work_struct *parallel_work)
+static struct padata_work *padata_work_alloc(void)
 {
-	struct padata_parallel_queue *pqueue;
-	LIST_HEAD(local_list);
+	struct padata_work *pw;
 
-	local_bh_disable();
-	pqueue = container_of(parallel_work,
-			      struct padata_parallel_queue, work);
+	lockdep_assert_held(&padata_works_lock);
 
-	spin_lock(&pqueue->parallel.lock);
-	list_replace_init(&pqueue->parallel.list, &local_list);
-	spin_unlock(&pqueue->parallel.lock);
+	if (list_empty(&padata_free_works))
+		return NULL;	/* No more work items allowed to be queued. */
 
-	while (!list_empty(&local_list)) {
-		struct padata_priv *padata;
+	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
+	list_del(&pw->pw_list);
+	return pw;
+}
 
-		padata = list_entry(local_list.next,
-				    struct padata_priv, list);
+static void padata_work_init(struct padata_work *pw, work_func_t work_fn,
+			     void *data)
+{
+	INIT_WORK(&pw->pw_work, work_fn);
+	pw->pw_data = data;
+}
 
-		list_del_init(&padata->list);
+static void padata_work_free(struct padata_work *pw)
+{
+	lockdep_assert_held(&padata_works_lock);
+	list_add(&pw->pw_list, &padata_free_works);
+}
 
-		padata->parallel(padata);
-	}
+static void padata_parallel_worker(struct work_struct *parallel_work)
+{
+	struct padata_work *pw = container_of(parallel_work, struct padata_work,
+					      pw_work);
+	struct padata_priv *padata = pw->pw_data;
 
+	local_bh_disable();
+	padata->parallel(padata);
+	spin_lock(&padata_works_lock);
+	padata_work_free(pw);
+	spin_unlock(&padata_works_lock);
 	local_bh_enable();
 }
 
@@ -105,9 +127,9 @@ int padata_do_parallel(struct padata_she
 		       struct padata_priv *padata, int *cb_cpu)
 {
 	struct padata_instance *pinst = ps->pinst;
-	int i, cpu, cpu_index, target_cpu, err;
-	struct padata_parallel_queue *queue;
+	int i, cpu, cpu_index, err;
 	struct parallel_data *pd;
+	struct padata_work *pw;
 
 	rcu_read_lock_bh();
 
@@ -135,25 +157,25 @@ int padata_do_parallel(struct padata_she
 	if ((pinst->flags & PADATA_RESET))
 		goto out;
 
-	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
-		goto out;
-
-	err = 0;
 	atomic_inc(&pd->refcnt);
 	padata->pd = pd;
 	padata->cb_cpu = *cb_cpu;
 
-	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
-	target_cpu = padata_cpu_hash(pd, padata->seq_nr);
-	padata->cpu = target_cpu;
-	queue = per_cpu_ptr(pd->pqueue, target_cpu);
-
-	spin_lock(&queue->parallel.lock);
-	list_add_tail(&padata->list, &queue->parallel.list);
-	spin_unlock(&queue->parallel.lock);
+	rcu_read_unlock_bh();
 
-	queue_work(pinst->parallel_wq, &queue->work);
+	spin_lock(&padata_works_lock);
+	padata->seq_nr = ++pd->seq_nr;
+	pw = padata_work_alloc();
+	spin_unlock(&padata_works_lock);
+	if (pw) {
+		padata_work_init(pw, padata_parallel_worker, padata);
+		queue_work(pinst->parallel_wq, &pw->pw_work);
+	} else {
+		/* Maximum works limit exceeded, run in the current task. */
+		padata->parallel(padata);
+	}
 
+	return 0;
 out:
 	rcu_read_unlock_bh();
 
@@ -324,8 +346,9 @@ static void padata_serial_worker(struct
 void padata_do_serial(struct padata_priv *padata)
 {
 	struct parallel_data *pd = padata->pd;
+	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
 	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
-							   padata->cpu);
+							   hashed_cpu);
 	struct padata_priv *cur;
 
 	spin_lock(&pqueue->reorder.lock);
@@ -416,8 +439,6 @@ static void padata_init_pqueues(struct p
 		pqueue = per_cpu_ptr(pd->pqueue, cpu);
 
 		__padata_list_init(&pqueue->reorder);
-		__padata_list_init(&pqueue->parallel);
-		INIT_WORK(&pqueue->work, padata_parallel_worker);
 		atomic_set(&pqueue->num_obj, 0);
 	}
 }
@@ -451,7 +472,7 @@ static struct parallel_data *padata_allo
 
 	padata_init_pqueues(pd);
 	padata_init_squeues(pd);
-	atomic_set(&pd->seq_nr, -1);
+	pd->seq_nr = -1;
 	atomic_set(&pd->refcnt, 1);
 	spin_lock_init(&pd->lock);
 	pd->cpu = cpumask_first(pd->cpumask.pcpu);
@@ -1053,6 +1074,7 @@ EXPORT_SYMBOL(padata_free_shell);
 
 void __init padata_init(void)
 {
+	unsigned int i, possible_cpus;
 #ifdef CONFIG_HOTPLUG_CPU
 	int ret;
 
@@ -1064,13 +1086,27 @@ void __init padata_init(void)
 
 	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
 				      NULL, padata_cpu_dead);
-	if (ret < 0) {
-		cpuhp_remove_multi_state(hp_online);
-		goto err;
-	}
+	if (ret < 0)
+		goto remove_online_state;
+#endif
+
+	possible_cpus = num_possible_cpus();
+	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
+				     GFP_KERNEL);
+	if (!padata_works)
+		goto remove_dead_state;
+
+	for (i = 0; i < possible_cpus; ++i)
+		list_add(&padata_works[i].pw_list, &padata_free_works);
 
 	return;
+
+remove_dead_state:
+#ifdef CONFIG_HOTPLUG_CPU
+	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
+remove_online_state:
+	cpuhp_remove_multi_state(hp_online);
 err:
-	pr_warn("padata: initialization failed\n");
 #endif
+	pr_warn("padata: initialization failed\n");
 }
_
