mm-commits.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, catalin.marinas@arm.com,
	kirill.shtuemov@linux.intel.com, linux-mm@kvack.org,
	linux@roeck-us.net, mick@ics.forth.gr,
	mm-commits@vger.kernel.org, rafael.j.wysocki@intel.com,
	rmk+kernel@armlinux.org.uk, robh@kernel.org, rppt@linux.ibm.com,
	torvalds@linux-foundation.org
Subject: [patch 197/212] memblock: make memblock_find_in_range method private
Date: Thu, 02 Sep 2021 15:00:26 -0700	[thread overview]
Message-ID: <20210902220026.q4AhgXDR6%akpm@linux-foundation.org> (raw)
In-Reply-To: <20210902144820.78957dff93d7bea620d55a89@linux-foundation.org>

From: Mike Rapoport <rppt@linux.ibm.com>
Subject: memblock: make memblock_find_in_range method private

There are a lot of uses of memblock_find_in_range() along with
memblock_reserve() from the times memblock allocation APIs did not exist.

memblock_find_in_range() is the very core of memblock allocations, so any
future changes to its internal behaviour would mandate updates of all the
users outside memblock.

Replace the calls to memblock_find_in_range() with equivalent calls to
memblock_phys_alloc() and memblock_phys_alloc_range() and make
memblock_find_in_range() a private method of memblock.

This simplifies the callers, ensures that (unlikely) errors in
memblock_reserve() are handled and improves maintainability of
memblock_find_in_range().

Link: https://lkml.kernel.org/r/20210816122622.30279-1-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>		[arm64]
Acked-by: Kirill A. Shutemov <kirill.shtuemov@linux.intel.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>	[ACPI]
Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Acked-by: Nick Kossifidis <mick@ics.forth.gr>			[riscv]
Tested-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 arch/arm/kernel/setup.c           |   20 ++++--------
 arch/arm64/kvm/hyp/reserved_mem.c |    9 +----
 arch/arm64/mm/init.c              |   38 +++++++-----------------
 arch/mips/kernel/setup.c          |   14 +++-----
 arch/riscv/mm/init.c              |   44 ++++++++--------------------
 arch/s390/kernel/setup.c          |    9 +++--
 arch/x86/kernel/aperture_64.c     |    5 +--
 arch/x86/mm/init.c                |   23 +++++++++-----
 arch/x86/mm/numa.c                |    5 +--
 arch/x86/mm/numa_emulation.c      |    5 +--
 arch/x86/realmode/init.c          |    2 -
 drivers/acpi/tables.c             |    5 +--
 drivers/base/arch_numa.c          |    5 ---
 drivers/of/of_reserved_mem.c      |   12 +++++--
 include/linux/memblock.h          |    2 -
 mm/memblock.c                     |    2 -
 16 files changed, 82 insertions(+), 118 deletions(-)

--- a/arch/arm64/kvm/hyp/reserved_mem.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/arm64/kvm/hyp/reserved_mem.c
@@ -92,12 +92,10 @@ void __init kvm_hyp_reserve(void)
 	 * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
 	 */
 	hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
-	hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
-					      ALIGN(hyp_mem_size, PMD_SIZE),
-					      PMD_SIZE);
+	hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
+					   PMD_SIZE);
 	if (!hyp_mem_base)
-		hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
-						      hyp_mem_size, PAGE_SIZE);
+		hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
 	else
 		hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);
 
@@ -105,7 +103,6 @@ void __init kvm_hyp_reserve(void)
 		kvm_err("Failed to reserve hyp memory\n");
 		return;
 	}
-	memblock_reserve(hyp_mem_base, hyp_mem_size);
 
 	kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
 		 hyp_mem_base);
--- a/arch/arm64/mm/init.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/arm64/mm/init.c
@@ -74,6 +74,7 @@ phys_addr_t arm64_dma_phys_limit __ro_af
 static void __init reserve_crashkernel(void)
 {
 	unsigned long long crash_base, crash_size;
+	unsigned long long crash_max = arm64_dma_phys_limit;
 	int ret;
 
 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
@@ -84,33 +85,18 @@ static void __init reserve_crashkernel(v
 
 	crash_size = PAGE_ALIGN(crash_size);
 
-	if (crash_base == 0) {
-		/* Current arm64 boot protocol requires 2MB alignment */
-		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
-				crash_size, SZ_2M);
-		if (crash_base == 0) {
-			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
-				crash_size);
-			return;
-		}
-	} else {
-		/* User specifies base address explicitly. */
-		if (!memblock_is_region_memory(crash_base, crash_size)) {
-			pr_warn("cannot reserve crashkernel: region is not memory\n");
-			return;
-		}
-
-		if (memblock_is_region_reserved(crash_base, crash_size)) {
-			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
-			return;
-		}
-
-		if (!IS_ALIGNED(crash_base, SZ_2M)) {
-			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
-			return;
-		}
+	/* User specifies base address explicitly. */
+	if (crash_base)
+		crash_max = crash_base + crash_size;
+
+	/* Current arm64 boot protocol requires 2MB alignment */
+	crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+					       crash_base, crash_max);
+	if (!crash_base) {
+		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
+			crash_size);
+		return;
 	}
-	memblock_reserve(crash_base, crash_size);
 
 	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
 		crash_base, crash_base + crash_size, crash_size >> 20);
--- a/arch/arm/kernel/setup.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/arm/kernel/setup.c
@@ -1012,31 +1012,25 @@ static void __init reserve_crashkernel(v
 		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
 		if (crash_max > lowmem_max)
 			crash_max = lowmem_max;
-		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
-						    crash_size, CRASH_ALIGN);
+
+		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+						       CRASH_ALIGN, crash_max);
 		if (!crash_base) {
 			pr_err("crashkernel reservation failed - No suitable area found.\n");
 			return;
 		}
 	} else {
+		unsigned long long crash_max = crash_base + crash_size;
 		unsigned long long start;
 
-		start = memblock_find_in_range(crash_base,
-					       crash_base + crash_size,
-					       crash_size, SECTION_SIZE);
-		if (start != crash_base) {
+		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
+						  crash_base, crash_max);
+		if (!start) {
 			pr_err("crashkernel reservation failed - memory is in use.\n");
 			return;
 		}
 	}
 
-	ret = memblock_reserve(crash_base, crash_size);
-	if (ret < 0) {
-		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
-			(unsigned long)crash_base);
-		return;
-	}
-
 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
 		(unsigned long)(crash_size >> 20),
 		(unsigned long)(crash_base >> 20),
--- a/arch/mips/kernel/setup.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/mips/kernel/setup.c
@@ -452,8 +452,9 @@ static void __init mips_parse_crashkerne
 		return;
 
 	if (crash_base <= 0) {
-		crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX,
-							crash_size, CRASH_ALIGN);
+		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+						       CRASH_ALIGN,
+						       CRASH_ADDR_MAX);
 		if (!crash_base) {
 			pr_warn("crashkernel reservation failed - No suitable area found.\n");
 			return;
@@ -461,8 +462,9 @@ static void __init mips_parse_crashkerne
 	} else {
 		unsigned long long start;
 
-		start = memblock_find_in_range(crash_base, crash_base + crash_size,
-						crash_size, 1);
+		start = memblock_phys_alloc_range(crash_size, 1,
+						  crash_base,
+						  crash_base + crash_size);
 		if (start != crash_base) {
 			pr_warn("Invalid memory region reserved for crash kernel\n");
 			return;
@@ -656,10 +658,6 @@ static void __init arch_mem_init(char **
 	mips_reserve_vmcore();
 
 	mips_parse_crashkernel();
-#ifdef CONFIG_KEXEC
-	if (crashk_res.start != crashk_res.end)
-		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
-#endif
 	device_tree_init();
 
 	/*
--- a/arch/riscv/mm/init.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/riscv/mm/init.c
@@ -819,38 +819,22 @@ static void __init reserve_crashkernel(v
 
 	crash_size = PAGE_ALIGN(crash_size);
 
-	if (crash_base == 0) {
-		/*
-		 * Current riscv boot protocol requires 2MB alignment for
-		 * RV64 and 4MB alignment for RV32 (hugepage size)
-		 */
-		crash_base = memblock_find_in_range(search_start, search_end,
-						    crash_size, PMD_SIZE);
-
-		if (crash_base == 0) {
-			pr_warn("crashkernel: couldn't allocate %lldKB\n",
-				crash_size >> 10);
-			return;
-		}
-	} else {
-		/* User specifies base address explicitly. */
-		if (!memblock_is_region_memory(crash_base, crash_size)) {
-			pr_warn("crashkernel: requested region is not memory\n");
-			return;
-		}
-
-		if (memblock_is_region_reserved(crash_base, crash_size)) {
-			pr_warn("crashkernel: requested region is reserved\n");
-			return;
-		}
-
+	if (crash_base) {
+		search_start = crash_base;
+		search_end = crash_base + crash_size;
+	}
 
-		if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
-			pr_warn("crashkernel: requested region is misaligned\n");
-			return;
-		}
+	/*
+	 * Current riscv boot protocol requires 2MB alignment for
+	 * RV64 and 4MB alignment for RV32 (hugepage size)
+	 */
+	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+					       search_start, search_end);
+	if (crash_base == 0) {
+		pr_warn("crashkernel: couldn't allocate %lldKB\n",
+			crash_size >> 10);
+		return;
 	}
-	memblock_reserve(crash_base, crash_size);
 
 	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
 		crash_base, crash_base + crash_size, crash_size >> 20);
--- a/arch/s390/kernel/setup.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/s390/kernel/setup.c
@@ -626,8 +626,9 @@ static void __init reserve_crashkernel(v
 			return;
 		}
 		low = crash_base ?: low;
-		crash_base = memblock_find_in_range(low, high, crash_size,
-						    KEXEC_CRASH_MEM_ALIGN);
+		crash_base = memblock_phys_alloc_range(crash_size,
+						       KEXEC_CRASH_MEM_ALIGN,
+						       low, high);
 	}
 
 	if (!crash_base) {
@@ -636,8 +637,10 @@ static void __init reserve_crashkernel(v
 		return;
 	}
 
-	if (register_memory_notifier(&kdump_mem_nb))
+	if (register_memory_notifier(&kdump_mem_nb)) {
+		memblock_free(crash_base, crash_size);
 		return;
+	}
 
 	if (!OLDMEM_BASE && MACHINE_IS_VM)
 		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
--- a/arch/x86/kernel/aperture_64.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/x86/kernel/aperture_64.c
@@ -109,14 +109,13 @@ static u32 __init allocate_aperture(void
 	 * memory. Unfortunately we cannot move it up because that would
 	 * make the IOMMU useless.
 	 */
-	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
-				      aper_size, aper_size);
+	addr = memblock_phys_alloc_range(aper_size, aper_size,
+					 GART_MIN_ADDR, GART_MAX_ADDR);
 	if (!addr) {
 		pr_err("Cannot allocate aperture memory hole [mem %#010lx-%#010lx] (%uKB)\n",
 		       addr, addr + aper_size - 1, aper_size >> 10);
 		return 0;
 	}
-	memblock_reserve(addr, aper_size);
 	pr_info("Mapping aperture over RAM [mem %#010lx-%#010lx] (%uKB)\n",
 		addr, addr + aper_size - 1, aper_size >> 10);
 	register_nosave_region(addr >> PAGE_SHIFT,
--- a/arch/x86/mm/init.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/x86/mm/init.c
@@ -127,14 +127,12 @@ __ref void *alloc_low_pages(unsigned int
 		unsigned long ret = 0;
 
 		if (min_pfn_mapped < max_pfn_mapped) {
-			ret = memblock_find_in_range(
+			ret = memblock_phys_alloc_range(
+					PAGE_SIZE * num, PAGE_SIZE,
 					min_pfn_mapped << PAGE_SHIFT,
-					max_pfn_mapped << PAGE_SHIFT,
-					PAGE_SIZE * num , PAGE_SIZE);
+					max_pfn_mapped << PAGE_SHIFT);
 		}
-		if (ret)
-			memblock_reserve(ret, PAGE_SIZE * num);
-		else if (can_use_brk_pgt)
+		if (!ret && can_use_brk_pgt)
 			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
 
 		if (!ret)
@@ -610,8 +608,17 @@ static void __init memory_map_top_down(u
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
 
-	/* xen has big range in reserved near end of ram, skip it at first.*/
-	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
+	/*
+	 * Systems that have many reserved areas near top of the memory,
+	 * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will
+	 * require lots of 4K mappings which may exhaust pgt_buf.
+	 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure
+	 * there is enough mapped memory that can be allocated from
+	 * memblock.
+	 */
+	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
+					 map_end);
+	memblock_free(addr, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size need to be small so pgt_buf from BRK could cover it */
--- a/arch/x86/mm/numa.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/x86/mm/numa.c
@@ -376,15 +376,14 @@ static int __init numa_alloc_distance(vo
 	cnt++;
 	size = cnt * cnt * sizeof(numa_distance[0]);
 
-	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
-				      size, PAGE_SIZE);
+	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
+					 PFN_PHYS(max_pfn_mapped));
 	if (!phys) {
 		pr_warn("Warning: can't allocate distance table!\n");
 		/* don't retry until explicitly reset */
 		numa_distance = (void *)1LU;
 		return -ENOMEM;
 	}
-	memblock_reserve(phys, size);
 
 	numa_distance = __va(phys);
 	numa_distance_cnt = cnt;
--- a/arch/x86/mm/numa_emulation.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/x86/mm/numa_emulation.c
@@ -447,13 +447,12 @@ void __init numa_emulation(struct numa_m
 	if (numa_dist_cnt) {
 		u64 phys;
 
-		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
-					      phys_size, PAGE_SIZE);
+		phys = memblock_phys_alloc_range(phys_size, PAGE_SIZE, 0,
+						 PFN_PHYS(max_pfn_mapped));
 		if (!phys) {
 			pr_warn("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
 			goto no_emu;
 		}
-		memblock_reserve(phys, phys_size);
 		phys_dist = __va(phys);
 
 		for (i = 0; i < numa_dist_cnt; i++)
--- a/arch/x86/realmode/init.c~memblock-make-memblock_find_in_range-method-private
+++ a/arch/x86/realmode/init.c
@@ -28,7 +28,7 @@ void __init reserve_real_mode(void)
 	WARN_ON(slab_is_available());
 
 	/* Has to be under 1M so we can execute real-mode AP code. */
-	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
+	mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
 	if (!mem)
 		pr_info("No sub-1M memory is available for the trampoline\n");
 	else
--- a/drivers/acpi/tables.c~memblock-make-memblock_find_in_range-method-private
+++ a/drivers/acpi/tables.c
@@ -583,8 +583,8 @@ void __init acpi_table_upgrade(void)
 	}
 
 	acpi_tables_addr =
-		memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS,
-				       all_tables_size, PAGE_SIZE);
+		memblock_phys_alloc_range(all_tables_size, PAGE_SIZE,
+					  0, ACPI_TABLE_UPGRADE_MAX_PHYS);
 	if (!acpi_tables_addr) {
 		WARN_ON(1);
 		return;
@@ -599,7 +599,6 @@ void __init acpi_table_upgrade(void)
 	 * Both memblock_reserve and e820__range_add (via arch_reserve_mem_area)
 	 * works fine.
 	 */
-	memblock_reserve(acpi_tables_addr, all_tables_size);
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
 	/*
--- a/drivers/base/arch_numa.c~memblock-make-memblock_find_in_range-method-private
+++ a/drivers/base/arch_numa.c
@@ -279,13 +279,10 @@ static int __init numa_alloc_distance(vo
 	int i, j;
 
 	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
-	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
-				      size, PAGE_SIZE);
+	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0, PFN_PHYS(max_pfn));
 	if (WARN_ON(!phys))
 		return -ENOMEM;
 
-	memblock_reserve(phys, size);
-
 	numa_distance = __va(phys);
 	numa_distance_cnt = nr_node_ids;
 
--- a/drivers/of/of_reserved_mem.c~memblock-make-memblock_find_in_range-method-private
+++ a/drivers/of/of_reserved_mem.c
@@ -33,18 +33,22 @@ static int __init early_init_dt_alloc_re
 	phys_addr_t *res_base)
 {
 	phys_addr_t base;
+	int err = 0;
 
 	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
 	align = !align ? SMP_CACHE_BYTES : align;
-	base = memblock_find_in_range(start, end, size, align);
+	base = memblock_phys_alloc_range(size, align, start, end);
 	if (!base)
 		return -ENOMEM;
 
 	*res_base = base;
-	if (nomap)
-		return memblock_mark_nomap(base, size);
+	if (nomap) {
+		err = memblock_mark_nomap(base, size);
+		if (err)
+			memblock_free(base, size);
+	}
 
-	return memblock_reserve(base, size);
+	return err;
 }
 
 /*
--- a/include/linux/memblock.h~memblock-make-memblock_find_in_range-method-private
+++ a/include/linux/memblock.h
@@ -99,8 +99,6 @@ void memblock_discard(void);
 static inline void memblock_discard(void) {}
 #endif
 
-phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
-				   phys_addr_t size, phys_addr_t align);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
--- a/mm/memblock.c~memblock-make-memblock_find_in_range-method-private
+++ a/mm/memblock.c
@@ -315,7 +315,7 @@ static phys_addr_t __init_memblock membl
  * Return:
  * Found address on success, 0 on failure.
  */
-phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
+static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					phys_addr_t end, phys_addr_t size,
 					phys_addr_t align)
 {
_

  parent reply	other threads:[~2021-09-02 22:00 UTC|newest]

Thread overview: 253+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-09-02 21:48 incoming Andrew Morton
2021-09-02 21:49 ` incoming Andrew Morton
2021-09-02 21:49 ` [patch 001/212] ia64: fix typo in a comment Andrew Morton
2021-09-02 21:50 ` [patch 002/212] ia64: fix #endif comment for reserve_elfcorehdr() Andrew Morton
2021-09-02 21:50 ` [patch 003/212] ia64: make reserve_elfcorehdr() static Andrew Morton
2021-09-02 21:50 ` [patch 004/212] ia64: make num_rsvd_regions static Andrew Morton
2021-09-02 21:50 ` [patch 005/212] ocfs2: remove an unnecessary condition Andrew Morton
2021-09-02 21:50 ` [patch 006/212] ocfs2: quota_local: fix possible uninitialized-variable access in ocfs2_local_read_info() Andrew Morton
2021-09-02 21:50 ` [patch 007/212] ocfs2: ocfs2_downconvert_lock failure results in deadlock Andrew Morton
2021-09-02 21:50 ` [patch 008/212] arch/csky/kernel/probes/kprobes.c: fix bugon.cocci warnings Andrew Morton
2021-09-02 21:50 ` [patch 009/212] mm, slub: don't call flush_all() from slab_debug_trace_open() Andrew Morton
2021-09-02 21:50 ` [patch 010/212] mm, slub: allocate private object map for debugfs listings Andrew Morton
2021-09-02 21:50 ` [patch 011/212] mm, slub: allocate private object map for validate_slab_cache() Andrew Morton
2021-09-02 21:50 ` [patch 012/212] mm, slub: don't disable irq for debug_check_no_locks_freed() Andrew Morton
2021-09-02 21:50 ` [patch 013/212] mm, slub: remove redundant unfreeze_partials() from put_cpu_partial() Andrew Morton
2021-09-02 21:50 ` [patch 014/212] mm, slub: unify cmpxchg_double_slab() and __cmpxchg_double_slab() Andrew Morton
2021-09-02 21:50 ` [patch 015/212] mm, slub: extract get_partial() from new_slab_objects() Andrew Morton
2021-09-02 21:50 ` [patch 016/212] mm, slub: dissolve new_slab_objects() into ___slab_alloc() Andrew Morton
2021-09-02 21:50 ` [patch 017/212] mm, slub: return slab page from get_partial() and set c->page afterwards Andrew Morton
2021-09-02 21:50 ` [patch 018/212] mm, slub: restructure new page checks in ___slab_alloc() Andrew Morton
2021-09-02 21:50 ` [patch 019/212] mm, slub: simplify kmem_cache_cpu and tid setup Andrew Morton
2021-09-02 21:50 ` [patch 020/212] mm, slub: move disabling/enabling irqs to ___slab_alloc() Andrew Morton
2021-09-02 21:51 ` [patch 021/212] mm, slub: do initial checks in ___slab_alloc() with irqs enabled Andrew Morton
2021-09-02 21:51 ` [patch 022/212] mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc() Andrew Morton
2021-09-02 21:51 ` [patch 023/212] mm, slub: restore irqs around calling new_slab() Andrew Morton
2021-09-02 21:51 ` [patch 024/212] mm, slub: validate slab from partial list or page allocator before making it cpu slab Andrew Morton
2021-09-02 21:51 ` [patch 025/212] mm, slub: check new pages with restored irqs Andrew Morton
2021-09-02 21:51 ` [patch 026/212] mm, slub: stop disabling irqs around get_partial() Andrew Morton
2021-09-02 21:51 ` [patch 027/212] mm, slub: move reset of c->page and freelist out of deactivate_slab() Andrew Morton
2021-09-02 21:51 ` [patch 028/212] mm, slub: make locking in deactivate_slab() irq-safe Andrew Morton
2021-09-02 21:51 ` [patch 029/212] mm, slub: call deactivate_slab() without disabling irqs Andrew Morton
2021-09-02 21:51 ` [patch 030/212] mm, slub: move irq control into unfreeze_partials() Andrew Morton
2021-09-02 21:51 ` [patch 031/212] mm, slub: discard slabs in unfreeze_partials() without irqs disabled Andrew Morton
2021-09-02 21:51 ` [patch 032/212] mm, slub: detach whole partial list at once in unfreeze_partials() Andrew Morton
2021-09-02 21:51 ` [patch 033/212] mm, slub: separate detaching of partial list in unfreeze_partials() from unfreezing Andrew Morton
2021-09-02 21:51 ` [patch 034/212] mm, slub: only disable irq with spin_lock in __unfreeze_partials() Andrew Morton
2021-09-02 21:51 ` [patch 035/212] mm, slub: don't disable irqs in slub_cpu_dead() Andrew Morton
2021-09-02 21:51 ` [patch 036/212] mm, slab: make flush_slab() possible to call with irqs enabled Andrew Morton
2021-09-02 23:15   ` Linus Torvalds
2021-09-02 23:34     ` Linus Torvalds
2021-09-02 23:51       ` Linus Torvalds
2021-09-03  5:26         ` Vlastimil Babka
2021-09-03  6:22           ` Mike Galbraith
2021-09-03  7:03             ` Vlastimil Babka
2021-09-03 11:14               ` Vlastimil Babka
2021-09-03 16:18             ` Linus Torvalds
2021-09-02 21:51 ` [patch 037/212] mm: slub: move flush_cpu_slab() invocations __free_slab() invocations out of IRQ context Andrew Morton
2021-09-02 21:51 ` [patch 038/212] mm: slub: make object_map_lock a raw_spinlock_t Andrew Morton
2021-09-02 21:52 ` [patch 039/212] mm, slub: optionally save/restore irqs in slab_[un]lock()/ Andrew Morton
2021-09-02 21:52 ` [patch 040/212] mm, slub: make slab_lock() disable irqs with PREEMPT_RT Andrew Morton
2021-09-02 21:52 ` [patch 041/212] mm, slub: protect put_cpu_partial() with disabled irqs instead of cmpxchg Andrew Morton
2021-09-02 21:52 ` [patch 042/212] mm, slub: use migrate_disable() on PREEMPT_RT Andrew Morton
2021-09-02 21:52 ` [patch 043/212] mm, slub: convert kmem_cpu_slab protection to local_lock Andrew Morton
2021-09-02 21:52 ` [patch 044/212] mm/debug_vm_pgtable: introduce struct pgtable_debug_args Andrew Morton
2021-09-02 21:52 ` [patch 045/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in basic tests Andrew Morton
2021-09-02 21:52 ` [patch 046/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in leaf and savewrite tests Andrew Morton
2021-09-02 21:52 ` [patch 047/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in protnone and devmap tests Andrew Morton
2021-09-02 21:52 ` [patch 048/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in soft_dirty and swap tests Andrew Morton
2021-09-02 21:52 ` [patch 049/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in migration and thp tests Andrew Morton
2021-09-02 21:52 ` [patch 050/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in PTE modifying tests Andrew Morton
2021-09-02 21:52 ` [patch 051/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in PMD " Andrew Morton
2021-09-02 21:52 ` [patch 052/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in PUD " Andrew Morton
2021-09-02 21:52 ` [patch 053/212] mm/debug_vm_pgtable: use struct pgtable_debug_args in PGD and P4D " Andrew Morton
2021-09-02 21:52 ` [patch 054/212] mm/debug_vm_pgtable: remove unused code Andrew Morton
2021-09-02 21:52 ` [patch 055/212] mm/debug_vm_pgtable: fix corrupted page flag Andrew Morton
2021-09-02 21:52 ` [patch 056/212] mm: report a more useful address for reclaim acquisition Andrew Morton
2021-09-02 21:53 ` [patch 057/212] mm: add kernel_misc_reclaimable in show_free_areas Andrew Morton
2021-09-02 21:53 ` [patch 058/212] writeback: track number of inodes under writeback Andrew Morton
2021-09-02 21:53 ` [patch 059/212] writeback: reliably update bandwidth estimation Andrew Morton
2021-09-02 21:53 ` [patch 060/212] writeback: fix bandwidth estimate for spiky workload Andrew Morton
2021-09-02 21:53 ` [patch 061/212] writeback: rename domain_update_bandwidth() Andrew Morton
2021-09-02 21:53 ` [patch 062/212] writeback: use READ_ONCE for unlocked reads of writeback stats Andrew Morton
2021-09-02 21:53 ` [patch 063/212] mm: remove irqsave/restore locking from contexts with irqs enabled Andrew Morton
2021-09-02 21:53 ` [patch 064/212] fs: drop_caches: fix skipping over shadow cache inodes Andrew Morton
2021-09-02 21:53 ` [patch 065/212] fs: inode: count invalidated shadow pages in pginodesteal Andrew Morton
2021-09-02 21:53 ` [patch 066/212] writeback: memcg: simplify cgroup_writeback_by_id Andrew Morton
2021-09-02 21:53 ` [patch 067/212] include/linux/buffer_head.h: fix boolreturn.cocci warnings Andrew Morton
2021-09-02 21:53 ` [patch 068/212] mm: gup: remove set but unused local variable major Andrew Morton
2021-09-02 21:53 ` [patch 069/212] mm: gup: remove unneed local variable orig_refs Andrew Morton
2021-09-02 21:53 ` [patch 070/212] mm: gup: remove useless BUG_ON in __get_user_pages() Andrew Morton
2021-09-02 21:53 ` [patch 071/212] mm: gup: fix potential pgmap refcnt leak in __gup_device_huge() Andrew Morton
2021-09-03 16:35   ` Linus Torvalds
2021-09-03 17:55     ` John Hubbard
2021-09-03 19:01       ` Linus Torvalds
2021-09-02 21:53 ` [patch 072/212] mm: gup: use helper PAGE_ALIGNED in populate_vma_page_range() Andrew Morton
2021-09-02 21:53 ` [patch 073/212] mm/gup: documentation corrections for gup/pup Andrew Morton
2021-09-02 21:53 ` [patch 074/212] mm/gup: small refactoring: simplify try_grab_page() Andrew Morton
2021-09-02 21:53 ` [patch 075/212] mm/gup: remove try_get_page(), call try_get_compound_head() directly Andrew Morton
2021-09-02 23:23   ` John Hubbard
2021-09-02 21:53 ` [patch 076/212] fs, mm: fix race in unlinking swapfile Andrew Morton
2021-09-02 21:54 ` [patch 077/212] mm: delete unused get_kernel_page() Andrew Morton
2021-09-02 21:54 ` [patch 078/212] shmem: use raw_spinlock_t for ->stat_lock Andrew Morton
2021-09-02 21:54 ` [patch 079/212] shmem: remove unneeded variable ret Andrew Morton
2021-09-02 21:54 ` [patch 080/212] shmem: remove unneeded header file Andrew Morton
2021-09-02 21:54 ` [patch 081/212] shmem: remove unneeded function forward declaration Andrew Morton
2021-09-02 21:54 ` [patch 082/212] shmem: include header file to declare swap_info Andrew Morton
2021-09-02 21:54 ` [patch 083/212] huge tmpfs: fix fallocate(vanilla) advance over huge pages Andrew Morton
2021-09-02 21:54 ` [patch 084/212] huge tmpfs: fix split_huge_page() after FALLOC_FL_KEEP_SIZE Andrew Morton
2021-09-02 21:54 ` [patch 085/212] huge tmpfs: remove shrinklist addition from shmem_setattr() Andrew Morton
2021-09-02 21:54 ` [patch 086/212] huge tmpfs: revert shmem's use of transhuge_vma_enabled() Andrew Morton
2021-09-02 21:54 ` [patch 087/212] huge tmpfs: move shmem_huge_enabled() upwards Andrew Morton
2021-09-02 21:54 ` [patch 088/212] huge tmpfs: SGP_NOALLOC to stop collapse_file() on race Andrew Morton
2021-09-02 21:54 ` [patch 089/212] huge tmpfs: shmem_is_huge(vma, inode, index) Andrew Morton
2021-09-02 21:54 ` [patch 090/212] huge tmpfs: decide stat.st_blksize by shmem_is_huge() Andrew Morton
2021-09-02 21:54 ` [patch 091/212] shmem: shmem_writepage() split unlikely i915 THP Andrew Morton
2021-09-02 21:54 ` [patch 092/212] mm, memcg: add mem_cgroup_disabled checks in vmpressure and swap-related functions Andrew Morton
2021-09-02 21:54 ` [patch 093/212] mm, memcg: inline mem_cgroup_{charge/uncharge} to improve disabled memcg config Andrew Morton
2021-09-02 21:54 ` [patch 094/212] mm, memcg: inline swap-related functions " Andrew Morton
2021-09-02 21:54 ` [patch 095/212] memcg: enable accounting for pids in nested pid namespaces Andrew Morton
2021-09-02 21:55 ` [patch 096/212] memcg: switch lruvec stats to rstat Andrew Morton
2021-09-02 21:55 ` [patch 097/212] memcg: infrastructure to flush memcg stats Andrew Morton
2021-09-05 12:44   ` [memcg] 45208c9105: aim7.jobs-per-min -14.0% regression kernel test robot
2021-09-05 22:15     ` Shakeel Butt
2021-09-07  3:30       ` Feng Tang
2021-09-10  0:43         ` Shakeel Butt
2021-09-10  1:08           ` Feng Tang
2021-09-10  1:19             ` Shakeel Butt
2021-09-10  2:34               ` Feng Tang
2021-09-10  4:17                 ` Shakeel Butt
2021-09-02 21:55 ` [patch 098/212] memcg: charge fs_context and legacy_fs_context Andrew Morton
2021-09-02 21:55 ` [patch 099/212] memcg: enable accounting for mnt_cache entries Andrew Morton
2021-09-02 21:55 ` [patch 100/212] memcg: enable accounting for pollfd and select bits arrays Andrew Morton
2021-09-05 13:27   ` [memcg] fa4e6b1ad5: will-it-scale.per_thread_ops -15.4% regression kernel test robot
2021-09-02 21:55 ` [patch 101/212] memcg: enable accounting for file lock caches Andrew Morton
2021-09-05 13:08   ` [memcg] 059dd9003a: will-it-scale.per_process_ops -39.8% regression kernel test robot
2021-09-02 21:55 ` [patch 102/212] memcg: enable accounting for fasync_cache Andrew Morton
2021-09-02 21:55 ` [patch 103/212] memcg: enable accounting for new namesapces and struct nsproxy Andrew Morton
2021-09-02 21:55 ` [patch 104/212] memcg: enable accounting of ipc resources Andrew Morton
2021-09-02 21:55 ` [patch 105/212] memcg: enable accounting for signals Andrew Morton
2021-09-02 21:55 ` [patch 106/212] memcg: enable accounting for posix_timers_cache slab Andrew Morton
2021-09-02 21:55 ` [patch 107/212] memcg: enable accounting for ldt_struct objects Andrew Morton
2021-09-02 21:55 ` [patch 108/212] memcg: cleanup racy sum avoidance code Andrew Morton
2021-09-02 21:55 ` [patch 109/212] memcg: replace in_interrupt() by !in_task() in active_memcg() Andrew Morton
2021-09-02 21:55 ` [patch 110/212] mm: memcontrol: set the correct memcg swappiness restriction Andrew Morton
2021-09-02 21:55 ` [patch 111/212] mm, memcg: remove unused functions Andrew Morton
2021-09-02 21:55 ` [patch 112/212] mm, memcg: save some atomic ops when flush is already true Andrew Morton
2021-09-02 21:56 ` [patch 113/212] memcg: fix up drain_local_stock comment Andrew Morton
2021-09-02 21:56 ` [patch 114/212] memcg: make memcg->event_list_lock irqsafe Andrew Morton
2021-09-02 21:56 ` [patch 115/212] selftests/vm: use kselftest skip code for skipped tests Andrew Morton
2021-09-02 21:56 ` [patch 116/212] selftests: Fix spelling mistake "cann't" -> "cannot" Andrew Morton
2021-09-02 21:56 ` [patch 117/212] lazy tlb: introduce lazy mm refcount helper functions Andrew Morton
2021-09-02 21:56 ` [patch 118/212] lazy tlb: allow lazy tlb mm refcounting to be configurable Andrew Morton
2021-09-02 21:56 ` [patch 119/212] lazy tlb: shoot lazies, a non-refcounting lazy tlb option Andrew Morton
2021-09-02 22:28   ` Andy Lutomirski
2021-09-02 22:50     ` Linus Torvalds
2021-09-02 22:53       ` Andrew Morton
2021-09-03  0:35         ` Andy Lutomirski
2021-09-03  0:46         ` Nicholas Piggin
2021-09-03  5:11           ` Andy Lutomirski
2021-09-03  5:44             ` Nicholas Piggin
2021-09-03 23:48               ` Nicholas Piggin
2021-09-03  0:48     ` Nicholas Piggin
2021-09-02 21:56 ` [patch 120/212] powerpc/64s: enable MMU_LAZY_TLB_SHOOTDOWN Andrew Morton
2021-09-02 21:56 ` [patch 121/212] mmc: JZ4740: remove the flush_kernel_dcache_page call in jz4740_mmc_read_data Andrew Morton
2021-09-02 21:56 ` [patch 122/212] mmc: mmc_spi: replace flush_kernel_dcache_page with flush_dcache_page Andrew Morton
2021-09-02 21:56 ` [patch 123/212] scatterlist: " Andrew Morton
2021-09-02 21:56 ` [patch 124/212] mm: remove flush_kernel_dcache_page Andrew Morton
2021-09-02 21:56 ` [patch 125/212] mm,do_huge_pmd_numa_page: remove unnecessary TLB flushing code Andrew Morton
2021-09-02 21:56 ` [patch 126/212] mm: change fault_in_pages_* to have an unsigned size parameter Andrew Morton
2021-09-02 21:56 ` [patch 127/212] mm/pagemap: add mmap_assert_locked() annotations to find_vma*() Andrew Morton
2021-09-02 21:56 ` [patch 128/212] remap_file_pages: Use vma_lookup() instead of find_vma() Andrew Morton
2021-09-02 21:56 ` [patch 129/212] mm/mremap: fix memory account on do_munmap() failure Andrew Morton
2021-09-02 21:56 ` [patch 130/212] mm/bootmem_info.c: mark __init on register_page_bootmem_info_section Andrew Morton
2021-09-02 21:56 ` [patch 131/212] mm: sparse: pass section_nr to section_mark_present Andrew Morton
2021-09-02 21:57 ` [patch 132/212] mm: sparse: pass section_nr to find_memory_block Andrew Morton
2021-09-02 21:57 ` [patch 133/212] mm: sparse: remove __section_nr() function Andrew Morton
2021-09-02 21:57 ` [patch 134/212] mm/sparse: set SECTION_NID_SHIFT to 6 Andrew Morton
2021-09-02 21:57 ` [patch 135/212] include/linux/mmzone.h: avoid a warning in sparse memory support Andrew Morton
2021-09-02 21:57 ` [patch 136/212] mm/sparse: clarify pgdat_to_phys Andrew Morton
2021-09-02 21:57 ` [patch 137/212] mm/vmalloc: use batched page requests in bulk-allocator Andrew Morton
2021-09-02 21:57 ` [patch 138/212] mm/vmalloc: remove gfpflags_allow_blocking() check Andrew Morton
2021-09-02 21:57 ` [patch 139/212] lib/test_vmalloc.c: add a new 'nr_pages' parameter Andrew Morton
2021-09-02 21:57 ` [patch 140/212] mm/vmalloc: fix wrong behavior in vread Andrew Morton
2021-09-02 21:57 ` [patch 141/212] mm/kasan: move kasan.fault to mm/kasan/report.c Andrew Morton
2021-09-02 21:57 ` [patch 142/212] kasan: test: rework kmalloc_oob_right Andrew Morton
2021-09-02 21:57 ` [patch 143/212] kasan: test: avoid writing invalid memory Andrew Morton
2021-09-02 21:57 ` [patch 144/212] kasan: test: avoid corrupting memory via memset Andrew Morton
2021-09-02 21:57 ` [patch 145/212] kasan: test: disable kmalloc_memmove_invalid_size for HW_TAGS Andrew Morton
2021-09-02 21:57 ` [patch 146/212] kasan: test: only do kmalloc_uaf_memset for generic mode Andrew Morton
2021-09-02 21:57 ` [patch 147/212] kasan: test: clean up ksize_uaf Andrew Morton
2021-09-02 21:57 ` [patch 148/212] kasan: test: avoid corrupting memory in copy_user_test Andrew Morton
2021-09-02 21:57 ` [patch 149/212] kasan: test: avoid corrupting memory in kasan_rcu_uaf Andrew Morton
2021-09-02 21:57 ` [patch 150/212] mm/page_alloc: always initialize memory map for the holes Andrew Morton
2021-09-02 21:57 ` [patch 151/212] microblaze: simplify pte_alloc_one_kernel() Andrew Morton
2021-09-02 21:58 ` [patch 152/212] mm: introduce memmap_alloc() to unify memory map allocation Andrew Morton
2021-09-02 21:58 ` [patch 153/212] memblock: stop poisoning raw allocations Andrew Morton
2021-09-02 21:58 ` [patch 154/212] mm/page_alloc.c: fix 'zone_id' may be used uninitialized in this function warning Andrew Morton
2021-09-02 21:58 ` [patch 155/212] mm/page_alloc: make alloc_node_mem_map() __init rather than __ref Andrew Morton
2021-09-02 21:58 ` [patch 156/212] mm/page_alloc.c: use in_task() Andrew Morton
2021-09-02 21:58 ` [patch 157/212] mm/page_isolation: tracing: trace all test_pages_isolated failures Andrew Morton
2021-09-02 21:58 ` [patch 158/212] mm/hwpoison: remove unneeded variable unmap_success Andrew Morton
2021-09-02 21:58 ` [patch 159/212] mm/hwpoison: fix potential pte_unmap_unlock pte error Andrew Morton
2021-09-02 21:58 ` [patch 160/212] mm/hwpoison: change argument struct page **hpagep to *hpage Andrew Morton
2021-09-02 21:58 ` [patch 161/212] mm/hwpoison: fix some obsolete comments Andrew Morton
2021-09-02 21:58 ` [patch 162/212] mm: hwpoison: don't drop slab caches for offlining non-LRU page Andrew Morton
2021-09-02 21:58 ` [patch 163/212] doc: hwpoison: correct the support for hugepage Andrew Morton
2021-09-02 21:58 ` [patch 164/212] mm: hwpoison: dump page for unhandlable page Andrew Morton
2021-09-02 21:58 ` [patch 165/212] mm: fix panic caused by __page_handle_poison() Andrew Morton
2021-09-02 21:58 ` [patch 166/212] hugetlb: simplify prep_compound_gigantic_page ref count racing code Andrew Morton
2021-09-02 21:58 ` [patch 167/212] hugetlb: drop ref count earlier after page allocation Andrew Morton
2021-09-02 21:58 ` [patch 168/212] hugetlb: before freeing hugetlb page set dtor to appropriate value Andrew Morton
2021-09-02 21:58 ` [patch 169/212] hugetlb: fix hugetlb cgroup refcounting during vma split Andrew Morton
2021-09-02 21:58 ` [patch 170/212] userfaultfd: change mmap_changing to atomic Andrew Morton
2021-09-02 21:58 ` [patch 171/212] userfaultfd: prevent concurrent API initialization Andrew Morton
2021-09-02 21:59 ` [patch 172/212] selftests/vm/userfaultfd: wake after copy failure Andrew Morton
2021-09-02 21:59 ` [patch 173/212] mm/numa: automatically generate node migration order Andrew Morton
2021-09-02 21:59 ` [patch 174/212] mm/migrate: update node demotion order on hotplug events Andrew Morton
2021-09-05 13:59   ` [mm/migrate] 9eeb73028c: stress-ng.memhotplug.ops_per_sec -53.8% regression kernel test robot
2021-09-06  1:53     ` Huang, Ying
2021-09-06  3:57       ` Dave Hansen
2021-09-06  5:57         ` Huang, Ying
2021-09-06  6:09           ` Chen Yu
2021-09-06  8:31             ` Huang, Ying
2021-09-17  3:14     ` Huang, Ying
2021-10-21 14:29       ` Oliver Sang
2021-09-02 21:59 ` [patch 175/212] mm/migrate: enable returning precise migrate_pages() success count Andrew Morton
2021-09-02 21:59 ` [patch 176/212] mm/migrate: demote pages during reclaim Andrew Morton
2021-09-02 21:59 ` [patch 177/212] mm/vmscan: add page demotion counter Andrew Morton
2021-09-02 21:59 ` [patch 178/212] mm/vmscan: add helper for querying ability to age anonymous pages Andrew Morton
2021-09-02 21:59 ` [patch 179/212] mm/vmscan: Consider anonymous pages without swap Andrew Morton
2021-09-02 21:59 ` [patch 180/212] mm/vmscan: never demote for memcg reclaim Andrew Morton
2021-09-02 21:59 ` [patch 181/212] mm/migrate: add sysfs interface to enable reclaim migration Andrew Morton
2021-09-02 21:59 ` [patch 182/212] mm/vmpressure: replace vmpressure_to_css() with vmpressure_to_memcg() Andrew Morton
2021-09-02 21:59 ` [patch 183/212] mm/vmscan: remove the PageDirty check after MADV_FREE pages are page_ref_freezed Andrew Morton
2021-09-02 21:59 ` [patch 184/212] mm/vmscan: remove misleading setting to sc->priority Andrew Morton
2021-09-02 21:59 ` [patch 185/212] mm/vmscan: remove unneeded return value of kswapd_run() Andrew Morton
2021-09-02 21:59 ` [patch 186/212] mm/vmscan: add 'else' to remove check_pending label Andrew Morton
2021-09-02 21:59 ` [patch 187/212] mm, vmscan: guarantee drop_slab_node() termination Andrew Morton
2021-09-02 21:59 ` [patch 188/212] mm: compaction: optimize proactive compaction deferrals Andrew Morton
2021-09-02 21:59 ` [patch 189/212] mm: compaction: support triggering of proactive compaction by user Andrew Morton
2021-09-02 22:00 ` [patch 190/212] mm/mempolicy: use readable NUMA_NO_NODE macro instead of magic number Andrew Morton
2021-09-02 22:00 ` [patch 191/212] mm/mempolicy: add MPOL_PREFERRED_MANY for multiple preferred nodes Andrew Morton
2021-09-02 22:00 ` [patch 192/212] mm/mempolicy: add page allocation function for MPOL_PREFERRED_MANY policy Andrew Morton
2021-09-02 22:00 ` [patch 193/212] mm/hugetlb: add support for mempolicy MPOL_PREFERRED_MANY Andrew Morton
2021-09-02 22:00 ` [patch 194/212] mm/mempolicy: advertise new MPOL_PREFERRED_MANY Andrew Morton
2021-09-02 22:00 ` [patch 195/212] mm/mempolicy: unify the create() func for bind/interleave/prefer-many policies Andrew Morton
2021-09-02 22:00 ` [patch 196/212] mm/mempolicy.c: use in_task() in mempolicy_slab_node() Andrew Morton
2021-09-02 22:00 ` Andrew Morton [this message]
2021-09-02 22:00 ` [patch 198/212] mm: introduce process_mrelease system call Andrew Morton
2021-09-02 22:00 ` [patch 199/212] mm: wire up syscall process_mrelease Andrew Morton
2021-09-02 22:00 ` [patch 200/212] mm/migrate: correct kernel-doc notation Andrew Morton
2021-09-02 22:00 ` [patch 201/212] selftests: vm: add KSM merge test Andrew Morton
2021-09-02 22:00 ` [patch 202/212] selftests: vm: add KSM unmerge test Andrew Morton
2021-09-02 22:00 ` [patch 203/212] selftests: vm: add KSM zero page merging test Andrew Morton
2021-09-02 22:00 ` [patch 204/212] selftests: vm: add KSM merging across nodes test Andrew Morton
2021-09-02 22:00 ` [patch 205/212] mm: KSM: fix data type Andrew Morton
2021-09-02 22:00 ` [patch 206/212] selftests: vm: add KSM merging time test Andrew Morton
2021-09-02 22:00 ` [patch 207/212] selftests: vm: add COW time test for KSM pages Andrew Morton
2021-09-02 22:01 ` [patch 208/212] mm/percpu.c: remove obsolete comments of pcpu_chunk_populated() Andrew Morton
2021-09-02 22:01 ` [patch 209/212] mm/vmstat: correct some wrong comments Andrew Morton
2021-09-02 22:01 ` [patch 210/212] mm/vmstat: simplify the array size calculation Andrew Morton
2021-09-02 22:01 ` [patch 211/212] mm/vmstat: remove unneeded return value Andrew Morton
2021-09-02 22:01 ` [patch 212/212] mm/madvise: add MADV_WILLNEED to process_madvise() Andrew Morton

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210902220026.q4AhgXDR6%akpm@linux-foundation.org \
    --to=akpm@linux-foundation.org \
    --cc=catalin.marinas@arm.com \
    --cc=kirill.shtuemov@linux.intel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux@roeck-us.net \
    --cc=mick@ics.forth.gr \
    --cc=mm-commits@vger.kernel.org \
    --cc=rafael.j.wysocki@intel.com \
    --cc=rmk+kernel@armlinux.org.uk \
    --cc=robh@kernel.org \
    --cc=rppt@linux.ibm.com \
    --cc=torvalds@linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).