From: Andrew Morton <akpm@linux-foundation.org>
To: abdhalee@linux.vnet.ibm.com, akpm@linux-foundation.org,
arnd@arndb.de, christophe.leroy@csgroup.eu, geert@linux-m68k.org,
jcmvbkbc@gmail.com, joro@8bytes.org, jroedel@suse.de,
linux-mm@kvack.org, luto@kernel.org, mm-commits@vger.kernel.org,
penberg@kernel.org, peterz@infradead.org, rostedt@goodmis.org,
rppt@linux.ibm.com, sathnaga@linux.vnet.ibm.com,
sfr@canb.auug.org.au, shorne@gmail.com,
torvalds@linux-foundation.org, willy@infradead.org
Subject: [patch 102/163] mm: move lib/ioremap.c to mm/
Date: Thu, 06 Aug 2020 23:22:51 -0700
Message-ID: <20200807062251.amozo-C91%akpm@linux-foundation.org>
In-Reply-To: <20200806231643.a2711a608dd0f18bff2caf2b@linux-foundation.org>
From: Mike Rapoport <rppt@linux.ibm.com>
Subject: mm: move lib/ioremap.c to mm/
The functionality in lib/ioremap.c deals with pagetables, vmalloc and
caches, so it naturally belongs to mm/. Moving it there will also allow
declaring the p?d_alloc_track functions in a header file inside mm/ rather
than having those declarations in include/linux/mm.h.
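
As an illustration only (not part of this patch; the header name and
exact form below are assumptions, pending the follow-up patch that
actually introduces the internal header), the declarations that
currently sit in include/linux/mm.h could then live in something like:

    /* mm/pgalloc-track.h -- hypothetical name, shown for illustration */
    #ifndef _MM_PGALLOC_TRACK_H
    #define _MM_PGALLOC_TRACK_H

    p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
                           unsigned long address, pgtbl_mod_mask *mask);
    pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
                           unsigned long address, pgtbl_mod_mask *mask);
    pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
                           unsigned long address, pgtbl_mod_mask *mask);

    #endif /* _MM_PGALLOC_TRACK_H */

(shown here as plain prototypes; whether they stay static inlines is up
to that follow-up change), so that only the mm/ code that actually uses
them needs to include it.
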
Link: http://lkml.kernel.org/r/20200627143453.31835-8-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
lib/Makefile | 1
lib/ioremap.c | 287 ------------------------------------------------
mm/Makefile | 2
mm/ioremap.c | 287 ++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 288 insertions(+), 289 deletions(-)
--- a/lib/ioremap.c
+++ /dev/null
@@ -1,287 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-#include <linux/export.h>
-#include <asm/cacheflush.h>
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
-
-static int __init set_nohugeiomap(char *str)
-{
- ioremap_huge_disabled = 1;
- return 0;
-}
-early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
- if (!ioremap_huge_disabled) {
- if (arch_ioremap_p4d_supported())
- ioremap_p4d_capable = 1;
- if (arch_ioremap_pud_supported())
- ioremap_pud_capable = 1;
- if (arch_ioremap_pmd_supported())
- ioremap_pmd_capable = 1;
- }
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
- return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
- return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
- return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pte_t *pte;
- u64 pfn;
-
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel_track(pmd, addr, mask);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- *mask |= PGTBL_PTE_MODIFIED;
- return 0;
-}
-
-static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pmd_enabled())
- return 0;
-
- if ((end - addr) != PMD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PMD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PMD_SIZE))
- return 0;
-
- if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
- return 0;
-
- return pmd_set_huge(pmd, phys_addr, prot);
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
-
- if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PMD_MODIFIED;
- continue;
- }
-
- if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pud_enabled())
- return 0;
-
- if ((end - addr) != PUD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, PUD_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PUD_SIZE))
- return 0;
-
- if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
- return 0;
-
- return pud_set_huge(pud, phys_addr, prot);
-}
-
-static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_alloc_track(&init_mm, p4d, addr, mask);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
-
- if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_PUD_MODIFIED;
- continue;
- }
-
- if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_p4d_enabled())
- return 0;
-
- if ((end - addr) != P4D_SIZE)
- return 0;
-
- if (!IS_ALIGNED(addr, P4D_SIZE))
- return 0;
-
- if (!IS_ALIGNED(phys_addr, P4D_SIZE))
- return 0;
-
- if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
- return 0;
-
- return p4d_set_huge(p4d, phys_addr, prot);
-}
-
-static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
- pgtbl_mod_mask *mask)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
-
- if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
- *mask |= PGTBL_P4D_MODIFIED;
- continue;
- }
-
- if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
- return -ENOMEM;
- } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-int ioremap_page_range(unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pgd_t *pgd;
- unsigned long start;
- unsigned long next;
- int err;
- pgtbl_mod_mask mask = 0;
-
- might_sleep();
- BUG_ON(addr >= end);
-
- start = addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
- &mask);
- if (err)
- break;
- } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
-
- flush_cache_vmap(start, end);
-
- if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
- arch_sync_kernel_mappings(start, end);
-
- return err;
-}
-
-#ifdef CONFIG_GENERIC_IOREMAP
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
-{
- unsigned long offset, vaddr;
- phys_addr_t last_addr;
- struct vm_struct *area;
-
- /* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP,
- __builtin_return_address(0));
- if (!area)
- return NULL;
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-EXPORT_SYMBOL(ioremap_prot);
-
-void iounmap(volatile void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
-#endif /* CONFIG_GENERIC_IOREMAP */
--- a/lib/Makefile~mm-move-lib-ioremapc-to-mm
+++ a/lib/Makefile
@@ -37,7 +37,6 @@ lib-y := ctype.o string.o vsprintf.o cmd
nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
lib-$(CONFIG_PRINTK) += dump_stack.o
-lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o klist.o
--- /dev/null
+++ a/mm/ioremap.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+#include <linux/export.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int __read_mostly ioremap_p4d_capable;
+static int __read_mostly ioremap_pud_capable;
+static int __read_mostly ioremap_pmd_capable;
+static int __read_mostly ioremap_huge_disabled;
+
+static int __init set_nohugeiomap(char *str)
+{
+ ioremap_huge_disabled = 1;
+ return 0;
+}
+early_param("nohugeiomap", set_nohugeiomap);
+
+void __init ioremap_huge_init(void)
+{
+ if (!ioremap_huge_disabled) {
+ if (arch_ioremap_p4d_supported())
+ ioremap_p4d_capable = 1;
+ if (arch_ioremap_pud_supported())
+ ioremap_pud_capable = 1;
+ if (arch_ioremap_pmd_supported())
+ ioremap_pmd_capable = 1;
+ }
+}
+
+static inline int ioremap_p4d_enabled(void)
+{
+ return ioremap_p4d_capable;
+}
+
+static inline int ioremap_pud_enabled(void)
+{
+ return ioremap_pud_capable;
+}
+
+static inline int ioremap_pmd_enabled(void)
+{
+ return ioremap_pmd_capable;
+}
+
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int ioremap_p4d_enabled(void) { return 0; }
+static inline int ioremap_pud_enabled(void) { return 0; }
+static inline int ioremap_pmd_enabled(void) { return 0; }
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
+{
+ pte_t *pte;
+ u64 pfn;
+
+ pfn = phys_addr >> PAGE_SHIFT;
+ pte = pte_alloc_kernel_track(pmd, addr, mask);
+ if (!pte)
+ return -ENOMEM;
+ do {
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ *mask |= PGTBL_PTE_MODIFIED;
+ return 0;
+}
+
+static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr,
+ pgprot_t prot)
+{
+ if (!ioremap_pmd_enabled())
+ return 0;
+
+ if ((end - addr) != PMD_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(addr, PMD_SIZE))
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, PMD_SIZE))
+ return 0;
+
+ if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
+ return 0;
+
+ return pmd_set_huge(pmd, phys_addr, prot);
+}
+
+static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+ next = pmd_addr_end(addr, end);
+
+ if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
+ *mask |= PGTBL_PMD_MODIFIED;
+ continue;
+ }
+
+ if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
+ return -ENOMEM;
+ } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
+ return 0;
+}
+
+static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr,
+ pgprot_t prot)
+{
+ if (!ioremap_pud_enabled())
+ return 0;
+
+ if ((end - addr) != PUD_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(addr, PUD_SIZE))
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, PUD_SIZE))
+ return 0;
+
+ if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
+ return 0;
+
+ return pud_set_huge(pud, phys_addr, prot);
+}
+
+static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_alloc_track(&init_mm, p4d, addr, mask);
+ if (!pud)
+ return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+
+ if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
+ *mask |= PGTBL_PUD_MODIFIED;
+ continue;
+ }
+
+ if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
+ return -ENOMEM;
+ } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
+ return 0;
+}
+
+static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr,
+ pgprot_t prot)
+{
+ if (!ioremap_p4d_enabled())
+ return 0;
+
+ if ((end - addr) != P4D_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(addr, P4D_SIZE))
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, P4D_SIZE))
+ return 0;
+
+ if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
+ return 0;
+
+ return p4d_set_huge(p4d, phys_addr, prot);
+}
+
+static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ pgtbl_mod_mask *mask)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
+ if (!p4d)
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
+ *mask |= PGTBL_P4D_MODIFIED;
+ continue;
+ }
+
+ if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
+ return -ENOMEM;
+ } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
+ return 0;
+}
+
+int ioremap_page_range(unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+{
+ pgd_t *pgd;
+ unsigned long start;
+ unsigned long next;
+ int err;
+ pgtbl_mod_mask mask = 0;
+
+ might_sleep();
+ BUG_ON(addr >= end);
+
+ start = addr;
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
+ &mask);
+ if (err)
+ break;
+ } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
+
+ flush_cache_vmap(start, end);
+
+ if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+ arch_sync_kernel_mappings(start, end);
+
+ return err;
+}
+
+#ifdef CONFIG_GENERIC_IOREMAP
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+{
+ unsigned long offset, vaddr;
+ phys_addr_t last_addr;
+ struct vm_struct *area;
+
+ /* Disallow wrap-around or zero size */
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ /* Page-align mappings */
+ offset = addr & (~PAGE_MASK);
+ addr -= offset;
+ size = PAGE_ALIGN(size + offset);
+
+ area = get_vm_area_caller(size, VM_IOREMAP,
+ __builtin_return_address(0));
+ if (!area)
+ return NULL;
+ vaddr = (unsigned long)area->addr;
+
+ if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+ free_vm_area(area);
+ return NULL;
+ }
+
+ return (void __iomem *)(vaddr + offset);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(volatile void __iomem *addr)
+{
+ vunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
+#endif /* CONFIG_GENERIC_IOREMAP */
--- a/mm/Makefile~mm-move-lib-ioremapc-to-mm
+++ a/mm/Makefile
@@ -38,7 +38,7 @@ mmu-y := nommu.o
mmu-$(CONFIG_MMU) := highmem.o memory.o mincore.o \
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
msync.o page_vma_mapped.o pagewalk.o \
- pgtable-generic.o rmap.o vmalloc.o
+ pgtable-generic.o rmap.o vmalloc.o ioremap.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
_