From: Anthony Yznaga <anthony.yznaga@oracle.com> To: linux-mm@kvack.org, linux-kernel@vger.kernel.org Cc: willy@infradead.org, corbet@lwn.net, tglx@linutronix.de, mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com, dave.hansen@linux.intel.com, luto@kernel.org, peterz@infradead.org, rppt@kernel.org, akpm@linux-foundation.org, hughd@google.com, ebiederm@xmission.com, keescook@chromium.org, ardb@kernel.org, nivedita@alum.mit.edu, jroedel@suse.de, masahiroy@kernel.org, nathan@kernel.org, terrelln@fb.com, vincenzo.frascino@arm.com, martin.b.radev@gmail.com, andreyknvl@google.com, daniel.kiper@oracle.com, rafael.j.wysocki@intel.com, dan.j.williams@intel.com, Jonathan.Cameron@huawei.com, bhe@redhat.com, rminnich@gmail.com, ashish.kalra@amd.com, guro@fb.com, hannes@cmpxchg.org, mhocko@kernel.org, iamjoonsoo.kim@lge.com, vbabka@suse.cz, alex.shi@linux.alibaba.com, david@redhat.com, richard.weiyang@gmail.com, vdavydov.dev@gmail.com, graf@amazon.com, jason.zeng@intel.com, lei.l.li@intel.com, daniel.m.jordan@oracle.com, steven.sistare@oracle.com, linux-fsdevel@vger.kernel.org, linux-doc@vger.kernel.org, kexec@lists.infradead.org Subject: [RFC v2 31/43] memblock, mm: defer initialization of preserved pages Date: Tue, 30 Mar 2021 14:36:06 -0700 [thread overview] Message-ID: <1617140178-8773-32-git-send-email-anthony.yznaga@oracle.com> (raw) In-Reply-To: <1617140178-8773-1-git-send-email-anthony.yznaga@oracle.com> Preserved pages are represented in the memblock reserved list, but page structs for pages in the reserved list are initialized early while boot is single threaded which means that a large number of preserved pages can impact boot time. To mitigate, defer initialization of preserved pages by skipping them when other reserved pages are initialized and initializing them later with a separate kernel thread. 
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com> --- arch/x86/mm/init_64.c | 1 - include/linux/mm.h | 2 +- mm/memblock.c | 11 +++++++++-- mm/page_alloc.c | 55 +++++++++++++++++++++++++++++++++++++++++++-------- 4 files changed, 57 insertions(+), 12 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 69bd71996b8b..8efb2fb2a88b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1294,7 +1294,6 @@ void __init mem_init(void) after_bootmem = 1; x86_init.hyper.init_after_bootmem(); - pkram_cleanup(); totalram_pages_add(pkram_reserved_pages); /* * Must be done after boot memory is put on freelist, because here we diff --git a/include/linux/mm.h b/include/linux/mm.h index 64a71bf20536..2a93b2a6ec8d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2337,7 +2337,7 @@ extern unsigned long free_reserved_area(void *start, void *end, extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); -extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid); /* Free the reserved page into the buddy system, so it gets managed. 
*/ static inline void free_reserved_page(struct page *page) diff --git a/mm/memblock.c b/mm/memblock.c index afaefa8fc6ab..461ea0f85495 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2007,11 +2007,18 @@ static unsigned long __init free_low_memory_core_early(void) unsigned long count = 0; phys_addr_t start, end; u64 i; + struct memblock_region *r; memblock_clear_hotplug(0, -1); - for_each_reserved_mem_range(i, &start, &end) - reserve_bootmem_region(start, end); + for_each_reserved_mem_region(r) { + if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT) && memblock_is_preserved(r)) + continue; + + start = r->base; + end = r->base + r->size; + reserve_bootmem_region(start, end, NUMA_NO_NODE); + } /* * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cfc72873961d..999fcc8fe907 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -72,6 +72,7 @@ #include <linux/padata.h> #include <linux/khugepaged.h> #include <linux/buffer_head.h> +#include <linux/pkram.h> #include <asm/sections.h> #include <asm/tlbflush.h> @@ -1475,15 +1476,18 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn, } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -static void __meminit init_reserved_page(unsigned long pfn) +static void __meminit init_reserved_page(unsigned long pfn, int nid) { pg_data_t *pgdat; - int nid, zid; + int zid; - if (!early_page_uninitialised(pfn)) - return; + if (nid == NUMA_NO_NODE) { + if (!early_page_uninitialised(pfn)) + return; + + nid = early_pfn_to_nid(pfn); + } - nid = early_pfn_to_nid(pfn); pgdat = NODE_DATA(nid); for (zid = 0; zid < MAX_NR_ZONES; zid++) { @@ -1495,7 +1499,7 @@ static void __meminit init_reserved_page(unsigned long pfn) __init_single_page(pfn_to_page(pfn), pfn, zid, nid); } #else -static inline void init_reserved_page(unsigned long pfn) +static inline void init_reserved_page(unsigned long pfn, int nid) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -1506,7 
+1510,7 @@ static inline void init_reserved_page(unsigned long pfn) * marks the pages PageReserved. The remaining valid pages are later * sent to the buddy page allocator. */ -void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) +void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid) { unsigned long start_pfn = PFN_DOWN(start); unsigned long end_pfn = PFN_UP(end); @@ -1515,7 +1519,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); - init_reserved_page(start_pfn); + init_reserved_page(start_pfn, nid); /* Avoid false-positive PageTail() */ INIT_LIST_HEAD(&page->lru); @@ -2008,6 +2012,35 @@ static int __init deferred_init_memmap(void *data) return 0; } +#ifdef CONFIG_PKRAM +static int __init deferred_init_preserved(void *dummy) +{ + unsigned long start = jiffies; + unsigned long nr_pages = 0; + struct memblock_region *r; + phys_addr_t spa, epa; + int nid; + + for_each_reserved_mem_region(r) { + if (!memblock_is_preserved(r)) + continue; + + spa = r->base; + epa = r->base + r->size; + nid = memblock_get_region_node(r); + + reserve_bootmem_region(spa, epa, nid); + nr_pages += ((epa - spa) >> PAGE_SHIFT); + } + + pr_info("initialised %lu preserved pages in %ums\n", nr_pages, + jiffies_to_msecs(jiffies - start)); + + pgdat_init_report_one_done(); + return 0; +} +#endif /* CONFIG_PKRAM */ + /* * If this zone has deferred pages, try to grow it by initializing enough * deferred pages to satisfy the allocation specified by order, rounded up to @@ -2107,6 +2140,10 @@ void __init page_alloc_init_late(void) /* There will be num_node_state(N_MEMORY) threads */ atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); +#ifdef CONFIG_PKRAM + atomic_inc(&pgdat_init_n_undone); + kthread_run(deferred_init_preserved, NULL, "pgdatainit_preserved"); +#endif for_each_node_state(nid, N_MEMORY) { kthread_run(deferred_init_memmap, 
NODE_DATA(nid), "pgdatinit%d", nid); } @@ -2114,6 +2151,8 @@ void __init page_alloc_init_late(void) /* Block until all are initialised */ wait_for_completion(&pgdat_init_all_done_comp); + pkram_cleanup(); + /* * The number of managed pages has changed due to the initialisation * so the pcpu batch and high limits needs to be updated or the limits -- 1.8.3.1
WARNING: multiple messages have this Message-ID (diff)
From: Anthony Yznaga <anthony.yznaga@oracle.com> To: linux-mm@kvack.org, linux-kernel@vger.kernel.org Cc: willy@infradead.org, corbet@lwn.net, tglx@linutronix.de, mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com, dave.hansen@linux.intel.com, luto@kernel.org, peterz@infradead.org, rppt@kernel.org, akpm@linux-foundation.org, hughd@google.com, ebiederm@xmission.com, keescook@chromium.org, ardb@kernel.org, nivedita@alum.mit.edu, jroedel@suse.de, masahiroy@kernel.org, nathan@kernel.org, terrelln@fb.com, vincenzo.frascino@arm.com, martin.b.radev@gmail.com, andreyknvl@google.com, daniel.kiper@oracle.com, rafael.j.wysocki@intel.com, dan.j.williams@intel.com, Jonathan.Cameron@huawei.com, bhe@redhat.com, rminnich@gmail.com, ashish.kalra@amd.com, guro@fb.com, hannes@cmpxchg.org, mhocko@kernel.org, iamjoonsoo.kim@lge.com, vbabka@suse.cz, alex.shi@linux.alibaba.com, david@redhat.com, richard.weiyang@gmail.com, vdavydov.dev@gmail.com, graf@amazon.com, jason.zeng@intel.com, lei.l.li@intel.com, daniel.m.jordan@oracle.com, steven.sistare@oracle.com, linux-fsdevel@vger.kernel.org, linux-doc@vger.kernel.org, kexec@lists.infradead.org Subject: [RFC v2 31/43] memblock, mm: defer initialization of preserved pages Date: Tue, 30 Mar 2021 14:36:06 -0700 [thread overview] Message-ID: <1617140178-8773-32-git-send-email-anthony.yznaga@oracle.com> (raw) In-Reply-To: <1617140178-8773-1-git-send-email-anthony.yznaga@oracle.com> Preserved pages are represented in the memblock reserved list, but page structs for pages in the reserved list are initialized early while boot is single threaded which means that a large number of preserved pages can impact boot time. To mitigate, defer initialization of preserved pages by skipping them when other reserved pages are initialized and initializing them later with a separate kernel thread. 
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com> --- arch/x86/mm/init_64.c | 1 - include/linux/mm.h | 2 +- mm/memblock.c | 11 +++++++++-- mm/page_alloc.c | 55 +++++++++++++++++++++++++++++++++++++++++++-------- 4 files changed, 57 insertions(+), 12 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 69bd71996b8b..8efb2fb2a88b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1294,7 +1294,6 @@ void __init mem_init(void) after_bootmem = 1; x86_init.hyper.init_after_bootmem(); - pkram_cleanup(); totalram_pages_add(pkram_reserved_pages); /* * Must be done after boot memory is put on freelist, because here we diff --git a/include/linux/mm.h b/include/linux/mm.h index 64a71bf20536..2a93b2a6ec8d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2337,7 +2337,7 @@ extern unsigned long free_reserved_area(void *start, void *end, extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); -extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); +extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid); /* Free the reserved page into the buddy system, so it gets managed. 
*/ static inline void free_reserved_page(struct page *page) diff --git a/mm/memblock.c b/mm/memblock.c index afaefa8fc6ab..461ea0f85495 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2007,11 +2007,18 @@ static unsigned long __init free_low_memory_core_early(void) unsigned long count = 0; phys_addr_t start, end; u64 i; + struct memblock_region *r; memblock_clear_hotplug(0, -1); - for_each_reserved_mem_range(i, &start, &end) - reserve_bootmem_region(start, end); + for_each_reserved_mem_region(r) { + if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT) && memblock_is_preserved(r)) + continue; + + start = r->base; + end = r->base + r->size; + reserve_bootmem_region(start, end, NUMA_NO_NODE); + } /* * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cfc72873961d..999fcc8fe907 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -72,6 +72,7 @@ #include <linux/padata.h> #include <linux/khugepaged.h> #include <linux/buffer_head.h> +#include <linux/pkram.h> #include <asm/sections.h> #include <asm/tlbflush.h> @@ -1475,15 +1476,18 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn, } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -static void __meminit init_reserved_page(unsigned long pfn) +static void __meminit init_reserved_page(unsigned long pfn, int nid) { pg_data_t *pgdat; - int nid, zid; + int zid; - if (!early_page_uninitialised(pfn)) - return; + if (nid == NUMA_NO_NODE) { + if (!early_page_uninitialised(pfn)) + return; + + nid = early_pfn_to_nid(pfn); + } - nid = early_pfn_to_nid(pfn); pgdat = NODE_DATA(nid); for (zid = 0; zid < MAX_NR_ZONES; zid++) { @@ -1495,7 +1499,7 @@ static void __meminit init_reserved_page(unsigned long pfn) __init_single_page(pfn_to_page(pfn), pfn, zid, nid); } #else -static inline void init_reserved_page(unsigned long pfn) +static inline void init_reserved_page(unsigned long pfn, int nid) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -1506,7 
+1510,7 @@ static inline void init_reserved_page(unsigned long pfn) * marks the pages PageReserved. The remaining valid pages are later * sent to the buddy page allocator. */ -void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) +void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid) { unsigned long start_pfn = PFN_DOWN(start); unsigned long end_pfn = PFN_UP(end); @@ -1515,7 +1519,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); - init_reserved_page(start_pfn); + init_reserved_page(start_pfn, nid); /* Avoid false-positive PageTail() */ INIT_LIST_HEAD(&page->lru); @@ -2008,6 +2012,35 @@ static int __init deferred_init_memmap(void *data) return 0; } +#ifdef CONFIG_PKRAM +static int __init deferred_init_preserved(void *dummy) +{ + unsigned long start = jiffies; + unsigned long nr_pages = 0; + struct memblock_region *r; + phys_addr_t spa, epa; + int nid; + + for_each_reserved_mem_region(r) { + if (!memblock_is_preserved(r)) + continue; + + spa = r->base; + epa = r->base + r->size; + nid = memblock_get_region_node(r); + + reserve_bootmem_region(spa, epa, nid); + nr_pages += ((epa - spa) >> PAGE_SHIFT); + } + + pr_info("initialised %lu preserved pages in %ums\n", nr_pages, + jiffies_to_msecs(jiffies - start)); + + pgdat_init_report_one_done(); + return 0; +} +#endif /* CONFIG_PKRAM */ + /* * If this zone has deferred pages, try to grow it by initializing enough * deferred pages to satisfy the allocation specified by order, rounded up to @@ -2107,6 +2140,10 @@ void __init page_alloc_init_late(void) /* There will be num_node_state(N_MEMORY) threads */ atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); +#ifdef CONFIG_PKRAM + atomic_inc(&pgdat_init_n_undone); + kthread_run(deferred_init_preserved, NULL, "pgdatainit_preserved"); +#endif for_each_node_state(nid, N_MEMORY) { kthread_run(deferred_init_memmap, 
NODE_DATA(nid), "pgdatinit%d", nid); } @@ -2114,6 +2151,8 @@ void __init page_alloc_init_late(void) /* Block until all are initialised */ wait_for_completion(&pgdat_init_all_done_comp); + pkram_cleanup(); + /* * The number of managed pages has changed due to the initialisation * so the pcpu batch and high limits needs to be updated or the limits -- 1.8.3.1 _______________________________________________ kexec mailing list kexec@lists.infradead.org http://lists.infradead.org/mailman/listinfo/kexec
next prev parent reply other threads:[~2021-03-30 21:30 UTC|newest] Thread overview: 94+ messages / expand[flat|nested] mbox.gz Atom feed top 2021-03-30 21:35 [RFC v2 00/43] PKRAM: Preserved-over-Kexec RAM Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 01/43] mm: add PKRAM API stubs and Kconfig Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-31 18:43 ` Randy Dunlap 2021-03-31 18:43 ` Randy Dunlap 2021-03-31 20:28 ` Anthony Yznaga 2021-03-31 20:28 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 02/43] mm: PKRAM: implement node load and save functions Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 03/43] mm: PKRAM: implement object " Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 04/43] mm: PKRAM: implement page stream operations Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 05/43] mm: PKRAM: support preserving transparent hugepages Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 06/43] mm: PKRAM: implement byte stream operations Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 07/43] mm: PKRAM: link nodes by pfn before reboot Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 08/43] mm: PKRAM: introduce super block Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 09/43] PKRAM: track preserved pages in a physical mapping pagetable Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 10/43] PKRAM: pass a list of preserved ranges to the next kernel Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 11/43] PKRAM: prepare for adding preserved ranges to memblock reserved Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 12/43] mm: PKRAM: reserve preserved memory at boot Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 13/43] PKRAM: free the 
preserved ranges list Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 14/43] PKRAM: prevent inadvertent use of a stale superblock Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 15/43] PKRAM: provide a way to ban pages from use by PKRAM Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 16/43] kexec: PKRAM: prevent kexec clobbering preserved pages in some cases Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 17/43] PKRAM: provide a way to check if a memory range has preserved pages Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 18/43] kexec: PKRAM: avoid clobbering already " Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 19/43] mm: PKRAM: allow preserved memory to be freed from userspace Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 20/43] PKRAM: disable feature when running the kdump kernel Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 21/43] x86/KASLR: PKRAM: support physical kaslr Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 22/43] x86/boot/compressed/64: use 1GB pages for mappings Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 23/43] mm: shmem: introduce shmem_insert_page Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:35 ` [RFC v2 24/43] mm: shmem: enable saving to PKRAM Anthony Yznaga 2021-03-30 21:35 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 25/43] mm: shmem: prevent swapping of PKRAM-enabled tmpfs pages Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 26/43] mm: shmem: specify the mm to use when inserting pages Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 27/43] mm: shmem: when inserting, handle pages already charged to a memcg Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 
28/43] x86/mm/numa: add numa_isolate_memblocks() Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 29/43] PKRAM: ensure memblocks with preserved pages init'd for numa Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 30/43] memblock: PKRAM: mark memblocks that contain preserved pages Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga [this message] 2021-03-30 21:36 ` [RFC v2 31/43] memblock, mm: defer initialization of " Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 32/43] shmem: preserve shmem files a chunk at a time Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 33/43] PKRAM: atomically add and remove link pages Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 34/43] shmem: PKRAM: multithread preserving and restoring shmem pages Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 35/43] shmem: introduce shmem_insert_pages() Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 36/43] PKRAM: add support for loading pages in bulk Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 37/43] shmem: PKRAM: enable bulk loading of preserved pages into shmem Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 38/43] mm: implement splicing a list of pages to the LRU Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 39/43] shmem: optimize adding pages to the LRU in shmem_insert_pages() Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 40/43] shmem: initial support for adding multiple pages to pagecache Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 41/43] XArray: add xas_export_node() and xas_import_node() Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 42/43] shmem: reduce time holding xa_lock when inserting pages Anthony Yznaga 2021-03-30 
21:36 ` Anthony Yznaga 2021-03-30 21:36 ` [RFC v2 43/43] PKRAM: improve index alignment of pkram_link entries Anthony Yznaga 2021-03-30 21:36 ` Anthony Yznaga 2021-06-05 13:39 ` [RFC v2 00/43] PKRAM: Preserved-over-Kexec RAM Pavel Tatashin 2021-06-05 13:39 ` Pavel Tatashin
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1617140178-8773-32-git-send-email-anthony.yznaga@oracle.com \ --to=anthony.yznaga@oracle.com \ --cc=Jonathan.Cameron@huawei.com \ --cc=akpm@linux-foundation.org \ --cc=alex.shi@linux.alibaba.com \ --cc=andreyknvl@google.com \ --cc=ardb@kernel.org \ --cc=ashish.kalra@amd.com \ --cc=bhe@redhat.com \ --cc=bp@alien8.de \ --cc=corbet@lwn.net \ --cc=dan.j.williams@intel.com \ --cc=daniel.kiper@oracle.com \ --cc=daniel.m.jordan@oracle.com \ --cc=dave.hansen@linux.intel.com \ --cc=david@redhat.com \ --cc=ebiederm@xmission.com \ --cc=graf@amazon.com \ --cc=guro@fb.com \ --cc=hannes@cmpxchg.org \ --cc=hpa@zytor.com \ --cc=hughd@google.com \ --cc=iamjoonsoo.kim@lge.com \ --cc=jason.zeng@intel.com \ --cc=jroedel@suse.de \ --cc=keescook@chromium.org \ --cc=kexec@lists.infradead.org \ --cc=lei.l.li@intel.com \ --cc=linux-doc@vger.kernel.org \ --cc=linux-fsdevel@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=luto@kernel.org \ --cc=martin.b.radev@gmail.com \ --cc=masahiroy@kernel.org \ --cc=mhocko@kernel.org \ --cc=mingo@redhat.com \ --cc=nathan@kernel.org \ --cc=nivedita@alum.mit.edu \ --cc=peterz@infradead.org \ --cc=rafael.j.wysocki@intel.com \ --cc=richard.weiyang@gmail.com \ --cc=rminnich@gmail.com \ --cc=rppt@kernel.org \ --cc=steven.sistare@oracle.com \ --cc=terrelln@fb.com \ --cc=tglx@linutronix.de \ --cc=vbabka@suse.cz \ --cc=vdavydov.dev@gmail.com \ --cc=vincenzo.frascino@arm.com \ --cc=willy@infradead.org \ --cc=x86@kernel.org \ /path/to/YOUR_REPLY 
https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.