* [RFC PATCH 1/5] mm: large system hash use vmalloc for size > MAX_ORDER when !hashdist
From: Nicholas Piggin @ 2019-05-15 13:19 UTC
To: linuxppc-dev, linux-mm; +Cc: Nicholas Piggin, Linus Torvalds
The kernel currently clamps large system hashes to MAX_ORDER when
hashdist is not set, which is rather arbitrary; instead, fall back to
vmalloc when the requested size exceeds what the page allocator can
provide.
vmalloc space is limited on 32-bit machines, but this change shouldn't
consume much more of it there, because small physical memory keeps
these hash tables small anyway.
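As a sanity check on what the old clamp meant in practice, here is a
standalone sketch of the cutoff arithmetic (the 4kB page size and
MAX_ORDER of 11 are illustrative assumptions; both are configuration
dependent):

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* assumed 4kB pages */
	const unsigned int max_order = 11;	/* assumed buddy limit */

	/*
	 * The page allocator serves orders 0..MAX_ORDER-1, so the
	 * largest linearly mapped table was (1 << (MAX_ORDER - 1))
	 * pages.
	 */
	unsigned long cutoff = (1UL << (max_order - 1)) << page_shift;

	printf("tables above %lu MB previously shrank to fit;\n",
	       cutoff >> 20);
	printf("with this patch they are vmalloc'ed at full size\n");
	return 0;
}

With those values the cutoff is 4MB, which large system hashes easily
exceed.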
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
mm/page_alloc.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59661106da16..1683d54d6405 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7978,7 +7978,7 @@ void *__init alloc_large_system_hash(const char *tablename,
else
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
- } else if (hashdist) {
+ } else if (get_order(size) >= MAX_ORDER || hashdist) {
table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
} else {
/*
@@ -7986,10 +7986,8 @@ void *__init alloc_large_system_hash(const char *tablename,
* some pages at the end of hash table which
* alloc_pages_exact() automatically does
*/
- if (get_order(size) < MAX_ORDER) {
- table = alloc_pages_exact(size, gfp_flags);
- kmemleak_alloc(table, size, 1, gfp_flags);
- }
+ table = alloc_pages_exact(size, gfp_flags);
+ kmemleak_alloc(table, size, 1, gfp_flags);
}
} while (!table && size > PAGE_SIZE && --log2qty);
--
2.20.1
* [RFC PATCH 2/5] mm: large system hash avoid vmap for non-NUMA machines when hashdist
From: Nicholas Piggin @ 2019-05-15 13:19 UTC
To: linuxppc-dev, linux-mm; +Cc: Nicholas Piggin, Linus Torvalds
Allocation currently always uses vmalloc when hashdist is true. When
there is only one online node there is nothing to interleave, so if
get_order(size) is also below MAX_ORDER, vmalloc can be avoided.
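Taken together with patch 1, the allocation choice reduces to a small
predicate; a userspace approximation follows (the PAGE_SHIFT and
MAX_ORDER values are assumptions, and get_order() is re-derived here
rather than being the kernel's implementation):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SHIFT	12	/* assumed */
#define MAX_ORDER	11	/* assumed */

/* Smallest order such that (1 << order) pages cover size bytes. */
static int get_order(size_t size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

/*
 * Mirrors the condition in alloc_large_system_hash() after this
 * patch: vmalloc only when the page allocator can't serve the size,
 * or when interleaving is both requested and actually possible.
 */
static bool table_uses_vmalloc(size_t size, bool hashdist, int nodes)
{
	return get_order(size) >= MAX_ORDER ||
	       (hashdist && nodes > 1);
}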
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
mm/page_alloc.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1683d54d6405..1312d4db5602 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7978,7 +7978,8 @@ void *__init alloc_large_system_hash(const char *tablename,
else
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
- } else if (get_order(size) >= MAX_ORDER || hashdist) {
+ } else if (get_order(size) >= MAX_ORDER ||
+ (hashdist && num_online_nodes() > 1)) {
table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
} else {
/*
--
2.20.1
* [RFC PATCH 3/5] mm/vmalloc: Hugepage vmalloc mappings
From: Nicholas Piggin @ 2019-05-15 13:19 UTC
To: linuxppc-dev, linux-mm; +Cc: Nicholas Piggin, Linus Torvalds
This appears to improve cache-hot git diff performance by about 5% on
a POWER9 (with a 32MB dentry cache hash).
Profiling git diff dTLB misses with a vanilla kernel:
  81.75%  git  [kernel.vmlinux]  [k] __d_lookup_rcu
   7.21%  git  [kernel.vmlinux]  [k] strncpy_from_user
   1.77%  git  [kernel.vmlinux]  [k] find_get_entry
   1.59%  git  [kernel.vmlinux]  [k] kmem_cache_free

          40,168      dTLB-miss

     0.100342754 seconds time elapsed
After this patch (and the subsequent powerpc HUGE_VMAP patches), the
dentry cache hash gets mapped with 2MB pages:
           2,987      dTLB-miss

     0.095933138 seconds time elapsed
The elapsed time improvement isn't very scientific, but it seems
consistent, and the TLB miss count certainly improves by an order of
magnitude. My laptop takes a lot of misses here too, so x86 would be
interesting to test; I think it should just work there.
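The policy this adds to __vmalloc_node_range() is small enough to
sketch separately (a kernel-context sketch simplified from the hunks
below; PMD_SIZE is 2MB on the configurations discussed here):

/*
 * Pick the mapping page size for a vmalloc of 'size' bytes. A
 * NUMA_NO_NODE allocation may be spread across nodes, so only use
 * PMD mappings when each node's share would still fill one.
 */
static unsigned int vmalloc_mapping_shift(unsigned long size, int node)
{
	unsigned long size_per_node = size;

	if (node == NUMA_NO_NODE)
		size_per_node /= num_online_nodes();
	return size_per_node >= PMD_SIZE ? PMD_SHIFT : PAGE_SHIFT;
}

If the huge allocation or mapping then fails, the fail path below
retries the whole thing with shift == PAGE_SHIFT, so callers see no
new failure mode.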
---
include/linux/vmalloc.h | 1 +
mm/vmalloc.c | 87 +++++++++++++++++++++++++++--------------
2 files changed, 59 insertions(+), 29 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c6eebb839552..029635560306 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -42,6 +42,7 @@ struct vm_struct {
unsigned long size;
unsigned long flags;
struct page **pages;
+ unsigned int page_shift;
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e5e9e1fcac01..c9ba88768bca 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -216,32 +216,34 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
*/
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
- pgprot_t prot, struct page **pages)
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
{
- pgd_t *pgd;
- unsigned long next;
unsigned long addr = start;
- int err = 0;
- int nr = 0;
+ unsigned int i, nr = (end - start) >> (PAGE_SHIFT + page_shift);
- BUG_ON(addr >= end);
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
+ for (i = 0; i < nr; i++) {
+ int err;
+
+ err = ioremap_page_range(addr,
+ addr + (PAGE_SIZE << page_shift),
+ __pa(page_address(pages[i])), prot);
if (err)
return err;
- } while (pgd++, addr = next, addr != end);
+
+ addr += PAGE_SIZE << page_shift;
+ }
return nr;
}
static int vmap_page_range(unsigned long start, unsigned long end,
- pgprot_t prot, struct page **pages)
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
{
int ret;
- ret = vmap_page_range_noflush(start, end, prot, pages);
+ ret = vmap_page_range_noflush(start, end, prot, pages, page_shift);
flush_cache_vmap(start, end);
return ret;
}
@@ -1189,7 +1191,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
addr = va->va_start;
mem = (void *)addr;
}
- if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+ if (vmap_page_range(addr, addr + size, prot, pages, 0) < 0) {
vm_unmap_ram(mem, count);
return NULL;
}
@@ -1305,7 +1307,7 @@ void __init vmalloc_init(void)
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
pgprot_t prot, struct page **pages)
{
- return vmap_page_range_noflush(addr, addr + size, prot, pages);
+ return vmap_page_range_noflush(addr, addr + size, prot, pages, 0);
}
/**
@@ -1352,7 +1354,7 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
unsigned long end = addr + get_vm_area_size(area);
int err;
- err = vmap_page_range(addr, end, prot, pages);
+ err = vmap_page_range(addr, end, prot, pages, 0);
return err > 0 ? 0 : err;
}
@@ -1395,8 +1397,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
return NULL;
if (flags & VM_IOREMAP)
- align = 1ul << clamp_t(int, get_count_order_long(size),
- PAGE_SHIFT, IOREMAP_MAX_ORDER);
+ align = max(align,
+ 1ul << clamp_t(int, get_count_order_long(size),
+ PAGE_SHIFT, IOREMAP_MAX_ORDER));
area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!area))
@@ -1608,7 +1611,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
struct page *page = area->pages[i];
BUG_ON(!page);
- __free_pages(page, 0);
+ __free_pages(page, area->page_shift);
}
kvfree(area->pages);
@@ -1751,14 +1754,17 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, int node)
{
struct page **pages;
+ unsigned long addr = (unsigned long)area->addr;
+ unsigned long size = get_vm_area_size(area);
+ unsigned int page_shift = area->page_shift;
+ unsigned int shift = page_shift + PAGE_SHIFT;
unsigned int nr_pages, array_size, i;
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
- 0 :
- __GFP_HIGHMEM;
+ 0 : __GFP_HIGHMEM;
- nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+ nr_pages = size >> shift;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
@@ -1779,10 +1785,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
- if (node == NUMA_NO_NODE)
- page = alloc_page(alloc_mask|highmem_mask);
- else
- page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+ page = alloc_pages_node(node,
+ alloc_mask|highmem_mask, page_shift);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
@@ -1794,8 +1798,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
cond_resched();
}
- if (map_vm_area(area, prot, pages))
+ if (vmap_page_range(addr, addr + size, prot, pages, page_shift) < 0)
goto fail;
+
return area->addr;
fail:
@@ -1832,19 +1837,35 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
struct vm_struct *area;
void *addr;
unsigned long real_size = size;
+ unsigned long real_align = align;
+ unsigned long size_per_node;
+ unsigned int shift;
size = PAGE_ALIGN(size);
if (!size || (size >> PAGE_SHIFT) > totalram_pages())
goto fail;
+ size_per_node = size;
+ if (node == NUMA_NO_NODE)
+ size_per_node /= num_online_nodes();
+ if (size_per_node >= PMD_SIZE)
+ shift = PMD_SHIFT;
+ else
+ shift = PAGE_SHIFT;
+again:
+ align = max(real_align, 1UL << shift);
+ size = ALIGN(real_size, align);
+
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
vm_flags, start, end, node, gfp_mask, caller);
if (!area)
goto fail;
+ area->page_shift = shift - PAGE_SHIFT;
+
addr = __vmalloc_area_node(area, gfp_mask, prot, node);
if (!addr)
- return NULL;
+ goto fail;
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -1858,8 +1879,16 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return addr;
fail:
- warn_alloc(gfp_mask, NULL,
+ if (shift == PMD_SHIFT) {
+ shift = PAGE_SHIFT;
+ goto again;
+ }
+
+ if (!area) {
+ /* Warn for area allocation, page allocations already warn */
+ warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure: %lu bytes", real_size);
+ }
return NULL;
}
--
2.20.1
* [RFC PATCH 4/5] powerpc/64s/radix: Enable HAVE_ARCH_HUGE_VMAP
From: Nicholas Piggin @ 2019-05-15 13:19 UTC
To: linuxppc-dev, linux-mm; +Cc: Nicholas Piggin, Linus Torvalds
This does not actually enable huge vmap mappings yet, because
powerpc/64 ioremap does not call ioremap_page_range. It is
nevertheless a prerequisite for implementing huge mappings in
ioremap, because the generic vunmap code needs to be able to tear
them down.
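For reference, the generic teardown path that has to cope looks
roughly like this (paraphrased from mm/vmalloc.c of this era, not
quoted verbatim); without a working pmd_clear_huge(), a leaf PMD
would be mis-walked as if it were a page table:

static void vunmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;	/* leaf entry: cleared in one go */
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);	/* else walk PTEs */
	} while (pmd++, addr = next, addr != end);
}

The PUD level is analogous via pud_clear_huge(), and the
pmd_free_pte_page()/pud_free_pmd_page() helpers below cover the
ioremap case where a huge mapping replaces an existing table.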
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/Kconfig | 1 +
arch/powerpc/mm/book3s64/radix_pgtable.c | 93 ++++++++++++++++++++++++
2 files changed, 94 insertions(+)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d7996cfaceca..ffac84600e0e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -166,6 +166,7 @@ config PPC
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if PPC32
select HAVE_ARCH_KGDB
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index c9bcf428dd2b..3bc9ade56277 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1122,3 +1122,96 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
set_pte_at(mm, addr, ptep, pte);
}
+
+int __init arch_ioremap_pud_supported(void)
+{
+ return radix_enabled();
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+ return radix_enabled();
+}
+
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pud;
+ pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
+
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_huge(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ pmd_t *pmd;
+ int i;
+
+ pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pud_clear(pud);
+
+ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd[i])) {
+ pte_t *pte;
+ pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+
+ pte_free_kernel(&init_mm, pte);
+ }
+ }
+
+ pmd_free(&init_mm, pmd);
+
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pmd;
+ pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
+
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (pmd_huge(*pmd)) {
+ pmd_clear(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pmd_clear(pmd);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+
+ pte_free_kernel(&init_mm, pte);
+
+ return 1;
+}
--
2.20.1
* [RFC PATCH 5/5] powerpc/64s/radix: iomap use huge page mappings
From: Nicholas Piggin @ 2019-05-15 13:19 UTC
To: linuxppc-dev, linux-mm; +Cc: Nicholas Piggin, Linus Torvalds
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/include/asm/book3s/64/pgtable.h | 8 +++
arch/powerpc/mm/pgtable_64.c | 54 +++++++++++++++++---
2 files changed, 56 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 7dede2e34b70..93b8a99df88e 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -274,6 +274,14 @@ extern unsigned long __vmalloc_end;
#define VMALLOC_START __vmalloc_start
#define VMALLOC_END __vmalloc_end
+static inline unsigned int ioremap_max_order(void)
+{
+ if (radix_enabled())
+ return PUD_SHIFT;
+ return 7 + PAGE_SHIFT; /* default from linux/vmalloc.h */
+}
+#define IOREMAP_MAX_ORDER ({ ioremap_max_order(); })
+
extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_virt_size;
extern unsigned long __kernel_io_start;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index d2d976ff8a0e..f660116251e6 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -112,7 +112,7 @@ unsigned long ioremap_bot = IOREMAP_BASE;
* __ioremap_at - Low level function to establish the page tables
* for an IO mapping
*/
-void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+static void __iomem * hash__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
unsigned long i;
@@ -120,6 +120,50 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
if (pgprot_val(prot) & H_PAGE_4K_PFN)
return NULL;
+ for (i = 0; i < size; i += PAGE_SIZE)
+ if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
+ return NULL;
+
+ return (void __iomem *)ea;
+}
+
+static int radix__ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ while (addr != end) {
+ if (!(addr & ~PUD_MASK) && !(phys_addr & ~PUD_MASK) &&
+ end - addr >= PUD_SIZE) {
+ if (radix__map_kernel_page(addr, phys_addr, prot, PUD_SIZE))
+ return -ENOMEM;
+ addr += PUD_SIZE;
+ phys_addr += PUD_SIZE;
+
+ } else if (!(addr & ~PMD_MASK) && !(phys_addr & ~PMD_MASK) &&
+ end - addr >= PMD_SIZE) {
+ if (radix__map_kernel_page(addr, phys_addr, prot, PMD_SIZE))
+ return -ENOMEM;
+ addr += PMD_SIZE;
+ phys_addr += PMD_SIZE;
+
+ } else {
+ if (radix__map_kernel_page(addr, phys_addr, prot, PAGE_SIZE))
+ return -ENOMEM;
+ addr += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ }
+ }
+ return 0;
+}
+
+static void __iomem * radix__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+{
+ if (radix__ioremap_page_range((unsigned long)ea, (unsigned long)ea + size, pa, prot))
+ return NULL;
+ return (void __iomem *)ea;
+}
+
+void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
+{
if ((ea + size) >= (void *)IOREMAP_END) {
pr_warn("Outside the supported range\n");
return NULL;
@@ -129,11 +173,9 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
WARN_ON(size & ~PAGE_MASK);
- for (i = 0; i < size; i += PAGE_SIZE)
- if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
- return NULL;
-
- return (void __iomem *)ea;
+ if (radix_enabled())
+ return radix__ioremap_at(pa, ea, size, prot);
+ return hash__ioremap_at(pa, ea, size, prot);
}
/**
--
2.20.1