* [PATCH 3/4] mm: Move ioremap page table mapping function to mm/
From: Nicholas Piggin @ 2019-05-28 12:04 UTC
To: linux-mm
Cc: Nicholas Piggin, linux-arch, Toshi Kani, Linus Torvalds,
Ard Biesheuvel, Andrew Morton, Uladzislau Rezki
ioremap_page_range is a useful generic function for creating a kernel
virtual mapping. Move it to mm/vmalloc.c and rename it vmap_range, to be
used by a subsequent change; a brief usage sketch follows below.
For clarity with this move, also:
- Rename vunmap_page_range (vmap_range's inverse) to vunmap_range.
- Rename vmap_page_range (which takes a page array) to vmap_pages_range.
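As an illustration only (not part of the patch): a minimal sketch of a
caller of the new interface, assuming the virtual range has already been
reserved and that addr, phys_addr, protection and maximum shift here are
hypothetical:

    /*
     * Map a 2MB-aligned physical region at a reserved kernel virtual
     * address, allowing mappings up to PMD size.
     */
    int err = vmap_range(addr, addr + SZ_2M, phys_addr,
                         PAGE_KERNEL, PMD_SHIFT);
    if (err)
        return err; /* e.g. -ENOMEM from page table allocation */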
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
include/linux/vmalloc.h | 3 +
lib/ioremap.c | 173 +++---------------------------
mm/vmalloc.c | 227 ++++++++++++++++++++++++++++++++++++----
3 files changed, 228 insertions(+), 175 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 51e131245379..812bea5866d6 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -147,6 +147,9 @@ extern struct vm_struct *find_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page **pages);
#ifdef CONFIG_MMU
+extern int vmap_range(unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift);
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 063213685563..e13946da8ec3 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -58,165 +58,24 @@ static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pte_t *pte;
- u64 pfn;
-
- pfn = phys_addr >> PAGE_SHIFT;
- pte = pte_alloc_kernel(pmd, addr);
- if (!pte)
- return -ENOMEM;
- do {
- BUG_ON(!pte_none(*pte));
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- pfn++;
- } while (pte++, addr += PAGE_SIZE, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pmd_enabled())
- return 0;
-
- if ((end - addr) != PMD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PMD_SIZE))
- return 0;
-
- if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
- return 0;
-
- return pmd_set_huge(pmd, phys_addr, prot);
-}
-
-static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pmd_t *pmd;
- unsigned long next;
-
- pmd = pmd_alloc(&init_mm, pud, addr);
- if (!pmd)
- return -ENOMEM;
- do {
- next = pmd_addr_end(addr, end);
-
- if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
- continue;
-
- if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
- return -ENOMEM;
- } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_pud_enabled())
- return 0;
-
- if ((end - addr) != PUD_SIZE)
- return 0;
-
- if (!IS_ALIGNED(phys_addr, PUD_SIZE))
- return 0;
-
- if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
- return 0;
-
- return pud_set_huge(pud, phys_addr, prot);
-}
-
-static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- pud_t *pud;
- unsigned long next;
-
- pud = pud_alloc(&init_mm, p4d, addr);
- if (!pud)
- return -ENOMEM;
- do {
- next = pud_addr_end(addr, end);
-
- if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
- continue;
-
- if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
- return -ENOMEM;
- } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
-static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr,
- pgprot_t prot)
-{
- if (!ioremap_p4d_enabled())
- return 0;
-
- if ((end - addr) != P4D_SIZE)
- return 0;
-
- if (!IS_ALIGNED(phys_addr, P4D_SIZE))
- return 0;
-
- if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
- return 0;
-
- return p4d_set_huge(p4d, phys_addr, prot);
-}
-
-static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
-{
- p4d_t *p4d;
- unsigned long next;
-
- p4d = p4d_alloc(&init_mm, pgd, addr);
- if (!p4d)
- return -ENOMEM;
- do {
- next = p4d_addr_end(addr, end);
-
- if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
- continue;
-
- if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
- return -ENOMEM;
- } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
- return 0;
-}
-
int ioremap_page_range(unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
- pgd_t *pgd;
- unsigned long start;
- unsigned long next;
- int err;
-
- might_sleep();
- BUG_ON(addr >= end);
-
- start = addr;
- pgd = pgd_offset_k(addr);
- do {
- next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
- if (err)
- break;
- } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
-
- flush_cache_vmap(start, end);
+ unsigned int max_page_shift = PAGE_SHIFT;
+
+ /*
+ * max_page_shift is a single upper bound, so a platform must enable
+ * all smaller huge page sizes to take advantage of a given size;
+ * vmap_range falls back to smaller mappings where a larger one
+ * cannot be used.
+ */
+ if (ioremap_pmd_enabled()) {
+ max_page_shift = PMD_SHIFT;
+ if (ioremap_pud_enabled()) {
+ max_page_shift = PUD_SHIFT;
+ if (ioremap_p4d_enabled())
+ max_page_shift = P4D_SHIFT;
+ }
+ }
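+
+ /*
+ * Example: a platform with pmd and pud huge ioremap enabled but not
+ * p4d ends up with max_page_shift == PUD_SHIFT, so vmap_range may
+ * use 1GB, 2MB or base page mappings (with 4kB base pages) as
+ * alignment and size allow.
+ */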
- return err;
+ return vmap_range(addr, end, phys_addr, prot, max_page_shift);
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 233af6936c93..6a0c97f89091 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -119,7 +119,7 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (p4d++, addr = next, addr != end);
}
-static void vunmap_page_range(unsigned long addr, unsigned long end)
+static void vunmap_range(unsigned long addr, unsigned long end)
{
pgd_t *pgd;
unsigned long next;
@@ -135,6 +135,197 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+{
+ pte_t *pte;
+ u64 pfn;
+
+ pfn = phys_addr >> PAGE_SHIFT;
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
+ do {
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ return 0;
+}
+
+static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ return 0;
+
+ if (max_page_shift < PMD_SHIFT)
+ return 0;
+
+ if ((end - addr) != PMD_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, PMD_SIZE))
+ return 0;
+
+ if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
+ return 0;
+
+ return pmd_set_huge(pmd, phys_addr, prot);
+}
+
+static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+ next = pmd_addr_end(addr, end);
+
+ if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
+ max_page_shift))
+ continue;
+
+ if (vmap_pte_range(pmd, addr, next, phys_addr, prot))
+ return -ENOMEM;
+ } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
+ return 0;
+}
+
+static int vmap_try_huge_pud(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ return 0;
+
+ if (max_page_shift < PUD_SHIFT)
+ return 0;
+
+ if ((end - addr) != PUD_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, PUD_SIZE))
+ return 0;
+
+ if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
+ return 0;
+
+ return pud_set_huge(pud, phys_addr, prot);
+}
+
+static inline int vmap_pud_range(p4d_t *p4d, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_alloc(&init_mm, p4d, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+
+ if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
+ max_page_shift))
+ continue;
+
+ if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
+ max_page_shift))
+ return -ENOMEM;
+ } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
+ return 0;
+}
+
+static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ return 0;
+
+ if (max_page_shift < P4D_SHIFT)
+ return 0;
+
+ if ((end - addr) != P4D_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, P4D_SIZE))
+ return 0;
+
+ if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
+ return 0;
+
+ return p4d_set_huge(p4d, phys_addr, prot);
+}
+
+static inline int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ p4d_t *p4d;
+ unsigned long next;
+
+ p4d = p4d_alloc(&init_mm, pgd, addr);
+ if (!p4d)
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
+ max_page_shift))
+ continue;
+
+ if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
+ max_page_shift))
+ return -ENOMEM;
+ } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
+ return 0;
+}
+
+static int vmap_range_noflush(unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ pgd_t *pgd;
+ unsigned long start;
+ unsigned long next;
+ int err;
+
+ might_sleep();
+ BUG_ON(addr >= end);
+
+ start = addr;
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
+ max_page_shift);
+ if (err)
+ break;
+ } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
+
+ return err;
+}
+
+int vmap_range(unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int max_page_shift)
+{
+ int ret;
+
+ ret = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
+ flush_cache_vmap(addr, end);
+ return ret;
+}
+
+static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
pte_t *pte;
@@ -160,7 +351,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
return 0;
}
-static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
pmd_t *pmd;
@@ -171,13 +362,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
- if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
+ if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
}
-static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
+static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
pud_t *pud;
@@ -188,13 +379,13 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
+ if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
}
-static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
+static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
p4d_t *p4d;
@@ -205,7 +396,7 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
- if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
+ if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr))
return -ENOMEM;
} while (p4d++, addr = next, addr != end);
return 0;
@@ -217,7 +408,7 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
*
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
*/
-static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+static int vmap_pages_range_noflush(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages)
{
pgd_t *pgd;
@@ -230,7 +421,7 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
+ err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr);
if (err)
return err;
} while (pgd++, addr = next, addr != end);
@@ -238,12 +429,12 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
return nr;
}
-static int vmap_page_range(unsigned long start, unsigned long end,
+static int vmap_pages_range(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages)
{
int ret;
- ret = vmap_page_range_noflush(start, end, prot, pages);
+ ret = vmap_pages_range_noflush(start, end, prot, pages);
flush_cache_vmap(start, end);
return ret;
}
@@ -1148,7 +1339,7 @@ static void free_vmap_area(struct vmap_area *va)
*/
static void unmap_vmap_area(struct vmap_area *va)
{
- vunmap_page_range(va->va_start, va->va_end);
+ vunmap_range(va->va_start, va->va_end);
}
/*
@@ -1586,7 +1777,7 @@ static void vb_free(const void *addr, unsigned long size)
rcu_read_unlock();
BUG_ON(!vb);
- vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+ vunmap_range((unsigned long)addr, (unsigned long)addr + size);
if (debug_pagealloc_enabled())
flush_tlb_kernel_range((unsigned long)addr,
@@ -1736,7 +1927,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
addr = va->va_start;
mem = (void *)addr;
}
- if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+ if (vmap_pages_range(addr, addr + size, prot, pages) < 0) {
vm_unmap_ram(mem, count);
return NULL;
}
@@ -1903,7 +2094,7 @@ void __init vmalloc_init(void)
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
pgprot_t prot, struct page **pages)
{
- return vmap_page_range_noflush(addr, addr + size, prot, pages);
+ return vmap_pages_range_noflush(addr, addr + size, prot, pages);
}
/**
@@ -1922,7 +2113,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
*/
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
- vunmap_page_range(addr, addr + size);
+ vunmap_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
@@ -1939,7 +2130,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
unsigned long end = addr + size;
flush_cache_vunmap(addr, end);
- vunmap_page_range(addr, end);
+ vunmap_range(addr, end);
flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);
@@ -1950,7 +2141,7 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
unsigned long end = addr + get_vm_area_size(area);
int err;
- err = vmap_page_range(addr, end, prot, pages);
+ err = vmap_pages_range(addr, end, prot, pages);
return err > 0 ? 0 : err;
}
--
2.20.1
* [PATCH 4/4] mm/vmalloc: Hugepage vmalloc mappings
From: Nicholas Piggin @ 2019-05-28 12:04 UTC
To: linux-mm
Cc: Nicholas Piggin, linux-arch, Toshi Kani, Linus Torvalds,
Ard Biesheuvel, Andrew Morton, Uladzislau Rezki
For platforms that define HAVE_ARCH_HUGE_VMAP, have vmalloc try to
allocate and map PMD-level huge pages when the allocation size is large
enough, and fall back to small pages if that fails.
Using 2MB page mappings for the 8MB dentry cache hash brings dTLB misses
during a `git diff` of the Linux kernel tree down from 45,000 to 8,000 on
a Kaby Lake CPU with mitigations=off (performance is within the noise,
under 1% difference; page tables are likely to be well cached for this
workload).
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
include/linux/vmalloc.h | 1 +
mm/vmalloc.c | 132 +++++++++++++++++++++++++++++++---------
2 files changed, 105 insertions(+), 28 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 812bea5866d6..4c92dc608928 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -42,6 +42,7 @@ struct vm_struct {
unsigned long size;
unsigned long flags;
struct page **pages;
+ unsigned int page_shift;
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6a0c97f89091..34de925ed4f4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -36,6 +36,7 @@
#include <linux/rbtree_augmented.h>
#include <linux/uaccess.h>
+#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
@@ -439,6 +440,41 @@ static int vmap_pages_range(unsigned long start, unsigned long end,
return ret;
}
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+static int vmap_hpages_range(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
+{
+ unsigned long addr = start;
+ unsigned int i, nr = (end - start) >> (PAGE_SHIFT + page_shift);
+
+ for (i = 0; i < nr; i++) {
+ int err;
+
+ /*
+ * page_shift is an order relative to PAGE_SHIFT, while
+ * vmap_range_noflush() takes an absolute shift.
+ */
+ err = vmap_range_noflush(addr,
+ addr + (PAGE_SIZE << page_shift),
+ __pa(page_address(pages[i])), prot,
+ page_shift + PAGE_SHIFT);
+ if (err)
+ return err;
+
+ addr += PAGE_SIZE << page_shift;
+ }
+ flush_cache_vmap(start, end);
+
+ return nr;
+}
+#else
+static int vmap_hpages_range(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
+{
+ /* page_shift is relative to PAGE_SHIFT; small pages mean 0 */
+ BUG_ON(page_shift != 0);
+ return vmap_pages_range(start, end, prot, pages);
+}
+#endif
+
+
int is_vmalloc_or_module_addr(const void *x)
{
/*
@@ -461,7 +497,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
{
unsigned long addr = (unsigned long) vmalloc_addr;
struct page *page = NULL;
- pgd_t *pgd = pgd_offset_k(addr);
+ pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -473,27 +509,38 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
*/
VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
+ pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
return NULL;
+
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
return NULL;
- pud = pud_offset(p4d, addr);
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ if (p4d_large(*p4d))
+ return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
+#endif
+ if (WARN_ON_ONCE(p4d_bad(*p4d)))
+ return NULL;
- /*
- * Don't dereference bad PUD or PMD (below) entries. This will also
- * identify huge mappings, which we may encounter on architectures
- * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
- * identified as vmalloc addresses by is_vmalloc_addr(), but are
- * not [unambiguously] associated with a struct page, so there is
- * no correct value to return for them.
- */
- WARN_ON_ONCE(pud_bad(*pud));
- if (pud_none(*pud) || pud_bad(*pud))
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud))
+ return NULL;
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ if (pud_large(*pud))
+ return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+#endif
+ if (WARN_ON_ONCE(pud_bad(*pud)))
return NULL;
+
pmd = pmd_offset(pud, addr);
- WARN_ON_ONCE(pmd_bad(*pmd));
- if (pmd_none(*pmd) || pmd_bad(*pmd))
+ if (pmd_none(*pmd))
+ return NULL;
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+ if (pmd_large(*pmd))
+ return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+#endif
+ if (WARN_ON_ONCE(pmd_bad(*pmd)))
return NULL;
ptep = pte_offset_map(pmd, addr);
@@ -501,6 +548,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
if (pte_present(pte))
page = pte_page(pte);
pte_unmap(ptep);
+
return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
@@ -2184,8 +2232,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
return NULL;
if (flags & VM_IOREMAP)
- align = 1ul << clamp_t(int, get_count_order_long(size),
- PAGE_SHIFT, IOREMAP_MAX_ORDER);
+ align = max(align,
+ 1ul << clamp_t(int, get_count_order_long(size),
+ PAGE_SHIFT, IOREMAP_MAX_ORDER));
area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!area))
@@ -2397,7 +2446,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
struct page *page = area->pages[i];
BUG_ON(!page);
- __free_pages(page, 0);
+ __free_pages(page, area->page_shift);
}
kvfree(area->pages);
@@ -2540,14 +2589,17 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, int node)
{
struct page **pages;
+ unsigned long addr = (unsigned long)area->addr;
+ unsigned long size = get_vm_area_size(area);
+ unsigned int page_shift = area->page_shift;
+ unsigned int shift = page_shift + PAGE_SHIFT;
unsigned int nr_pages, array_size, i;
const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
- 0 :
- __GFP_HIGHMEM;
+ 0 : __GFP_HIGHMEM;
- nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+ nr_pages = size >> shift;
array_size = (nr_pages * sizeof(struct page *));
area->nr_pages = nr_pages;
@@ -2568,10 +2620,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
- if (node == NUMA_NO_NODE)
- page = alloc_page(alloc_mask|highmem_mask);
- else
- page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+ page = alloc_pages_node(node,
+ alloc_mask|highmem_mask, page_shift);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
@@ -2583,8 +2633,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
cond_resched();
}
- if (map_vm_area(area, prot, pages))
+ if (vmap_hpages_range(addr, addr + size, prot, pages, page_shift) < 0)
goto fail;
+
return area->addr;
fail:
@@ -2618,22 +2669,39 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller)
{
- struct vm_struct *area;
+ struct vm_struct *area = NULL;
void *addr;
unsigned long real_size = size;
+ unsigned long real_align = align;
+ unsigned int shift = PAGE_SHIFT;
size = PAGE_ALIGN(size);
if (!size || (size >> PAGE_SHIFT) > totalram_pages())
goto fail;
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) {
+ unsigned long size_per_node;
+
+ size_per_node = size;
+ if (node == NUMA_NO_NODE)
+ size_per_node /= num_online_nodes();
+ if (size_per_node >= PMD_SIZE)
+ shift = PMD_SHIFT;
+ }
+again:
+ align = max(real_align, 1UL << shift);
+ size = ALIGN(real_size, align);
+
area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
vm_flags, start, end, node, gfp_mask, caller);
if (!area)
goto fail;
+ area->page_shift = shift - PAGE_SHIFT;
+
addr = __vmalloc_area_node(area, gfp_mask, prot, node);
if (!addr)
- return NULL;
+ goto fail;
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -2647,8 +2715,16 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return addr;
fail:
- warn_alloc(gfp_mask, NULL,
+ if (shift == PMD_SHIFT) {
+ shift = PAGE_SHIFT;
+ goto again;
+ }
+
+ if (!area) {
+ /* Warn for area allocation, page allocations already warn */
+ warn_alloc(gfp_mask, NULL,
"vmalloc: allocation failure: %lu bytes", real_size);
+ }
return NULL;
}
--
2.20.1