From: Christoph Hellwig <hch@lst.de> To: Dan Williams <dan.j.williams@intel.com> Cc: linux-mm@kvack.org, "Jérôme Glisse" <jglisse@redhat.com>, linux-nvdimm@lists.01.org Subject: [PATCH 08/14] mm: merge vmem_altmap_alloc into dev_pagemap_alloc_block_buf Date: Thu, 7 Dec 2017 07:08:34 -0800 [thread overview] Message-ID: <20171207150840.28409-9-hch@lst.de> (raw) In-Reply-To: <20171207150840.28409-1-hch@lst.de> There is no clear separation between the two, so merge them. Also move the device page map argument first for the more natural calling convention. Signed-off-by: Christoph Hellwig <hch@lst.de> --- arch/powerpc/mm/init_64.c | 2 +- arch/x86/mm/init_64.c | 2 +- include/linux/mm.h | 4 ++-- mm/sparse-vmemmap.c | 51 ++++++++++++++++++----------------------------- 4 files changed, 23 insertions(+), 36 deletions(-) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 3a39a644e96c..ec706857bdd6 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -203,7 +203,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) /* altmap lookups only work at section boundaries */ altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start)); if (altmap) - p = dev_pagemap_alloc_block_buf(page_size, altmap); + p = dev_pagemap_alloc_block_buf(altmap, page_size); else p = vmemmap_alloc_block_buf(page_size, node); if (!p) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9e1b489aa826..131749080874 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1372,7 +1372,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start, void *p; if (altmap) - p = dev_pagemap_alloc_block_buf(PMD_SIZE, altmap); + p = dev_pagemap_alloc_block_buf(altmap, PMD_SIZE); else p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (p) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 856869e2c119..cd3d1c00f6a3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2547,8 +2547,8 @@ pte_t 
*vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; void *vmemmap_alloc_block_buf(unsigned long size, int node); -void *dev_pagemap_alloc_block_buf(unsigned long size, - struct vmem_altmap *altmap); +void *dev_pagemap_alloc_block_buf(struct vmem_altmap *pgmap, + unsigned long size); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, int node); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 268b6c7dfdf4..fef41a6a9f64 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap) } /** - * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation - * @altmap - reserved page pool for the allocation - * @nr_pfns - size (in pages) of the allocation + * dev_pagemap_alloc_block_buf - allocate pages from the device page map + * @pgmap: device page map + * @size: size (in bytes) of the allocation * - * Allocations are aligned to the size of the request + * Allocations are aligned to the size of the request. 
*/ -static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap, - unsigned long nr_pfns) +void * __meminit dev_pagemap_alloc_block_buf(struct vmem_altmap *pgmap, + unsigned long size) { - unsigned long pfn = vmem_altmap_next_pfn(altmap); - unsigned long nr_align; - - nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG); - nr_align = ALIGN(pfn, nr_align) - pfn; - - if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap)) - return ULONG_MAX; - altmap->alloc += nr_pfns; - altmap->align += nr_align; - return pfn + nr_align; -} - -void * __meminit dev_pagemap_alloc_block_buf(unsigned long size, - struct vmem_altmap *altmap) -{ - unsigned long pfn, nr_pfns; - void *ptr; + unsigned long pfn, nr_pfns, nr_align; if (size & ~PAGE_MASK) { pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n", @@ -141,16 +124,20 @@ void * __meminit dev_pagemap_alloc_block_buf(unsigned long size, return NULL; } + pfn = vmem_altmap_next_pfn(pgmap); nr_pfns = size >> PAGE_SHIFT; - pfn = vmem_altmap_alloc(altmap, nr_pfns); - if (pfn < ULONG_MAX) - ptr = __va(__pfn_to_phys(pfn)); - else - ptr = NULL; - pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n", - __func__, pfn, altmap->alloc, altmap->align, nr_pfns); + nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG); + nr_align = ALIGN(pfn, nr_align) - pfn; + if (nr_pfns + nr_align > vmem_altmap_nr_free(pgmap)) + return NULL; - return ptr; + pgmap->alloc += nr_pfns; + pgmap->align += nr_align; + pfn += nr_align; + + pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n", + __func__, pfn, pgmap->alloc, pgmap->align, nr_pfns); + return __va(__pfn_to_phys(pfn)); } void __meminit vmemmap_verify(pte_t *pte, int node, -- 2.14.2 _______________________________________________ Linux-nvdimm mailing list Linux-nvdimm@lists.01.org https://lists.01.org/mailman/listinfo/linux-nvdimm
WARNING: multiple messages have this Message-ID (diff)
From: Christoph Hellwig <hch@lst.de> To: Dan Williams <dan.j.williams@intel.com> Cc: "Jérôme Glisse" <jglisse@redhat.com>, "Logan Gunthorpe" <logang@deltatee.com>, linux-nvdimm@lists.01.org, linux-mm@kvack.org Subject: [PATCH 08/14] mm: merge vmem_altmap_alloc into dev_pagemap_alloc_block_buf Date: Thu, 7 Dec 2017 07:08:34 -0800 [thread overview] Message-ID: <20171207150840.28409-9-hch@lst.de> (raw) In-Reply-To: <20171207150840.28409-1-hch@lst.de> There is no clear separation between the two, so merge them. Also move the device page map argument first for the more natural calling convention. Signed-off-by: Christoph Hellwig <hch@lst.de> --- arch/powerpc/mm/init_64.c | 2 +- arch/x86/mm/init_64.c | 2 +- include/linux/mm.h | 4 ++-- mm/sparse-vmemmap.c | 51 ++++++++++++++++++----------------------------- 4 files changed, 23 insertions(+), 36 deletions(-) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 3a39a644e96c..ec706857bdd6 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -203,7 +203,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) /* altmap lookups only work at section boundaries */ altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start)); if (altmap) - p = dev_pagemap_alloc_block_buf(page_size, altmap); + p = dev_pagemap_alloc_block_buf(altmap, page_size); else p = vmemmap_alloc_block_buf(page_size, node); if (!p) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 9e1b489aa826..131749080874 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1372,7 +1372,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start, void *p; if (altmap) - p = dev_pagemap_alloc_block_buf(PMD_SIZE, altmap); + p = dev_pagemap_alloc_block_buf(altmap, PMD_SIZE); else p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (p) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 856869e2c119..cd3d1c00f6a3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ 
-2547,8 +2547,8 @@ pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; void *vmemmap_alloc_block_buf(unsigned long size, int node); -void *dev_pagemap_alloc_block_buf(unsigned long size, - struct vmem_altmap *altmap); +void *dev_pagemap_alloc_block_buf(struct vmem_altmap *pgmap, + unsigned long size); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, int node); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 268b6c7dfdf4..fef41a6a9f64 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap) } /** - * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation - * @altmap - reserved page pool for the allocation - * @nr_pfns - size (in pages) of the allocation + * dev_pagemap_alloc_block_buf - allocate pages from the device page map + * @pgmap: device page map + * @size: size (in bytes) of the allocation * - * Allocations are aligned to the size of the request + * Allocations are aligned to the size of the request. 
*/ -static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap, - unsigned long nr_pfns) +void * __meminit dev_pagemap_alloc_block_buf(struct vmem_altmap *pgmap, + unsigned long size) { - unsigned long pfn = vmem_altmap_next_pfn(altmap); - unsigned long nr_align; - - nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG); - nr_align = ALIGN(pfn, nr_align) - pfn; - - if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap)) - return ULONG_MAX; - altmap->alloc += nr_pfns; - altmap->align += nr_align; - return pfn + nr_align; -} - -void * __meminit dev_pagemap_alloc_block_buf(unsigned long size, - struct vmem_altmap *altmap) -{ - unsigned long pfn, nr_pfns; - void *ptr; + unsigned long pfn, nr_pfns, nr_align; if (size & ~PAGE_MASK) { pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n", @@ -141,16 +124,20 @@ void * __meminit dev_pagemap_alloc_block_buf(unsigned long size, return NULL; } + pfn = vmem_altmap_next_pfn(pgmap); nr_pfns = size >> PAGE_SHIFT; - pfn = vmem_altmap_alloc(altmap, nr_pfns); - if (pfn < ULONG_MAX) - ptr = __va(__pfn_to_phys(pfn)); - else - ptr = NULL; - pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n", - __func__, pfn, altmap->alloc, altmap->align, nr_pfns); + nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG); + nr_align = ALIGN(pfn, nr_align) - pfn; + if (nr_pfns + nr_align > vmem_altmap_nr_free(pgmap)) + return NULL; - return ptr; + pgmap->alloc += nr_pfns; + pgmap->align += nr_align; + pfn += nr_align; + + pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n", + __func__, pfn, pgmap->alloc, pgmap->align, nr_pfns); + return __va(__pfn_to_phys(pfn)); } void __meminit vmemmap_verify(pte_t *pte, int node, -- 2.14.2 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@kvack.org. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2017-12-07 15:04 UTC|newest] Thread overview: 57+ messages / expand[flat|nested] mbox.gz Atom feed top 2017-12-07 15:08 revamp vmem_altmap / dev_pagemap handling Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 15:08 ` [PATCH 01/14] mm: move get_dev_pagemap out of line Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 18:34 ` Logan Gunthorpe 2017-12-07 18:34 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 02/14] mm: optimize dev_pagemap reference counting around get_dev_pagemap Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 18:46 ` Logan Gunthorpe 2017-12-07 18:46 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 03/14] mm: better abstract out dev_pagemap freeing Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 18:49 ` Logan Gunthorpe 2017-12-07 18:49 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 04/14] mm: better abstract out dev_pagemap alloc Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 18:52 ` Logan Gunthorpe 2017-12-07 18:52 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 05/14] mm: better abstract out dev_pagemap offset calculation Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 18:54 ` Logan Gunthorpe 2017-12-07 18:54 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 06/14] mm: better abstract out dev_pagemap start_pfn Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 18:57 ` Logan Gunthorpe 2017-12-07 18:57 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 07/14] mm: split dev_pagemap memory map allocation from normal case Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 19:08 ` Logan Gunthorpe 2017-12-07 19:08 ` Logan Gunthorpe 2017-12-07 15:08 ` Christoph Hellwig [this message] 2017-12-07 15:08 ` [PATCH 08/14] mm: merge vmem_altmap_alloc into dev_pagemap_alloc_block_buf Christoph Hellwig 2017-12-07 19:14 ` Logan Gunthorpe 2017-12-07 19:14 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 09/14] memremap: drop 
private struct page_map Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 15:08 ` [PATCH 10/14] memremap: change devm_memremap_pages interface to use struct dev_pagemap Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-08 4:03 ` Dan Williams 2017-12-07 15:08 ` [PATCH 11/14] memremap: simplify duplicate region handling in devm_memremap_pages Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 19:34 ` Logan Gunthorpe 2017-12-07 19:34 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 12/14] memremap: remove find_dev_pagemap Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 19:35 ` Logan Gunthorpe 2017-12-07 19:35 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 13/14] memremap: remove struct vmem_altmap Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 19:40 ` Logan Gunthorpe 2017-12-07 19:40 ` Logan Gunthorpe 2017-12-07 15:08 ` [PATCH 14/14] memremap: RCU protect data returned from dev_pagemap lookups Christoph Hellwig 2017-12-07 15:08 ` Christoph Hellwig 2017-12-07 19:53 ` Logan Gunthorpe 2017-12-07 19:53 ` Logan Gunthorpe 2017-12-08 4:14 ` revamp vmem_altmap / dev_pagemap handling Williams, Dan J 2017-12-08 4:14 ` Williams, Dan J
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20171207150840.28409-9-hch@lst.de \ --to=hch@lst.de \ --cc=dan.j.williams@intel.com \ --cc=jglisse@redhat.com \ --cc=linux-mm@kvack.org \ --cc=linux-nvdimm@lists.01.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.