linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v3] mm: cma: NUMA node interface
@ 2020-04-03  2:12 Aslan Bakirov
  2020-04-03  5:02 ` Ira Weiny
  0 siblings, 1 reply; 8+ messages in thread
From: Aslan Bakirov @ 2020-04-03  2:12 UTC (permalink / raw)
  To: akpm
  Cc: linux-kernel, linux-mm, kernel-team, riel, guro, mhocko, hannes,
	Aslan Bakirov

I've noticed that there are no interfaces exposed by CMA which would let me
declare contiguous memory on a particular NUMA node.

This patchset adds the ability to try to allocate contiguous memory on
specific node. It will fallback to other nodes if the specified one
doesn't work.

Implement a new method for declaring contiguous memory on a particular node
and keep cma_declare_contiguous() as a wrapper.

Signed-off-by: Aslan Bakirov <aslan@fb.com>
---
 include/linux/cma.h      | 13 +++++++++++--
 include/linux/memblock.h |  3 +++
 mm/cma.c                 | 16 +++++++++-------
 mm/memblock.c            |  2 +-
 4 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 190184b5ff32..eae834c2162f 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -24,10 +24,19 @@ extern phys_addr_t cma_get_base(const struct cma *cma);
 extern unsigned long cma_get_size(const struct cma *cma);
 extern const char *cma_get_name(const struct cma *cma);
 
-extern int __init cma_declare_contiguous(phys_addr_t base,
+extern int __init cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, const char *name, struct cma **res_cma);
+			bool fixed, const char *name, struct cma **res_cma,
+			int nid);
+static inline int __init cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
+			phys_addr_t alignment, unsigned int order_per_bit,
+			bool fixed, const char *name, struct cma **res_cma)
+{
+	return cma_declare_contiguous_nid(base, size, limit, alignment,
+			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
+}
 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
 					const char *name,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 079d17d96410..6bc37a731d27 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -348,6 +348,9 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 
 phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
 				      phys_addr_t start, phys_addr_t end);
+phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
+				      phys_addr_t align, phys_addr_t start,
+				      phys_addr_t end, int nid, bool exact_nid);
 phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
 
 static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
diff --git a/mm/cma.c b/mm/cma.c
index be55d1988c67..6405af3dc118 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -220,7 +220,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 }
 
 /**
- * cma_declare_contiguous() - reserve custom contiguous area
+ * cma_declare_contiguous_nid() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
@@ -229,6 +229,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * @fixed: hint about where to place the reserved area
  * @name: The name of the area. See function cma_init_reserved_mem()
  * @res_cma: Pointer to store the created cma region.
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
@@ -238,10 +239,11 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
  * reserve in range from @base to @limit.
  */
-int __init cma_declare_contiguous(phys_addr_t base,
+int __init cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, const char *name, struct cma **res_cma)
+			bool fixed, const char *name, struct cma **res_cma,
+			int nid)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
@@ -336,14 +338,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		 * memory in case of failure.
 		 */
 		if (base < highmem_start && limit > highmem_start) {
-			addr = memblock_phys_alloc_range(size, alignment,
-							 highmem_start, limit);
+			addr = memblock_alloc_range_nid(size, alignment,
+					highmem_start, limit, nid, false);
 			limit = highmem_start;
 		}
 
 		if (!addr) {
-			addr = memblock_phys_alloc_range(size, alignment, base,
-							 limit);
+			addr = memblock_alloc_range_nid(size, alignment, base,
+					limit, nid, false);
 			if (!addr) {
 				ret = -ENOMEM;
 				goto err;
diff --git a/mm/memblock.c b/mm/memblock.c
index 4d06bbaded0f..c79ba6f9920c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1349,7 +1349,7 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
  * Return:
  * Physical address of allocated memory block on success, %0 on failure.
  */
-static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
+phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
 					phys_addr_t end, int nid,
 					bool exact_nid)
-- 
2.24.1



^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] mm: cma: NUMA node interface
  2020-04-03  2:12 [PATCH v3] mm: cma: NUMA node interface Aslan Bakirov
@ 2020-04-03  5:02 ` Ira Weiny
  2020-04-03  9:51   ` Aslan Bakirov
  0 siblings, 1 reply; 8+ messages in thread
From: Ira Weiny @ 2020-04-03  5:02 UTC (permalink / raw)
  To: Aslan Bakirov
  Cc: akpm, linux-kernel, linux-mm, kernel-team, riel, guro, mhocko, hannes

On Thu, Apr 02, 2020 at 07:12:56PM -0700, Aslan Bakirov wrote:
> I've noticed that there is no interfaces exposed by CMA which would let me
> to declare contigous memory on particular NUMA node.
> 
> This patchset adds the ability to try to allocate contiguous memory on
> specific node. It will fallback to other nodes if the specified one
> doesn't work.
> 
> Implement a new method for declaring contigous memory on particular node
> and keep cma_declare_contiguous() as a wrapper.

Is there an additional patch which uses this new interface?

Generally the patch seems reasonable but we should have a user.

Ira

> 
> Signed-off-by: Aslan Bakirov <aslan@fb.com>
> ---
>  include/linux/cma.h      | 13 +++++++++++--
>  include/linux/memblock.h |  3 +++
>  mm/cma.c                 | 16 +++++++++-------
>  mm/memblock.c            |  2 +-
>  4 files changed, 24 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/cma.h b/include/linux/cma.h
> index 190184b5ff32..eae834c2162f 100644
> --- a/include/linux/cma.h
> +++ b/include/linux/cma.h
> @@ -24,10 +24,19 @@ extern phys_addr_t cma_get_base(const struct cma *cma);
>  extern unsigned long cma_get_size(const struct cma *cma);
>  extern const char *cma_get_name(const struct cma *cma);
>  
> -extern int __init cma_declare_contiguous(phys_addr_t base,
> +extern int __init cma_declare_contiguous_nid(phys_addr_t base,
>  			phys_addr_t size, phys_addr_t limit,
>  			phys_addr_t alignment, unsigned int order_per_bit,
> -			bool fixed, const char *name, struct cma **res_cma);
> +			bool fixed, const char *name, struct cma **res_cma,
> +			int nid);
> +static inline int __init cma_declare_contiguous(phys_addr_t base,
> +			phys_addr_t size, phys_addr_t limit,
> +			phys_addr_t alignment, unsigned int order_per_bit,
> +			bool fixed, const char *name, struct cma **res_cma)
> +{
> +	return cma_declare_contiguous_nid(base, size, limit, alignment,
> +			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
> +}
>  extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>  					unsigned int order_per_bit,
>  					const char *name,
> diff --git a/include/linux/memblock.h b/include/linux/memblock.h
> index 079d17d96410..6bc37a731d27 100644
> --- a/include/linux/memblock.h
> +++ b/include/linux/memblock.h
> @@ -348,6 +348,9 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
>  
>  phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
>  				      phys_addr_t start, phys_addr_t end);
> +phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
> +				      phys_addr_t align, phys_addr_t start,
> +				      phys_addr_t end, int nid, bool exact_nid);
>  phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
>  
>  static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
> diff --git a/mm/cma.c b/mm/cma.c
> index be55d1988c67..6405af3dc118 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -220,7 +220,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>  }
>  
>  /**
> - * cma_declare_contiguous() - reserve custom contiguous area
> + * cma_declare_contiguous_nid() - reserve custom contiguous area
>   * @base: Base address of the reserved area optional, use 0 for any
>   * @size: Size of the reserved area (in bytes),
>   * @limit: End address of the reserved memory (optional, 0 for any).
> @@ -229,6 +229,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>   * @fixed: hint about where to place the reserved area
>   * @name: The name of the area. See function cma_init_reserved_mem()
>   * @res_cma: Pointer to store the created cma region.
> + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
>   *
>   * This function reserves memory from early allocator. It should be
>   * called by arch specific code once the early allocator (memblock or bootmem)
> @@ -238,10 +239,11 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
>   * If @fixed is true, reserve contiguous area at exactly @base.  If false,
>   * reserve in range from @base to @limit.
>   */
> -int __init cma_declare_contiguous(phys_addr_t base,
> +int __init cma_declare_contiguous_nid(phys_addr_t base,
>  			phys_addr_t size, phys_addr_t limit,
>  			phys_addr_t alignment, unsigned int order_per_bit,
> -			bool fixed, const char *name, struct cma **res_cma)
> +			bool fixed, const char *name, struct cma **res_cma,
> +			int nid)
>  {
>  	phys_addr_t memblock_end = memblock_end_of_DRAM();
>  	phys_addr_t highmem_start;
> @@ -336,14 +338,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
>  		 * memory in case of failure.
>  		 */
>  		if (base < highmem_start && limit > highmem_start) {
> -			addr = memblock_phys_alloc_range(size, alignment,
> -							 highmem_start, limit);
> +			addr = memblock_alloc_range_nid(size, alignment,
> +					highmem_start, limit, nid, false);
>  			limit = highmem_start;
>  		}
>  
>  		if (!addr) {
> -			addr = memblock_phys_alloc_range(size, alignment, base,
> -							 limit);
> +			addr = memblock_alloc_range_nid(size, alignment, base,
> +					limit, nid, false);
>  			if (!addr) {
>  				ret = -ENOMEM;
>  				goto err;
> diff --git a/mm/memblock.c b/mm/memblock.c
> index 4d06bbaded0f..c79ba6f9920c 100644
> --- a/mm/memblock.c
> +++ b/mm/memblock.c
> @@ -1349,7 +1349,7 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
>   * Return:
>   * Physical address of allocated memory block on success, %0 on failure.
>   */
> -static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
> +phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
>  					phys_addr_t align, phys_addr_t start,
>  					phys_addr_t end, int nid,
>  					bool exact_nid)
> -- 
> 2.24.1
> 
> 


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] mm: cma: NUMA node interface
  2020-04-03  5:02 ` Ira Weiny
@ 2020-04-03  9:51   ` Aslan Bakirov
  2020-04-03 10:01     ` Michal Hocko
  0 siblings, 1 reply; 8+ messages in thread
From: Aslan Bakirov @ 2020-04-03  9:51 UTC (permalink / raw)
  To: Ira Weiny
  Cc: Aslan Bakirov, akpm, linux-kernel, linux-mm, kernel-team, riel,
	Roman Gushchin, mhocko, hannes

[-- Attachment #1: Type: text/plain, Size: 7240 bytes --]

On Fri, Apr 3, 2020 at 6:02 AM Ira Weiny <ira.weiny@intel.com> wrote:

> On Thu, Apr 02, 2020 at 07:12:56PM -0700, Aslan Bakirov wrote:
> > I've noticed that there is no interfaces exposed by CMA which would let
> me
> > to declare contigous memory on particular NUMA node.
> >
> > This patchset adds the ability to try to allocate contiguous memory on
> > specific node. It will fallback to other nodes if the specified one
> > doesn't work.
> >
> > Implement a new method for declaring contigous memory on particular node
> > and keep cma_declare_contiguous() as a wrapper.
>
> Is there an additional patch which uses this new interface?
>
> Generally the patch seems reasonable but we should have a user.


 Thanks for the comments. Yes, actually, this is the version 3 of first
patch ([PATCH 1/2] mm: cma: NUMA node interface)
 of patchset. Second patch, which uses this interface is  "[PATCH 2/2] mm:
hugetlb: Use node interface of cma"

Ira
>
> >
> > Signed-off-by: Aslan Bakirov <aslan@fb.com>
> > ---
> >  include/linux/cma.h      | 13 +++++++++++--
> >  include/linux/memblock.h |  3 +++
> >  mm/cma.c                 | 16 +++++++++-------
> >  mm/memblock.c            |  2 +-
> >  4 files changed, 24 insertions(+), 10 deletions(-)
> >
> > diff --git a/include/linux/cma.h b/include/linux/cma.h
> > index 190184b5ff32..eae834c2162f 100644
> > --- a/include/linux/cma.h
> > +++ b/include/linux/cma.h
> > @@ -24,10 +24,19 @@ extern phys_addr_t cma_get_base(const struct cma
> *cma);
> >  extern unsigned long cma_get_size(const struct cma *cma);
> >  extern const char *cma_get_name(const struct cma *cma);
> >
> > -extern int __init cma_declare_contiguous(phys_addr_t base,
> > +extern int __init cma_declare_contiguous_nid(phys_addr_t base,
> >                       phys_addr_t size, phys_addr_t limit,
> >                       phys_addr_t alignment, unsigned int order_per_bit,
> > -                     bool fixed, const char *name, struct cma
> **res_cma);
> > +                     bool fixed, const char *name, struct cma **res_cma,
> > +                     int nid);
> > +static inline int __init cma_declare_contiguous(phys_addr_t base,
> > +                     phys_addr_t size, phys_addr_t limit,
> > +                     phys_addr_t alignment, unsigned int order_per_bit,
> > +                     bool fixed, const char *name, struct cma **res_cma)
> > +{
> > +     return cma_declare_contiguous_nid(base, size, limit, alignment,
> > +                     order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
> > +}
> >  extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
> >                                       unsigned int order_per_bit,
> >                                       const char *name,
> > diff --git a/include/linux/memblock.h b/include/linux/memblock.h
> > index 079d17d96410..6bc37a731d27 100644
> > --- a/include/linux/memblock.h
> > +++ b/include/linux/memblock.h
> > @@ -348,6 +348,9 @@ static inline int memblock_get_region_node(const
> struct memblock_region *r)
> >
> >  phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t
> align,
> >                                     phys_addr_t start, phys_addr_t end);
> > +phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
> > +                                   phys_addr_t align, phys_addr_t start,
> > +                                   phys_addr_t end, int nid, bool
> exact_nid);
> >  phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t
> align, int nid);
> >
> >  static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
> > diff --git a/mm/cma.c b/mm/cma.c
> > index be55d1988c67..6405af3dc118 100644
> > --- a/mm/cma.c
> > +++ b/mm/cma.c
> > @@ -220,7 +220,7 @@ int __init cma_init_reserved_mem(phys_addr_t base,
> phys_addr_t size,
> >  }
> >
> >  /**
> > - * cma_declare_contiguous() - reserve custom contiguous area
> > + * cma_declare_contiguous_nid() - reserve custom contiguous area
> >   * @base: Base address of the reserved area optional, use 0 for any
> >   * @size: Size of the reserved area (in bytes),
> >   * @limit: End address of the reserved memory (optional, 0 for any).
> > @@ -229,6 +229,7 @@ int __init cma_init_reserved_mem(phys_addr_t base,
> phys_addr_t size,
> >   * @fixed: hint about where to place the reserved area
> >   * @name: The name of the area. See function cma_init_reserved_mem()
> >   * @res_cma: Pointer to store the created cma region.
> > + * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
> >   *
> >   * This function reserves memory from early allocator. It should be
> >   * called by arch specific code once the early allocator (memblock or
> bootmem)
> > @@ -238,10 +239,11 @@ int __init cma_init_reserved_mem(phys_addr_t base,
> phys_addr_t size,
> >   * If @fixed is true, reserve contiguous area at exactly @base.  If
> false,
> >   * reserve in range from @base to @limit.
> >   */
> > -int __init cma_declare_contiguous(phys_addr_t base,
> > +int __init cma_declare_contiguous_nid(phys_addr_t base,
> >                       phys_addr_t size, phys_addr_t limit,
> >                       phys_addr_t alignment, unsigned int order_per_bit,
> > -                     bool fixed, const char *name, struct cma **res_cma)
> > +                     bool fixed, const char *name, struct cma **res_cma,
> > +                     int nid)
> >  {
> >       phys_addr_t memblock_end = memblock_end_of_DRAM();
> >       phys_addr_t highmem_start;
> > @@ -336,14 +338,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
> >                * memory in case of failure.
> >                */
> >               if (base < highmem_start && limit > highmem_start) {
> > -                     addr = memblock_phys_alloc_range(size, alignment,
> > -                                                      highmem_start,
> limit);
> > +                     addr = memblock_alloc_range_nid(size, alignment,
> > +                                     highmem_start, limit, nid, false);
> >                       limit = highmem_start;
> >               }
> >
> >               if (!addr) {
> > -                     addr = memblock_phys_alloc_range(size, alignment,
> base,
> > -                                                      limit);
> > +                     addr = memblock_alloc_range_nid(size, alignment,
> base,
> > +                                     limit, nid, false);
> >                       if (!addr) {
> >                               ret = -ENOMEM;
> >                               goto err;
> > diff --git a/mm/memblock.c b/mm/memblock.c
> > index 4d06bbaded0f..c79ba6f9920c 100644
> > --- a/mm/memblock.c
> > +++ b/mm/memblock.c
> > @@ -1349,7 +1349,7 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone
> *zone,
> >   * Return:
> >   * Physical address of allocated memory block on success, %0 on failure.
> >   */
> > -static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
> > +phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
> >                                       phys_addr_t align, phys_addr_t
> start,
> >                                       phys_addr_t end, int nid,
> >                                       bool exact_nid)
> > --
> > 2.24.1
> >
> >
>
>

[-- Attachment #2: Type: text/html, Size: 9218 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] mm: cma: NUMA node interface
  2020-04-03  9:51   ` Aslan Bakirov
@ 2020-04-03 10:01     ` Michal Hocko
  2020-04-03 10:21       ` Aslan Bakirov
  0 siblings, 1 reply; 8+ messages in thread
From: Michal Hocko @ 2020-04-03 10:01 UTC (permalink / raw)
  To: Aslan Bakirov
  Cc: Ira Weiny, Aslan Bakirov, akpm, linux-kernel, linux-mm,
	kernel-team, riel, Roman Gushchin, hannes

On Fri 03-04-20 10:51:32, Aslan Bakirov wrote:
> On Fri, Apr 3, 2020 at 6:02 AM Ira Weiny <ira.weiny@intel.com> wrote:
> 
> > On Thu, Apr 02, 2020 at 07:12:56PM -0700, Aslan Bakirov wrote:
> > > I've noticed that there is no interfaces exposed by CMA which would let
> > me
> > > to declare contigous memory on particular NUMA node.
> > >
> > > This patchset adds the ability to try to allocate contiguous memory on
> > > specific node. It will fallback to other nodes if the specified one
> > > doesn't work.
> > >
> > > Implement a new method for declaring contigous memory on particular node
> > > and keep cma_declare_contiguous() as a wrapper.
> >
> > Is there an additional patch which uses this new interface?
> >
> > Generally the patch seems reasonable but we should have a user.
> 
> 
>  Thanks for the comments. Yes, actually, this is the version 3 of first
> patch ([PATCH 1/2] mm: cma: NUMA node interface)
>  of patchset. Second patch, which uses this interface is  "[PATCH 2/2] mm:
> hugetlb: Use node interface of cma"

It would have been much more clear to send those two patches together as
you can see.
-- 
Michal Hocko
SUSE Labs


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] mm: cma: NUMA node interface
  2020-04-03 10:01     ` Michal Hocko
@ 2020-04-03 10:21       ` Aslan Bakirov
  0 siblings, 0 replies; 8+ messages in thread
From: Aslan Bakirov @ 2020-04-03 10:21 UTC (permalink / raw)
  To: Michal Hocko
  Cc: Ira Weiny, Aslan Bakirov, akpm, linux-kernel, linux-mm,
	kernel-team, riel, Roman Gushchin, hannes

[-- Attachment #1: Type: text/plain, Size: 1329 bytes --]

On Fri, Apr 3, 2020 at 11:01 AM Michal Hocko <mhocko@kernel.org> wrote:

> On Fri 03-04-20 10:51:32, Aslan Bakirov wrote:
> > On Fri, Apr 3, 2020 at 6:02 AM Ira Weiny <ira.weiny@intel.com> wrote:
> >
> > > On Thu, Apr 02, 2020 at 07:12:56PM -0700, Aslan Bakirov wrote:
> > > > I've noticed that there is no interfaces exposed by CMA which would
> let
> > > me
> > > > to declare contigous memory on particular NUMA node.
> > > >
> > > > This patchset adds the ability to try to allocate contiguous memory
> on
> > > > specific node. It will fallback to other nodes if the specified one
> > > > doesn't work.
> > > >
> > > > Implement a new method for declaring contigous memory on particular
> node
> > > > and keep cma_declare_contiguous() as a wrapper.
> > >
> > > Is there an additional patch which uses this new interface?
> > >
> > > Generally the patch seems reasonable but we should have a user.
> >
> >
> >  Thanks for the comments. Yes, actually, this is the version 3 of first
> > patch ([PATCH 1/2] mm: cma: NUMA node interface)
> >  of patchset. Second patch, which uses this interface is  "[PATCH 2/2]
> mm:
> > hugetlb: Use node interface of cma"
>
> It would have been much more clear to send those two patches together as
> you can see.
>
Sincerely apologies, sent them again.


> --
> Michal Hocko
> SUSE Labs
>

[-- Attachment #2: Type: text/html, Size: 2096 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] mm: cma: NUMA node interface
  2020-04-03  0:27 ` Matthew Wilcox
@ 2020-04-03  2:19   ` Aslan Bakirov
  0 siblings, 0 replies; 8+ messages in thread
From: Aslan Bakirov @ 2020-04-03  2:19 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: Aslan Bakirov, akpm, linux-kernel, linux-mm, kernel-team, riel,
	Roman Gushchin, mhocko, hannes

[-- Attachment #1: Type: text/plain, Size: 881 bytes --]

On Fri, Apr 3, 2020 at 1:27 AM Matthew Wilcox <willy@infradead.org> wrote:

> On Thu, Apr 02, 2020 at 04:22:37PM -0700, Aslan Bakirov wrote:
> > +static inline int __init cma_declare_contiguous(phys_addr_t base,
> > +                     phys_addr_t size, phys_addr_t limit,
> > +                     phys_addr_t alignment, unsigned int order_per_bit,
> > +                     bool fixed, const char *name, struct cma **res_cma)
> > +                     {
>
> This { should be in the first column.


> > -                     addr = memblock_phys_alloc_range(size, alignment,
> > -                                                      highmem_start,
> limit);
> > +                     addr = memblock_alloc_range_nid(size, alignment,
> > +                              highmem_start, limit, nid, false);
>
> Two extra tabs, not one.
>
> Thanks for the comments. Addressed them.

[-- Attachment #2: Type: text/html, Size: 1512 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] mm: cma: NUMA node interface
  2020-04-02 23:22 Aslan Bakirov
@ 2020-04-03  0:27 ` Matthew Wilcox
  2020-04-03  2:19   ` Aslan Bakirov
  0 siblings, 1 reply; 8+ messages in thread
From: Matthew Wilcox @ 2020-04-03  0:27 UTC (permalink / raw)
  To: Aslan Bakirov
  Cc: akpm, linux-kernel, linux-mm, kernel-team, riel, guro, mhocko, hannes

On Thu, Apr 02, 2020 at 04:22:37PM -0700, Aslan Bakirov wrote:
> +static inline int __init cma_declare_contiguous(phys_addr_t base,
> +			phys_addr_t size, phys_addr_t limit,
> +			phys_addr_t alignment, unsigned int order_per_bit,
> +			bool fixed, const char *name, struct cma **res_cma)
> +			{

This { should be in the first column.

> -			addr = memblock_phys_alloc_range(size, alignment,
> -							 highmem_start, limit);
> +			addr = memblock_alloc_range_nid(size, alignment,
> +				 highmem_start, limit, nid, false);

Two extra tabs, not one.



^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v3] mm: cma: NUMA node interface
@ 2020-04-02 23:22 Aslan Bakirov
  2020-04-03  0:27 ` Matthew Wilcox
  0 siblings, 1 reply; 8+ messages in thread
From: Aslan Bakirov @ 2020-04-02 23:22 UTC (permalink / raw)
  To: akpm
  Cc: linux-kernel, linux-mm, kernel-team, riel, guro, mhocko, hannes,
	Aslan Bakirov

I've noticed that there are no interfaces exposed by CMA which would let me
declare contiguous memory on a particular NUMA node.

This patchset adds the ability to try to allocate contiguous memory on
specific node. It will fallback to other nodes if the specified one
doesn't work.

Implement a new method for declaring contiguous memory on a particular node
and keep cma_declare_contiguous() as a wrapper.

Signed-off-by: Aslan Bakirov <aslan@fb.com>
---
 include/linux/cma.h      | 14 ++++++++++++--
 include/linux/memblock.h |  3 +++
 mm/cma.c                 | 16 +++++++++-------
 mm/memblock.c            |  2 +-
 4 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 190184b5ff32..d64d1fe2c1f7 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -24,10 +24,20 @@ extern phys_addr_t cma_get_base(const struct cma *cma);
 extern unsigned long cma_get_size(const struct cma *cma);
 extern const char *cma_get_name(const struct cma *cma);
 
-extern int __init cma_declare_contiguous(phys_addr_t base,
+extern int __init cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, const char *name, struct cma **res_cma);
+			bool fixed, const char *name, struct cma **res_cma,
+			int nid);
+static inline int __init cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
+			phys_addr_t alignment, unsigned int order_per_bit,
+			bool fixed, const char *name, struct cma **res_cma)
+			{
+				return cma_declare_contiguous_nid(base, size,
+					limit, alignment, order_per_bit,
+					fixed, name, res_cma, NUMA_NO_NODE);
+			}
 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
 					const char *name,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 079d17d96410..6bc37a731d27 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -348,6 +348,9 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 
 phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
 				      phys_addr_t start, phys_addr_t end);
+phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
+				      phys_addr_t align, phys_addr_t start,
+				      phys_addr_t end, int nid, bool exact_nid);
 phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
 
 static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
diff --git a/mm/cma.c b/mm/cma.c
index be55d1988c67..a3c7bac1dcf2 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -220,7 +220,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 }
 
 /**
- * cma_declare_contiguous() - reserve custom contiguous area
+ * cma_declare_contiguous_nid() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
@@ -229,6 +229,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * @fixed: hint about where to place the reserved area
  * @name: The name of the area. See function cma_init_reserved_mem()
  * @res_cma: Pointer to store the created cma region.
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
@@ -238,10 +239,11 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
  * reserve in range from @base to @limit.
  */
-int __init cma_declare_contiguous(phys_addr_t base,
+int __init cma_declare_contiguous_nid(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
-			bool fixed, const char *name, struct cma **res_cma)
+			bool fixed, const char *name, struct cma **res_cma,
+			int nid)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
@@ -336,14 +338,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
 		 * memory in case of failure.
 		 */
 		if (base < highmem_start && limit > highmem_start) {
-			addr = memblock_phys_alloc_range(size, alignment,
-							 highmem_start, limit);
+			addr = memblock_alloc_range_nid(size, alignment,
+				 highmem_start, limit, nid, false);
 			limit = highmem_start;
 		}
 
 		if (!addr) {
-			addr = memblock_phys_alloc_range(size, alignment, base,
-							 limit);
+			addr = memblock_alloc_range_nid(size, alignment, base,
+				 limit, nid, false);
 			if (!addr) {
 				ret = -ENOMEM;
 				goto err;
diff --git a/mm/memblock.c b/mm/memblock.c
index 4d06bbaded0f..c79ba6f9920c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1349,7 +1349,7 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
  * Return:
  * Physical address of allocated memory block on success, %0 on failure.
  */
-static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
+phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
 					phys_addr_t end, int nid,
 					bool exact_nid)
-- 
2.24.1



^ permalink raw reply related	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2020-04-03 10:22 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-03  2:12 [PATCH v3] mm: cma: NUMA node interface Aslan Bakirov
2020-04-03  5:02 ` Ira Weiny
2020-04-03  9:51   ` Aslan Bakirov
2020-04-03 10:01     ` Michal Hocko
2020-04-03 10:21       ` Aslan Bakirov
  -- strict thread matches above, loose matches on Subject: below --
2020-04-02 23:22 Aslan Bakirov
2020-04-03  0:27 ` Matthew Wilcox
2020-04-03  2:19   ` Aslan Bakirov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).