* [PATCH V3] mm/thp: Allocate transparent hugepages on local node
@ 2015-01-16  7:26 ` Aneesh Kumar K.V
  0 siblings, 0 replies; 22+ messages in thread
From: Aneesh Kumar K.V @ 2015-01-16  7:26 UTC (permalink / raw)
  To: akpm, Kirill A. Shutemov, David Rientjes, Vlastimil Babka
  Cc: linux-mm, linux-kernel, Aneesh Kumar K.V

This makes sure that we try to allocate hugepages from the local node if
allowed by mempolicy. If we can't, we fall back to small page allocation
based on mempolicy. This is based on the observation that allocating pages
on the local node is more beneficial than allocating hugepages on a remote node.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
Changes from V2:
* Rebase to latest linus tree (cb59670870d90ff8bc31f5f2efc407c6fe4938c0)

 include/linux/gfp.h |  4 ++++
 mm/huge_memory.c    | 24 +++++++++---------------
 mm/mempolicy.c      | 40 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 53 insertions(+), 15 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b840e3b2770d..60110e06419d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -335,11 +335,15 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
 			int node);
+extern struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
+				       unsigned long addr, int order);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
 #define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
 	alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
+	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)			\
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 817a875f2b8c..031fb1584bbf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -766,15 +766,6 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
-static inline struct page *alloc_hugepage_vma(int defrag,
-					      struct vm_area_struct *vma,
-					      unsigned long haddr, int nd,
-					      gfp_t extra_gfp)
-{
-	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
-			       HPAGE_PMD_ORDER, vma, haddr, nd);
-}
-
 /* Caller must hold page table lock. */
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
@@ -795,6 +786,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			       unsigned long address, pmd_t *pmd,
 			       unsigned int flags)
 {
+	gfp_t gfp;
 	struct page *page;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 
@@ -829,8 +821,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		return 0;
 	}
-	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-			vma, haddr, numa_node_id(), 0);
+	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
 	if (unlikely(!page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1118,10 +1110,12 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_unlock(ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
-	    !transparent_hugepage_debug_cow())
-		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr, numa_node_id(), 0);
-	else
+	    !transparent_hugepage_debug_cow()) {
+		gfp_t gfp;
+
+		gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
+		new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	} else
 		new_page = NULL;
 
 	if (unlikely(!new_page)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0e0961b8c39c..14604142c2c2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2030,6 +2030,46 @@ retry_cpuset:
 	return page;
 }
 
+struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
+				unsigned long addr, int order)
+{
+	struct page *page;
+	nodemask_t *nmask;
+	struct mempolicy *pol;
+	int node = numa_node_id();
+	unsigned int cpuset_mems_cookie;
+
+retry_cpuset:
+	pol = get_vma_policy(vma, addr);
+	cpuset_mems_cookie = read_mems_allowed_begin();
+
+	if (pol->mode != MPOL_INTERLEAVE) {
+		/*
+		 * For interleave policy, we don't worry about
+		 * current node. Otherwise if current node is
+		 * in nodemask, try to allocate hugepage from
+		 * current node. Don't fall back to other nodes
+		 * for THP.
+		 */
+		nmask = policy_nodemask(gfp, pol);
+		if (!nmask || node_isset(node, *nmask)) {
+			mpol_cond_put(pol);
+			page = alloc_pages_exact_node(node, gfp, order);
+			if (unlikely(!page &&
+				     read_mems_allowed_retry(cpuset_mems_cookie)))
+				goto retry_cpuset;
+			return page;
+		}
+	}
+	mpol_cond_put(pol);
+	/*
+	 * if current node is not part of node mask, try
+	 * the allocation from any node, and we can do retry
+	 * in that case.
+	 */
+	return alloc_pages_vma(gfp, order, vma, addr, node);
+}
+
 /**
  * 	alloc_pages_current - Allocate pages.
  *
-- 
2.1.0


* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-16  7:26 ` Aneesh Kumar K.V
@ 2015-01-16 12:27   ` Kirill A. Shutemov
  0 siblings, 0 replies; 22+ messages in thread
From: Kirill A. Shutemov @ 2015-01-16 12:27 UTC (permalink / raw)
  To: Aneesh Kumar K.V
  Cc: akpm, David Rientjes, Vlastimil Babka, linux-mm, linux-kernel

On Fri, Jan 16, 2015 at 12:56:36PM +0530, Aneesh Kumar K.V wrote:
> This makes sure that we try to allocate hugepages from the local node if
> allowed by mempolicy. If we can't, we fall back to small page allocation
> based on mempolicy. This is based on the observation that allocating pages
> on the local node is more beneficial than allocating hugepages on a remote node.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>

Looks good to me.

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

-- 
 Kirill A. Shutemov

* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-16  7:26 ` Aneesh Kumar K.V
@ 2015-01-16 20:01   ` Vlastimil Babka
  0 siblings, 0 replies; 22+ messages in thread
From: Vlastimil Babka @ 2015-01-16 20:01 UTC (permalink / raw)
  To: Aneesh Kumar K.V, akpm, Kirill A. Shutemov, David Rientjes
  Cc: linux-mm, linux-kernel

On 01/16/2015 08:26 AM, Aneesh Kumar K.V wrote:
> This makes sure that we try to allocate hugepages from the local node if
> allowed by mempolicy. If we can't, we fall back to small page allocation
> based on mempolicy. This is based on the observation that allocating pages
> on the local node is more beneficial than allocating hugepages on a remote node.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

> ---
> Changes from V2:
> * Rebase to latest linus tree (cb59670870d90ff8bc31f5f2efc407c6fe4938c0)
> 
>  include/linux/gfp.h |  4 ++++
>  mm/huge_memory.c    | 24 +++++++++---------------
>  mm/mempolicy.c      | 40 ++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 53 insertions(+), 15 deletions(-)
> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index b840e3b2770d..60110e06419d 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -335,11 +335,15 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
>  extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
>  			struct vm_area_struct *vma, unsigned long addr,
>  			int node);
> +extern struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
> +				       unsigned long addr, int order);
>  #else
>  #define alloc_pages(gfp_mask, order) \
>  		alloc_pages_node(numa_node_id(), gfp_mask, order)
>  #define alloc_pages_vma(gfp_mask, order, vma, addr, node)	\
>  	alloc_pages(gfp_mask, order)
> +#define alloc_hugepage_vma(gfp_mask, vma, addr, order)	\
> +	alloc_pages(gfp_mask, order)
>  #endif
>  #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
>  #define alloc_page_vma(gfp_mask, vma, addr)			\
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 817a875f2b8c..031fb1584bbf 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -766,15 +766,6 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
>  	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
>  }
>  
> -static inline struct page *alloc_hugepage_vma(int defrag,
> -					      struct vm_area_struct *vma,
> -					      unsigned long haddr, int nd,
> -					      gfp_t extra_gfp)
> -{
> -	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
> -			       HPAGE_PMD_ORDER, vma, haddr, nd);
> -}
> -
>  /* Caller must hold page table lock. */
>  static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
>  		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
> @@ -795,6 +786,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  			       unsigned long address, pmd_t *pmd,
>  			       unsigned int flags)
>  {
> +	gfp_t gfp;
>  	struct page *page;
>  	unsigned long haddr = address & HPAGE_PMD_MASK;
>  
> @@ -829,8 +821,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  		}
>  		return 0;
>  	}
> -	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
> -			vma, haddr, numa_node_id(), 0);
> +	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
> +	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
>  	if (unlikely(!page)) {
>  		count_vm_event(THP_FAULT_FALLBACK);
>  		return VM_FAULT_FALLBACK;
> @@ -1118,10 +1110,12 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
>  	spin_unlock(ptl);
>  alloc:
>  	if (transparent_hugepage_enabled(vma) &&
> -	    !transparent_hugepage_debug_cow())
> -		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
> -					      vma, haddr, numa_node_id(), 0);
> -	else
> +	    !transparent_hugepage_debug_cow()) {
> +		gfp_t gfp;
> +
> +		gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
> +		new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
> +	} else
>  		new_page = NULL;
>  
>  	if (unlikely(!new_page)) {
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 0e0961b8c39c..14604142c2c2 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2030,6 +2030,46 @@ retry_cpuset:
>  	return page;
>  }
>  
> +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
> +				unsigned long addr, int order)
> +{
> +	struct page *page;
> +	nodemask_t *nmask;
> +	struct mempolicy *pol;
> +	int node = numa_node_id();
> +	unsigned int cpuset_mems_cookie;
> +
> +retry_cpuset:
> +	pol = get_vma_policy(vma, addr);
> +	cpuset_mems_cookie = read_mems_allowed_begin();
> +
> +	if (pol->mode != MPOL_INTERLEAVE) {
> +		/*
> +		 * For interleave policy, we don't worry about
> +		 * current node. Otherwise if current node is
> +		 * in nodemask, try to allocate hugepage from
> +		 * current node. Don't fall back to other nodes
> +		 * for THP.
> +		 */
> +		nmask = policy_nodemask(gfp, pol);
> +		if (!nmask || node_isset(node, *nmask)) {
> +			mpol_cond_put(pol);
> +			page = alloc_pages_exact_node(node, gfp, order);
> +			if (unlikely(!page &&
> +				     read_mems_allowed_retry(cpuset_mems_cookie)))
> +				goto retry_cpuset;
> +			return page;
> +		}
> +	}
> +	mpol_cond_put(pol);
> +	/*
> +	 * if current node is not part of node mask, try
> +	 * the allocation from any node, and we can do retry
> +	 * in that case.
> +	 */
> +	return alloc_pages_vma(gfp, order, vma, addr, node);
> +}
> +
>  /**
>   * 	alloc_pages_current - Allocate pages.
>   *
> 


* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-16  7:26 ` Aneesh Kumar K.V
@ 2015-01-17  0:02   ` Andrew Morton
  0 siblings, 0 replies; 22+ messages in thread
From: Andrew Morton @ 2015-01-17  0:02 UTC (permalink / raw)
  To: Aneesh Kumar K.V
  Cc: Kirill A. Shutemov, David Rientjes, Vlastimil Babka, linux-mm,
	linux-kernel

On Fri, 16 Jan 2015 12:56:36 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:

> This makes sure that we try to allocate hugepages from the local node if
> allowed by mempolicy. If we can't, we fall back to small page allocation
> based on mempolicy. This is based on the observation that allocating pages
> on the local node is more beneficial than allocating hugepages on a remote node.

The changelog is a bit incomplete.  It doesn't describe the current
behaviour, nor what is wrong with it.  What are the before-and-after
effects of this change?

And what might be the user-visible effects?

> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2030,6 +2030,46 @@ retry_cpuset:
>  	return page;
>  }
>  
> +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
> +				unsigned long addr, int order)

alloc_pages_vma() is nicely documented.  alloc_hugepage_vma() is not
documented at all.  This makes it a bit hard for readers to work out the
difference!

Is it possible to scrunch them both into the same function?  Probably
too messy?

> +{
> +	struct page *page;
> +	nodemask_t *nmask;
> +	struct mempolicy *pol;
> +	int node = numa_node_id();
> +	unsigned int cpuset_mems_cookie;
> +
> +retry_cpuset:
> +	pol = get_vma_policy(vma, addr);
> +	cpuset_mems_cookie = read_mems_allowed_begin();
> +
> +	if (pol->mode != MPOL_INTERLEAVE) {
> +		/*
> +		 * For interleave policy, we don't worry about
> +		 * current node. Otherwise if current node is
> +		 * in nodemask, try to allocate hugepage from
> +		 * current node. Don't fall back to other nodes
> +		 * for THP.
> +		 */

This code isn't "interleave policy".  It's everything *but* interleave
policy.  Comment makes no sense!

> +		nmask = policy_nodemask(gfp, pol);
> +		if (!nmask || node_isset(node, *nmask)) {
> +			mpol_cond_put(pol);
> +			page = alloc_pages_exact_node(node, gfp, order);
> +			if (unlikely(!page &&
> +				     read_mems_allowed_retry(cpuset_mems_cookie)))
> +				goto retry_cpuset;
> +			return page;
> +		}
> +	}
> +	mpol_cond_put(pol);
> +	/*
> +	 * if current node is not part of node mask, try
> +	 * the allocation from any node, and we can do retry
> +	 * in that case.
> +	 */
> +	return alloc_pages_vma(gfp, order, vma, addr, node);
> +}


* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-17  0:02   ` Andrew Morton
@ 2015-01-17  7:15     ` Davidlohr Bueso
  0 siblings, 0 replies; 22+ messages in thread
From: Davidlohr Bueso @ 2015-01-17  7:15 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Aneesh Kumar K.V, Kirill A. Shutemov, David Rientjes,
	Vlastimil Babka, linux-mm, linux-kernel

On Fri, 2015-01-16 at 16:02 -0800, Andrew Morton wrote:
> On Fri, 16 Jan 2015 12:56:36 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
> 
> > This makes sure that we try to allocate hugepages from the local node if
> > allowed by mempolicy. If we can't, we fall back to small page allocation
> > based on mempolicy. This is based on the observation that allocating pages
> > on the local node is more beneficial than allocating hugepages on a remote node.
> 
> The changelog is a bit incomplete.  It doesn't describe the current
> behaviour, nor what is wrong with it.  What are the before-and-after
> effects of this change?
> 
> And what might be the user-visible effects?

I'd be interested in any performance data. I'll run this by a 4 node box
next week.

> 
> > --- a/mm/mempolicy.c
> > +++ b/mm/mempolicy.c
> > @@ -2030,6 +2030,46 @@ retry_cpuset:
> >  	return page;
> >  }
> >  
> > +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
> > +				unsigned long addr, int order)
> 
> alloc_pages_vma() is nicely documented.  alloc_hugepage_vma() is not
> documented at all.  This makes it a bit had for readers to work out the
> difference!
> 
> Is it possible to scrunch them both into the same function?  Probably
> too messy?
> 
> > +{
> > +	struct page *page;
> > +	nodemask_t *nmask;
> > +	struct mempolicy *pol;
> > +	int node = numa_node_id();
> > +	unsigned int cpuset_mems_cookie;
> > +
> > +retry_cpuset:
> > +	pol = get_vma_policy(vma, addr);
> > +	cpuset_mems_cookie = read_mems_allowed_begin();
> > +
> > +	if (pol->mode != MPOL_INTERLEAVE) {
> > +		/*
> > +		 * For interleave policy, we don't worry about
> > +		 * current node. Otherwise if current node is
> > +		 * in nodemask, try to allocate hugepage from
> > +		 * current node. Don't fall back to other nodes
> > +		 * for THP.
> > +		 */
> 
> This code isn't "interleave policy".  It's everything *but* interleave
> policy.  Comment makes no sense!

May I add that, while a nit, this indentation is quite ugly:

> 
> > +		nmask = policy_nodemask(gfp, pol);
> > +		if (!nmask || node_isset(node, *nmask)) {
> > +			mpol_cond_put(pol);
> > +			page = alloc_pages_exact_node(node, gfp, order);
> > +			if (unlikely(!page &&
> > +				     read_mems_allowed_retry(cpuset_mems_cookie)))
> > +				goto retry_cpuset;
> > +			return page;
> > +		}
> > +	}

Improving it makes the code visually easier on the eye. So this should
be considered if another re-spin of the patch is to be done anyway. Just
jump to the mpol refcounting and be done when 'pol->mode ==
MPOL_INTERLEAVE'.

Thanks,
Davidlohr


* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-17  0:02   ` Andrew Morton
@ 2015-01-18 15:48     ` Aneesh Kumar K.V
  0 siblings, 0 replies; 22+ messages in thread
From: Aneesh Kumar K.V @ 2015-01-18 15:48 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Kirill A. Shutemov, David Rientjes, Vlastimil Babka, linux-mm,
	linux-kernel

Andrew Morton <akpm@linux-foundation.org> writes:

> On Fri, 16 Jan 2015 12:56:36 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
>
>> This makes sure that we try to allocate hugepages from the local node if
>> allowed by mempolicy. If we can't, we fall back to small page allocation
>> based on mempolicy. This is based on the observation that allocating pages
>> on the local node is more beneficial than allocating hugepages on a remote node.
>
> The changelog is a bit incomplete.  It doesn't describe the current
> behaviour, nor what is wrong with it.  What are the before-and-after
> effects of this change?
>
> And what might be the user-visible effects?

How about this?

With this patch applied we may find transparent huge page allocation
failures if the current node doesn't have enough free hugepages.
Before this patch such failures resulted in us retrying the allocation on
other nodes in the NUMA node mask.


>
>> --- a/mm/mempolicy.c
>> +++ b/mm/mempolicy.c
>> @@ -2030,6 +2030,46 @@ retry_cpuset:
>>  	return page;
>>  }
>>  
>> +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
>> +				unsigned long addr, int order)
>
> alloc_pages_vma() is nicely documented.  alloc_hugepage_vma() is not
> documented at all.  This makes it a bit hard for readers to work out the
> difference!
>
> Is it possible to scrunch them both into the same function?  Probably
> too messy?


/**
 * alloc_hugepage_vma: Allocate a hugepage for a VMA
 * @gfp:
 *   %GFP_USER	  user allocation.
 *   %GFP_KERNEL  kernel allocations,
 *   %GFP_HIGHMEM highmem/user allocations,
 *   %GFP_FS	  allocation should not call back into a file system.
 *   %GFP_ATOMIC  don't sleep.
 *
 * @vma:   Pointer to VMA or NULL if not available.
 * @addr:  Virtual Address of the allocation. Must be inside the VMA.
 * @order: Order of the hugepage for gfp allocation.
 *
 * This function allocates a huge page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * For policy other than %MPOL_INTERLEAVE, we make sure we allocate hugepage
 * only from the current node if the current node is part of the node mask.
 * If we can't allocate a hugepage we fail the allocation and don't try to fall back
 * to other nodes in the node mask. If the current node is not part of node mask
 * or if the NUMA policy is MPOL_INTERLEAVE we use the allocator that can
 * fallback to nodes in the policy node mask.
 *
 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into
 * user space. Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
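
For reference, with this helper the THP fault path from the patch (in
do_huge_pmd_anonymous_page()) does:

	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}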

>
>> +{
>> +	struct page *page;
>> +	nodemask_t *nmask;
>> +	struct mempolicy *pol;
>> +	int node = numa_node_id();
>> +	unsigned int cpuset_mems_cookie;
>> +
>> +retry_cpuset:
>> +	pol = get_vma_policy(vma, addr);
>> +	cpuset_mems_cookie = read_mems_allowed_begin();
>> +
>> +	if (pol->mode != MPOL_INTERLEAVE) {
>> +		/*
>> +		 * For interleave policy, we don't worry about
>> +		 * current node. Otherwise if current node is
>> +		 * in nodemask, try to allocate hugepage from
>> +		 * current node. Don't fall back to other nodes
>> +		 * for THP.
>> +		 */
>
> This code isn't "interleave policy".  It's everything *but* interleave
> policy.  Comment makes no sense!
>

I moved the comment before the if check

struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
				unsigned long addr, int order)
{
	struct page *page;
	nodemask_t *nmask;
	struct mempolicy *pol;
	int node = numa_node_id();
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	pol = get_vma_policy(vma, addr);
	cpuset_mems_cookie = read_mems_allowed_begin();
	/*
	 * For interleave policy, we don't worry about
	 * current node. Otherwise if current node is
	 * in nodemask, try to allocate hugepage from
	 * the current node. Don't fall back to other nodes
	 * for THP.
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		goto alloc_with_fallback;
	nmask = policy_nodemask(gfp, pol);
	if (!nmask || node_isset(node, *nmask)) {
		mpol_cond_put(pol);
		page = alloc_pages_exact_node(node, gfp, order);
		if (unlikely(!page &&
			     read_mems_allowed_retry(cpuset_mems_cookie)))
			goto retry_cpuset;
		return page;
	}
alloc_with_fallback:
	mpol_cond_put(pol);
	/*
	 * if current node is not part of node mask, try
	 * the allocation from any node, and we can do retry
	 * in that case.
	 */
	return alloc_pages_vma(gfp, order, vma, addr, node);
}


* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-17  7:15     ` Davidlohr Bueso
@ 2015-01-18 15:50       ` Aneesh Kumar K.V
  0 siblings, 0 replies; 22+ messages in thread
From: Aneesh Kumar K.V @ 2015-01-18 15:50 UTC (permalink / raw)
  To: Davidlohr Bueso, Andrew Morton
  Cc: Kirill A. Shutemov, David Rientjes, Vlastimil Babka, linux-mm,
	linux-kernel

Davidlohr Bueso <dave@stgolabs.net> writes:

> On Fri, 2015-01-16 at 16:02 -0800, Andrew Morton wrote:
>> On Fri, 16 Jan 2015 12:56:36 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
>> 
>> > This makes sure that we try to allocate hugepages from the local node if
>> > allowed by mempolicy. If we can't, we fall back to small page allocation
>> > based on mempolicy. This is based on the observation that allocating pages
>> > on the local node is more beneficial than allocating hugepages on a remote node.
>> 
>> The changelog is a bit incomplete.  It doesn't describe the current
>> behaviour, nor what is wrong with it.  What are the before-and-after
>> effects of this change?
>> 
>> And what might be the user-visible effects?
>
> I'd be interested in any performance data. I'll run this by a 4 node box
> next week.


Thanks.

>
>> 
>> > --- a/mm/mempolicy.c
>> > +++ b/mm/mempolicy.c
>> > @@ -2030,6 +2030,46 @@ retry_cpuset:
>> >  	return page;
>> >  }
>> >  
>> > +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
>> > +				unsigned long addr, int order)
>> 
>> alloc_pages_vma() is nicely documented.  alloc_hugepage_vma() is not
>> documented at all.  This makes it a bit hard for readers to work out the
>> difference!
>> 
>> Is it possible to scrunch them both into the same function?  Probably
>> too messy?
>> 
>> > +{
>> > +	struct page *page;
>> > +	nodemask_t *nmask;
>> > +	struct mempolicy *pol;
>> > +	int node = numa_node_id();
>> > +	unsigned int cpuset_mems_cookie;
>> > +
>> > +retry_cpuset:
>> > +	pol = get_vma_policy(vma, addr);
>> > +	cpuset_mems_cookie = read_mems_allowed_begin();
>> > +
>> > +	if (pol->mode != MPOL_INTERLEAVE) {
>> > +		/*
>> > +		 * For interleave policy, we don't worry about
>> > +		 * current node. Otherwise if current node is
>> > +		 * in nodemask, try to allocate hugepage from
>> > +		 * current node. Don't fall back to other nodes
>> > +		 * for THP.
>> > +		 */
>> 
>> This code isn't "interleave policy".  It's everything *but* interleave
>> policy.  Comment makes no sense!
>
> May I add that, while a nit, this indentation is quite ugly:

I updated that and replied here
http://article.gmane.org/gmane.linux.kernel/1868545. Let me know what you think.
>
>> 
>> > +		nmask = policy_nodemask(gfp, pol);
>> > +		if (!nmask || node_isset(node, *nmask)) {
>> > +			mpol_cond_put(pol);
>> > +			page = alloc_pages_exact_node(node, gfp, order);
>> > +			if (unlikely(!page &&
>> > +				     read_mems_allowed_retry(cpuset_mems_cookie)))
>> > +				goto retry_cpuset;
>> > +			return page;
>> > +		}
>> > +	}
>
> Improving it makes the code visually easier on the eye. So this should
> be considered if another re-spin of the patch is to be done anyway. Just
> jump to the mpol refcounting and be done when 'pol->mode ==
> MPOL_INTERLEAVE'.
>


-aneesh


* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-17  0:02   ` Andrew Morton
@ 2015-01-19 16:27     ` Vlastimil Babka
  0 siblings, 0 replies; 22+ messages in thread
From: Vlastimil Babka @ 2015-01-19 16:27 UTC (permalink / raw)
  To: Andrew Morton, Aneesh Kumar K.V
  Cc: Kirill A. Shutemov, David Rientjes, linux-mm, linux-kernel

On 01/17/2015 01:02 AM, Andrew Morton wrote:
> On Fri, 16 Jan 2015 12:56:36 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
> 
>> This makes sure that we try to allocate hugepages from the local node if
>> allowed by mempolicy. If we can't, we fall back to small page allocation
>> based on mempolicy. This is based on the observation that allocating pages
>> on the local node is more beneficial than allocating hugepages on a remote node.
> 
> The changelog is a bit incomplete.  It doesn't describe the current
> behaviour, nor what is wrong with it.  What are the before-and-after
> effects of this change?
> 
> And what might be the user-visible effects?
> 
>> --- a/mm/mempolicy.c
>> +++ b/mm/mempolicy.c
>> @@ -2030,6 +2030,46 @@ retry_cpuset:
>>  	return page;
>>  }
>>  
>> +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
>> +				unsigned long addr, int order)
> 
> alloc_pages_vma() is nicely documented.  alloc_hugepage_vma() is not
> documented at all.  This makes it a bit hard for readers to work out the
> difference!
> 
> Is it possible to scrunch them both into the same function?  Probably
> too messy?

Hm, that could work: alloc_pages_vma() already has an if (MPOL_INTERLEAVE) part, so
just put the THP specialities into an "else if (huge_page)" part there?

You could probably test for GFP_TRANSHUGE the same way as __alloc_pages_slowpath
does. There might be false positives theoretically, but is there anything else
that would use these flags and not be a THP?
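
Roughly something like this (just a sketch, eliding the existing interleave
path, the cpuset retry and the final fallback; the GFP_TRANSHUGE test
mirrors the one in __alloc_pages_slowpath()):

	struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
				     unsigned long addr, int node)
	{
		struct mempolicy *pol = get_vma_policy(vma, addr);

		if (pol->mode == MPOL_INTERLEAVE) {
			/* existing interleave handling */
		} else if ((gfp & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
			/* THP: try only the local node, don't fall back */
			nodemask_t *nmask = policy_nodemask(gfp, pol);

			if (!nmask || node_isset(node, *nmask)) {
				mpol_cond_put(pol);
				return alloc_pages_exact_node(node, gfp, order);
			}
		}
		/* existing policy-based allocation with fallback */
		...
	}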



>> +{
>> +	struct page *page;
>> +	nodemask_t *nmask;
>> +	struct mempolicy *pol;
>> +	int node = numa_node_id();
>> +	unsigned int cpuset_mems_cookie;
>> +
>> +retry_cpuset:
>> +	pol = get_vma_policy(vma, addr);
>> +	cpuset_mems_cookie = read_mems_allowed_begin();
>> +
>> +	if (pol->mode != MPOL_INTERLEAVE) {
>> +		/*
>> +		 * For interleave policy, we don't worry about
>> +		 * current node. Otherwise if current node is
>> +		 * in nodemask, try to allocate hugepage from
>> +		 * current node. Don't fall back to other nodes
>> +		 * for THP.
>> +		 */
> 
> This code isn't "interleave policy".  It's everything *but* interleave
> policy.  Comment makes no sense!
> 
>> +		nmask = policy_nodemask(gfp, pol);
>> +		if (!nmask || node_isset(node, *nmask)) {
>> +			mpol_cond_put(pol);
>> +			page = alloc_pages_exact_node(node, gfp, order);
>> +			if (unlikely(!page &&
>> +				     read_mems_allowed_retry(cpuset_mems_cookie)))
>> +				goto retry_cpuset;
>> +			return page;
>> +		}
>> +	}
>> +	mpol_cond_put(pol);
>> +	/*
>> +	 * if current node is not part of node mask, try
>> +	 * the allocation from any node, and we can do retry
>> +	 * in that case.
>> +	 */
>> +	return alloc_pages_vma(gfp, order, vma, addr, node);
>> +}


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-19 16:27     ` Vlastimil Babka
@ 2015-01-20  5:52       ` Aneesh Kumar K.V
  -1 siblings, 0 replies; 22+ messages in thread
From: Aneesh Kumar K.V @ 2015-01-20  5:52 UTC (permalink / raw)
  To: Vlastimil Babka, Andrew Morton
  Cc: Kirill A. Shutemov, David Rientjes, linux-mm, linux-kernel

Vlastimil Babka <vbabka@suse.cz> writes:

> On 01/17/2015 01:02 AM, Andrew Morton wrote:
>> On Fri, 16 Jan 2015 12:56:36 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
>> 
>>> This makes sure that we try to allocate hugepages from the local node if
>>> allowed by mempolicy. If we can't, we fall back to small-page allocation
>>> based on mempolicy. This is based on the observation that allocating pages
>>> on the local node is more beneficial than allocating hugepages on a remote
>>> node.
>> 
>> The changelog is a bit incomplete.  It doesn't describe the current
>> behaviour, nor what is wrong with it.  What are the before-and-after
>> effects of this change?
>> 
>> And what might be the user-visible effects?
>> 
>>> --- a/mm/mempolicy.c
>>> +++ b/mm/mempolicy.c
>>> @@ -2030,6 +2030,46 @@ retry_cpuset:
>>>  	return page;
>>>  }
>>>  
>>> +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
>>> +				unsigned long addr, int order)
>> 
>> alloc_pages_vma() is nicely documented.  alloc_hugepage_vma() is not
>> documented at all.  This makes it a bit hard for readers to work out the
>> difference!
>> 
>> Is it possible to scrunch them both into the same function?  Probably
>> too messy?
>
> Hm that could work, alloc_pages_vma already has an if (MPOL_INTERLEAVE) part, so
> just put the THP specialities into an "else if (huge_page)" part there?
>
> You could probably test for GFP_TRANSHUGE the same way as __alloc_pages_slowpath
> does. There might be false positives theoretically, but is there anything else
> that would use these flags and not be a THP?
>

Is that check correct? I.e.,

	if ((gfp & GFP_TRANSHUGE) == GFP_TRANSHUGE)

may not always indicate a transparent hugepage if defrag == 0. With defrag
cleared, we remove __GFP_WAIT from GFP_TRANSHUGE:

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}
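Spelled out with those definitions (an illustrative fragment, not code from
the patch):

	gfp_t gfp = alloc_hugepage_gfpmask(0, 0);	/* defrag == 0 */
	/*
	 * gfp == GFP_TRANSHUGE & ~__GFP_WAIT, so the test below is false
	 * even though this *is* a THP allocation -- a false negative.
	 */
	bool is_thp = (gfp & GFP_TRANSHUGE) == GFP_TRANSHUGE;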

-aneesh


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-20  5:52       ` Aneesh Kumar K.V
@ 2015-01-20  9:08         ` Vlastimil Babka
  -1 siblings, 0 replies; 22+ messages in thread
From: Vlastimil Babka @ 2015-01-20  9:08 UTC (permalink / raw)
  To: Aneesh Kumar K.V, Andrew Morton
  Cc: Kirill A. Shutemov, David Rientjes, linux-mm, linux-kernel

On 01/20/2015 06:52 AM, Aneesh Kumar K.V wrote:
> Vlastimil Babka <vbabka@suse.cz> writes:
> 
>> On 01/17/2015 01:02 AM, Andrew Morton wrote:
>>> On Fri, 16 Jan 2015 12:56:36 +0530 "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> wrote:
>>> 
>>>> This makes sure that we try to allocate hugepages from the local node if
>>>> allowed by mempolicy. If we can't, we fall back to small-page allocation
>>>> based on mempolicy. This is based on the observation that allocating pages
>>>> on the local node is more beneficial than allocating hugepages on a remote
>>>> node.
>>> 
>>> The changelog is a bit incomplete.  It doesn't describe the current
>>> behaviour, nor what is wrong with it.  What are the before-and-after
>>> effects of this change?
>>> 
>>> And what might be the user-visible effects?
>>> 
>>>> --- a/mm/mempolicy.c
>>>> +++ b/mm/mempolicy.c
>>>> @@ -2030,6 +2030,46 @@ retry_cpuset:
>>>>  	return page;
>>>>  }
>>>>  
>>>> +struct page *alloc_hugepage_vma(gfp_t gfp, struct vm_area_struct *vma,
>>>> +				unsigned long addr, int order)
>>> 
>>> alloc_pages_vma() is nicely documented.  alloc_hugepage_vma() is not
>>> documented at all.  This makes it a bit hard for readers to work out the
>>> difference!
>>> 
>>> Is it possible to scrunch them both into the same function?  Probably
>>> too messy?
>>
>> Hm that could work, alloc_pages_vma already has an if (MPOL_INTERLEAVE) part, so
>> just put the THP specialities into an "else if (huge_page)" part there?
>>
>> You could probably test for GFP_TRANSHUGE the same way as __alloc_pages_slowpath
>> does. There might be false positives theoretically, but is there anything else
>> that would use these flags and not be a THP?
>>
> 
> Is that check correct? I.e.,
> 
> 	if ((gfp & GFP_TRANSHUGE) == GFP_TRANSHUGE)
> 
> may not always indicate a transparent hugepage if defrag == 0. With defrag
> cleared, we remove __GFP_WAIT from GFP_TRANSHUGE.

Yep, that looks wrong. Sigh. I guess we can't spare an extra GFP flag to
indicate TRANSHUGE?
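Purely hypothetical, if a bit could be found, something like:

	/*
	 * Hypothetical marker bit (no claim that a free bit exists in
	 * 3.19): alloc_hugepage_gfpmask() would always set it, so THP
	 * detection no longer depends on __GFP_WAIT surviving defrag=0.
	 */
	#define __GFP_THP	((__force gfp_t)___GFP_THP)	/* hypothetical */

	static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
	{
		return ((GFP_TRANSHUGE | __GFP_THP) &
			~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
	}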

> static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
> {
> 	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
> }
> 
> -aneesh


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH V3] mm/thp: Allocate transparent hugepages on local node
  2015-01-20  9:08         ` Vlastimil Babka
@ 2015-01-21 11:28           ` Vlastimil Babka
  -1 siblings, 0 replies; 22+ messages in thread
From: Vlastimil Babka @ 2015-01-21 11:28 UTC (permalink / raw)
  To: Aneesh Kumar K.V, Andrew Morton
  Cc: Kirill A. Shutemov, David Rientjes, linux-mm, linux-kernel

On 01/20/2015 10:08 AM, Vlastimil Babka wrote:
> On 01/20/2015 06:52 AM, Aneesh Kumar K.V wrote:
>> Vlastimil Babka <vbabka@suse.cz> writes:
>> 
>> Is that check correct? I.e.,
>> 
>> 	if ((gfp & GFP_TRANSHUGE) == GFP_TRANSHUGE)
>> 
>> may not always indicate a transparent hugepage if defrag == 0. With defrag
>> cleared, we remove __GFP_WAIT from GFP_TRANSHUGE.
> 
> Yep, that looks wrong. Sigh. I guess we can't spare an extra GFP flag to
> indicate TRANSHUGE?

I wanted to fix this in __alloc_pages_slowpath(), but actually there's no
issue (other than being quite subtle): if defrag == 0 and thus we don't have
__GFP_WAIT, we reach "if (!wait) goto nopage;" and bail out before ever
reaching the GFP_TRANSHUGE checks.
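For reference, the rough shape of that path (paraphrased from the 3.19-era
code, with intervening logic elided):

	const gfp_t wait = gfp_mask & __GFP_WAIT;

	/* ... */
	if (!wait)
		goto nopage;	/* defrag == 0 THP faults exit here */

	/* ... only reachable with __GFP_WAIT set, so this is reliable: */
	if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
	    (current->flags & PF_KTHREAD))
		migration_mode = MIGRATE_SYNC_LIGHT;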

>> static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
>> {
>> 	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
>> }
>> 
>> -aneesh


^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread, other threads:[~2015-01-21 11:28 UTC | newest]

Thread overview: 22+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-01-16  7:26 [PATCH V3] mm/thp: Allocate transparent hugepages on local node Aneesh Kumar K.V
2015-01-16 12:27 ` Kirill A. Shutemov
2015-01-16 20:01 ` Vlastimil Babka
2015-01-17  0:02 ` Andrew Morton
2015-01-17  7:15   ` Davidlohr Bueso
2015-01-18 15:50     ` Aneesh Kumar K.V
2015-01-18 15:48   ` Aneesh Kumar K.V
2015-01-19 16:27   ` Vlastimil Babka
2015-01-20  5:52     ` Aneesh Kumar K.V
2015-01-20  9:08       ` Vlastimil Babka
2015-01-21 11:28         ` Vlastimil Babka
