* [PATCH] mem: balanced allocation of hugepages
       [not found] <CGME20170216130139eucas1p2512567d6f5db9eaac5ee840b56bf920a@eucas1p2.samsung.com>
@ 2017-02-16 13:01 ` Ilya Maximets
  2017-02-16 13:26   ` Tan, Jianfeng
                     ` (3 more replies)
  0 siblings, 4 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-02-16 13:01 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets, stable

Currently EAL allocates hugepages one by one, paying no
attention to which NUMA node the allocation comes from.

Such behaviour leads to allocation failure if the number of
hugepages available to the application is limited by cgroups
or hugetlbfs and memory is requested not only from the first
socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the mempolicy MPOL_PREFERRED for each
hugepage to one of the requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.

The new config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
is introduced and disabled by default because of the external
dependency on libnuma.

Cc: <stable@dpdk.org>
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base                       |  1 +
 lib/librte_eal/Makefile                  |  4 ++
 lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
 mk/rte.app.mk                            |  3 ++
 4 files changed, 74 insertions(+)

diff --git a/config/common_base b/config/common_base
index 71a4fcb..fbcebbd 100644
--- a/config/common_base
+++ b/config/common_base
@@ -97,6 +97,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 # Default driver path (or "" to disable)
 CONFIG_RTE_EAL_PMD_PATH=""
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index cf11a09..5ae3846 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -35,4 +35,8 @@ DIRS-y += common
 DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
 DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
 
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
+
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a956bb2..8536a36 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -82,6 +82,9 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -359,6 +362,21 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -375,10 +393,48 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+	unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+	unsigned long maxnode = 0;
+	int node_id = -1;
+
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		if (internal_config.socket_mem[i])
+			maxnode = i + 1;
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+			while (!internal_config.socket_mem[node_id])
+				node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+
+			nodemask[node_id / ULONG_BITS] =
+						1UL << (node_id % ULONG_BITS);
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			/*
+			 * Due to old linux kernel bug (feature?) we have to
+			 * increase maxnode by 1. It will be unconditionally
+			 * decreased back to normal value inside the syscall
+			 * handler.
+			 */
+			if (set_mempolicy(MPOL_PREFERRED,
+					  nodemask, maxnode + 1) < 0) {
+				RTE_LOG(ERR, EAL,
+					"Failed to set policy MPOL_PREFERRED: "
+					"%s\n", strerror(errno));
+				return i;
+			}
+
+			nodemask[node_id / ULONG_BITS] = 0UL;
+		}
+#endif
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -489,6 +545,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+		RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
 	return i;
 }
 
@@ -573,6 +634,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 92f3635..c2153b9 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -159,6 +159,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-02-16 13:01 ` [PATCH] mem: balanced allocation of hugepages Ilya Maximets
@ 2017-02-16 13:26   ` Tan, Jianfeng
  2017-02-16 13:55     ` Ilya Maximets
  2017-02-16 13:31   ` Bruce Richardson
                     ` (2 subsequent siblings)
  3 siblings, 1 reply; 99+ messages in thread
From: Tan, Jianfeng @ 2017-02-16 13:26 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand, Gonzalez Monroy, Sergio
  Cc: Heetae Ahn, Yuanhan Liu, Neil Horman, Pei, Yulong, stable

Hi,

> -----Original Message-----
> From: Ilya Maximets [mailto:i.maximets@samsung.com]
> Sent: Thursday, February 16, 2017 9:01 PM
> To: dev@dpdk.org; David Marchand; Gonzalez Monroy, Sergio
> Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
> Maximets; stable@dpdk.org
> Subject: [PATCH] mem: balanced allocation of hugepages
> 
> Currently EAL allocates hugepages one by one not paying
> attention from which NUMA node allocation was done.
> 
> Such behaviour leads to allocation failure if number of
> available hugepages for application limited by cgroups
> or hugetlbfs and memory requested not only from the first
> socket.
> 
> Example:
> 	# 90 x 1GB hugepages available in a system
> 
> 	cgcreate -g hugetlb:/test
> 	# Limit to 32GB of hugepages
> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> 	# Request 4GB from each of 2 sockets
> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
> 
> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
> 	EAL: Not enough memory available on socket 1!
> 	     Requested: 4096MB, available: 0MB
> 	PANIC in rte_eal_init():
> 	Cannot init memory
> 
> 	This happens because all allocated pages are
> 	on socket 0.

For such a use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?

Do you see a use case like --socket-mem 2048,1024 where only three 1GB hugepages are allowed?

Thanks,
Jianfeng

> 
> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> hugepage to one of requested nodes in a round-robin fashion.
> In this case all allocated pages will be fairly distributed
> between all requested nodes.
> 
> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> introduced and disabled by default because of external
> dependency from libnuma.
> 
> Cc: <stable@dpdk.org>
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  config/common_base                       |  1 +
>  lib/librte_eal/Makefile                  |  4 ++
>  lib/librte_eal/linuxapp/eal/eal_memory.c | 66
> ++++++++++++++++++++++++++++++++
>  mk/rte.app.mk                            |  3 ++
>  4 files changed, 74 insertions(+)
> 
> diff --git a/config/common_base b/config/common_base
> index 71a4fcb..fbcebbd 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -97,6 +97,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
>  CONFIG_RTE_EAL_IGB_UIO=n
>  CONFIG_RTE_EAL_VFIO=n
>  CONFIG_RTE_MALLOC_DEBUG=n
> +CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
> 
>  # Default driver path (or "" to disable)
>  CONFIG_RTE_EAL_PMD_PATH=""
> diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
> index cf11a09..5ae3846 100644
> --- a/lib/librte_eal/Makefile
> +++ b/lib/librte_eal/Makefile
> @@ -35,4 +35,8 @@ DIRS-y += common
>  DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
>  DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
> 
> +ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
> +LDLIBS += -lnuma
> +endif
> +
>  include $(RTE_SDK)/mk/rte.subdir.mk
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c
> b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index a956bb2..8536a36 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -82,6 +82,9 @@
>  #include <sys/time.h>
>  #include <signal.h>
>  #include <setjmp.h>
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +#include <numaif.h>
> +#endif
> 
>  #include <rte_log.h>
>  #include <rte_memory.h>
> @@ -359,6 +362,21 @@ static int huge_wrap_sigsetjmp(void)
>  	return sigsetjmp(huge_jmpenv, 1);
>  }
> 
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +#ifndef ULONG_SIZE
> +#define ULONG_SIZE sizeof(unsigned long)
> +#endif
> +#ifndef ULONG_BITS
> +#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
> +#endif
> +#ifndef DIV_ROUND_UP
> +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
> +#endif
> +#ifndef BITS_TO_LONGS
> +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
> +#endif
> +#endif
> +
>  /*
>   * Mmap all hugepages of hugepage table: it first open a file in
>   * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
> @@ -375,10 +393,48 @@ map_all_hugepages(struct hugepage_file
> *hugepg_tbl,
>  	void *virtaddr;
>  	void *vma_addr = NULL;
>  	size_t vma_len = 0;
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +	unsigned long
> nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
> +	unsigned long maxnode = 0;
> +	int node_id = -1;
> +
> +	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
> +		if (internal_config.socket_mem[i])
> +			maxnode = i + 1;
> +#endif
> 
>  	for (i = 0; i < hpi->num_pages[0]; i++) {
>  		uint64_t hugepage_sz = hpi->hugepage_sz;
> 
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +		if (maxnode) {
> +			node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
> +			while (!internal_config.socket_mem[node_id])
> +				node_id = (node_id + 1) %
> RTE_MAX_NUMA_NODES;
> +
> +			nodemask[node_id / ULONG_BITS] =
> +						1UL << (node_id %
> ULONG_BITS);
> +
> +			RTE_LOG(DEBUG, EAL,
> +				"Setting policy MPOL_PREFERRED for
> socket %d\n",
> +				node_id);
> +			/*
> +			 * Due to old linux kernel bug (feature?) we have to
> +			 * increase maxnode by 1. It will be unconditionally
> +			 * decreased back to normal value inside the syscall
> +			 * handler.
> +			 */
> +			if (set_mempolicy(MPOL_PREFERRED,
> +					  nodemask, maxnode + 1) < 0) {
> +				RTE_LOG(ERR, EAL,
> +					"Failed to set policy
> MPOL_PREFERRED: "
> +					"%s\n", strerror(errno));
> +				return i;
> +			}
> +
> +			nodemask[node_id / ULONG_BITS] = 0UL;
> +		}
> +#endif
>  		if (orig) {
>  			hugepg_tbl[i].file_id = i;
>  			hugepg_tbl[i].size = hugepage_sz;
> @@ -489,6 +545,10 @@ map_all_hugepages(struct hugepage_file
> *hugepg_tbl,
>  		vma_len -= hugepage_sz;
>  	}
> 
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
> +		RTE_LOG(ERR, EAL, "Failed to set mempolicy
> MPOL_DEFAULT\n");
> +#endif
>  	return i;
>  }
> 
> @@ -573,6 +634,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl,
> struct hugepage_info *hpi)
>  			if (hugepg_tbl[i].orig_va == va) {
>  				hugepg_tbl[i].socket_id = socket_id;
>  				hp_count++;
> +#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> +				RTE_LOG(DEBUG, EAL,
> +					"Hugepage %s is on socket %d\n",
> +					hugepg_tbl[i].filepath, socket_id);
> +#endif
>  			}
>  		}
>  	}
> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
> index 92f3635..c2153b9 100644
> --- a/mk/rte.app.mk
> +++ b/mk/rte.app.mk
> @@ -159,6 +159,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
>  # The static libraries do not know their dependencies.
>  # So linking with static library requires explicit dependencies.
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
> +ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
> +_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
> +endif
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
> --
> 2.7.4


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-02-16 13:01 ` [PATCH] mem: balanced allocation of hugepages Ilya Maximets
  2017-02-16 13:26   ` Tan, Jianfeng
@ 2017-02-16 13:31   ` Bruce Richardson
  2017-03-06  9:34   ` Ilya Maximets
       [not found]   ` <CGME20170410080425eucas1p27fd424ae58151f13b1a7a3723aa4ad1e@eucas1p2.samsung.com>
  3 siblings, 0 replies; 99+ messages in thread
From: Bruce Richardson @ 2017-02-16 13:31 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Heetae Ahn,
	Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei, stable

On Thu, Feb 16, 2017 at 04:01:10PM +0300, Ilya Maximets wrote:
> Currently EAL allocates hugepages one by one not paying
> attention from which NUMA node allocation was done.
> 
> Such behaviour leads to allocation failure if number of
> available hugepages for application limited by cgroups
> or hugetlbfs and memory requested not only from the first
> socket.
> 
> Example:
> 	# 90 x 1GB hugepages available in a system
> 
> 	cgcreate -g hugetlb:/test
> 	# Limit to 32GB of hugepages
> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> 	# Request 4GB from each of 2 sockets
> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
> 
> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
> 	EAL: Not enough memory available on socket 1!
> 	     Requested: 4096MB, available: 0MB
> 	PANIC in rte_eal_init():
> 	Cannot init memory
> 
> 	This happens because all allocated pages are
> 	on socket 0.
> 
> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> hugepage to one of requested nodes in a round-robin fashion.
> In this case all allocated pages will be fairly distributed
> between all requested nodes.
> 
> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> introduced and disabled by default because of external
> dependency from libnuma.
> 

I think this highlights a general technical problem we need to resolve
in DPDK. If we want to add support for a new feature in DPDK by
leveraging functionality in an existing library, we are caught in a sort
of catch-22:
* If we want to leverage the existing library, we have to have the
  feature off-by-default, as we don't want to increase the minimum
  requirements for DPDK.
* If we want the feature enabled by default we need to avoid the
  dependency, and so reimplement some or all of the functionality inside
  DPDK itself. That will be rejected on the basis that it duplicates
  existing library functionality.

I suspect the solution to this is more dynamic build-time configuration
to start enabling things based on installed dependencies, but I'm open
to other opinions. I see a gap here, however.

/Bruce


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-02-16 13:26   ` Tan, Jianfeng
@ 2017-02-16 13:55     ` Ilya Maximets
  2017-02-16 13:57       ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-02-16 13:55 UTC (permalink / raw)
  To: Tan, Jianfeng, dev, David Marchand, Gonzalez Monroy, Sergio
  Cc: Heetae Ahn, Yuanhan Liu, Neil Horman, Pei, Yulong, stable

Hi,

On 16.02.2017 16:26, Tan, Jianfeng wrote:
> Hi,
> 
>> -----Original Message-----
>> From: Ilya Maximets [mailto:i.maximets@samsung.com]
>> Sent: Thursday, February 16, 2017 9:01 PM
>> To: dev@dpdk.org; David Marchand; Gonzalez Monroy, Sergio
>> Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
>> Maximets; stable@dpdk.org
>> Subject: [PATCH] mem: balanced allocation of hugepages
>>
>> Currently EAL allocates hugepages one by one not paying
>> attention from which NUMA node allocation was done.
>>
>> Such behaviour leads to allocation failure if number of
>> available hugepages for application limited by cgroups
>> or hugetlbfs and memory requested not only from the first
>> socket.
>>
>> Example:
>> 	# 90 x 1GB hugepages available in a system
>>
>> 	cgcreate -g hugetlb:/test
>> 	# Limit to 32GB of hugepages
>> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>> 	# Request 4GB from each of 2 sockets
>> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>
>> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
>> 	EAL: Not enough memory available on socket 1!
>> 	     Requested: 4096MB, available: 0MB
>> 	PANIC in rte_eal_init():
>> 	Cannot init memory
>>
>> 	This happens because all allocated pages are
>> 	on socket 0.
> 
> For such an use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?

Unfortunately, the interleave policy doesn't work for me. I suspect the kernel
configuration blocks this, or I don't understand something in the kernel internals.
I'm using the 3.10 rt kernel from rhel7.

I tried to set MPOL_INTERLEAVE in code and it doesn't work for me. Your example
with numactl doesn't work either:

# Limited to 8GB of hugepages
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 

EAL: Setting up physically contiguous memory...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 8 not 90 hugepages of size 1024 MB allocated
EAL: Hugepage /dev/hugepages/rtemap_0 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_1 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_2 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_3 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_4 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_5 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_6 is on socket 0
EAL: Hugepage /dev/hugepages/rtemap_7 is on socket 0
EAL: Not enough memory available on socket 1! Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory

Also, using numactl will affect all allocations in the application. This may
cause additional unexpected issues.

> 
> Do you see use case like --socket-mem 2048,1024 and only three 1GB-hugepage are allowed?

This case will work with my patch.
But the opposite one, '--socket-mem=1024,2048', will fail.
To be clear, we would need to allocate all required memory first
from each numa node and then allocate all other available pages
in a round-robin fashion. But such a solution looks a little ugly.
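
To illustrate the failure: with '--socket-mem=1024,2048' and only three
1GB pages allowed, the round-robin in this patch starts from socket 0 and
ends up with two pages on socket 0 and one on socket 1, so socket 1 is
still 1GB short while socket 0 holds a page it does not need.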

What do you think?

Best regards, Ilya Maximets.


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-02-16 13:55     ` Ilya Maximets
@ 2017-02-16 13:57       ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-02-16 13:57 UTC (permalink / raw)
  To: Tan, Jianfeng, dev, David Marchand, Gonzalez Monroy, Sergio
  Cc: Heetae Ahn, Yuanhan Liu, Neil Horman, Pei, Yulong, stable



On 16.02.2017 16:55, Ilya Maximets wrote:
> Hi,
> 
> On 16.02.2017 16:26, Tan, Jianfeng wrote:
>> Hi,
>>
>>> -----Original Message-----
>>> From: Ilya Maximets [mailto:i.maximets@samsung.com]
>>> Sent: Thursday, February 16, 2017 9:01 PM
>>> To: dev@dpdk.org; David Marchand; Gonzalez Monroy, Sergio
>>> Cc: Heetae Ahn; Yuanhan Liu; Tan, Jianfeng; Neil Horman; Pei, Yulong; Ilya
>>> Maximets; stable@dpdk.org
>>> Subject: [PATCH] mem: balanced allocation of hugepages
>>>
>>> Currently EAL allocates hugepages one by one not paying
>>> attention from which NUMA node allocation was done.
>>>
>>> Such behaviour leads to allocation failure if number of
>>> available hugepages for application limited by cgroups
>>> or hugetlbfs and memory requested not only from the first
>>> socket.
>>>
>>> Example:
>>> 	# 90 x 1GB hugepages available in a system
>>>
>>> 	cgcreate -g hugetlb:/test
>>> 	# Limit to 32GB of hugepages
>>> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>> 	# Request 4GB from each of 2 sockets
>>> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>
>>> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
>>> 	EAL: Not enough memory available on socket 1!
>>> 	     Requested: 4096MB, available: 0MB
>>> 	PANIC in rte_eal_init():
>>> 	Cannot init memory
>>>
>>> 	This happens because all allocated pages are
>>> 	on socket 0.
>>
>> For such an use case, why not just use "numactl --interleave=0,1 <DPDK app> xxx"?
> 
> Unfortunately, interleave policy doesn't work for me. I suspect kernel configuration
> blocks this or I don't understand something in kernel internals.
> I'm using 3.10 rt kernel from rhel7.
> 
> I tried to set up MPOL_INTERLEAVE in code and it doesn't work for me. Your example
> with numactl doesn't work too:
> 
> # Limited to 8GB of hugepages
> cgexec -g hugetlb:test testpmd --socket-mem=4096,4096

Sorry,
cgexec -g hugetlb:test numactl --interleave=0,1 ./testpmd --socket-mem=4096,4096 ..


> 
> EAL: Setting up physically contiguous memory...
> EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> EAL: 8 not 90 hugepages of size 1024 MB allocated
> EAL: Hugepage /dev/hugepages/rtemap_0 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_1 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_2 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_3 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_4 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_5 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_6 is on socket 0
> EAL: Hugepage /dev/hugepages/rtemap_7 is on socket 0
> EAL: Not enough memory available on socket 1! Requested: 4096MB, available: 0MB
> PANIC in rte_eal_init():
> Cannot init memory
> 
> Also, using numactl will affect all the allocations in application. This may
> cause additional unexpected issues.
> 
>>
>> Do you see use case like --socket-mem 2048,1024 and only three 1GB-hugepage are allowed?
> 
> This case will work with my patch.
> But the opposite one '--socket-mem=1024,2048' will fail.
> To be clear, we need to allocate all required memory at first
> from each numa node and then allocate all other available pages
> in round-robin fashion. But such solution looks a little ugly.
> 
> What do you think?
> 
> Best regards, Ilya Maximets.
> 
> 


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-02-16 13:01 ` [PATCH] mem: balanced allocation of hugepages Ilya Maximets
  2017-02-16 13:26   ` Tan, Jianfeng
  2017-02-16 13:31   ` Bruce Richardson
@ 2017-03-06  9:34   ` Ilya Maximets
  2017-03-08 13:46     ` Sergio Gonzalez Monroy
       [not found]   ` <CGME20170410080425eucas1p27fd424ae58151f13b1a7a3723aa4ad1e@eucas1p2.samsung.com>
  3 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-03-06  9:34 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	stable, Thomas Monjalon, Bruce Richardson

Hi all.

So, what about this change?

Best regards, Ilya Maximets.

On 16.02.2017 16:01, Ilya Maximets wrote:
> Currently EAL allocates hugepages one by one not paying
> attention from which NUMA node allocation was done.
> 
> Such behaviour leads to allocation failure if number of
> available hugepages for application limited by cgroups
> or hugetlbfs and memory requested not only from the first
> socket.
> 
> Example:
> 	# 90 x 1GB hugepages available in a system
> 
> 	cgcreate -g hugetlb:/test
> 	# Limit to 32GB of hugepages
> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> 	# Request 4GB from each of 2 sockets
> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
> 
> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
> 	EAL: Not enough memory available on socket 1!
> 	     Requested: 4096MB, available: 0MB
> 	PANIC in rte_eal_init():
> 	Cannot init memory
> 
> 	This happens because all allocated pages are
> 	on socket 0.
> 
> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> hugepage to one of requested nodes in a round-robin fashion.
> In this case all allocated pages will be fairly distributed
> between all requested nodes.
> 
> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> introduced and disabled by default because of external
> dependency from libnuma.
> 
> Cc: <stable@dpdk.org>
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  config/common_base                       |  1 +
>  lib/librte_eal/Makefile                  |  4 ++
>  lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>  mk/rte.app.mk                            |  3 ++
>  4 files changed, 74 insertions(+)
> 


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-03-06  9:34   ` Ilya Maximets
@ 2017-03-08 13:46     ` Sergio Gonzalez Monroy
  2017-03-09 12:57       ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-03-08 13:46 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	stable, Thomas Monjalon, Bruce Richardson

Hi Ilya,

I have done similar tests and as you already pointed out, 'numactl 
--interleave' does not seem to work as expected.
I have also checked that the issue can be reproduced with a quota limit on
the hugetlbfs mount point.

I would be inclined towards *adding libnuma as a dependency* to DPDK to
make memory allocation a bit more reliable.

Currently, at a high level, hugepages per numa node are handled like this:
1) Try to map all free hugepages. The total number of mapped hugepages
depends on whether there were any limits, such as cgroups or a quota on
the mount point.
2) Find out the numa node of each hugepage.
3) Check if we have enough hugepages for the requested memory in each numa
socket/node.

Using libnuma we could try to allocate hugepages per numa node instead
(a rough standalone sketch follows below):
1) Try to map as many hugepages as possible from numa 0.
2) Check if we have enough hugepages for the requested memory in numa 0.
3) Try to map as many hugepages as possible from numa 1.
4) Check if we have enough hugepages for the requested memory in numa 1.
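
Just to make the idea concrete, the system-call side of such a per-node
pass can be sketched with a small standalone program (this is only an
illustration, not EAL code: it uses anonymous MAP_HUGETLB mappings
instead of hugetlbfs files, and the two-node request values are made up):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <numaif.h>        /* set_mempolicy(), MPOL_*; link with -lnuma */

#define HUGEPAGE_SZ (2UL * 1024 * 1024)

int main(void)
{
	/* Made-up request: 2 x 2MB pages from node 0, 2 from node 1. */
	unsigned int request[2] = { 2, 2 };

	for (unsigned int node = 0; node < 2; node++) {
		unsigned long mask = 1UL << node;

		/* Prefer this node for the pages we are about to fault in. */
		if (set_mempolicy(MPOL_PREFERRED, &mask, 8 * sizeof(mask)) < 0) {
			perror("set_mempolicy");
			return 1;
		}

		for (unsigned int i = 0; i < request[node]; i++) {
			void *p = mmap(NULL, HUGEPAGE_SZ, PROT_READ | PROT_WRITE,
				       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
				       -1, 0);
			if (p == MAP_FAILED) {
				perror("mmap");   /* out of pages or over a limit */
				break;
			}
			/* Touch the page so it is faulted in, preferring 'node'. */
			memset(p, 0, HUGEPAGE_SZ);
		}
	}

	/* Restore the default policy for the rest of the process. */
	set_mempolicy(MPOL_DEFAULT, NULL, 0);
	return 0;
}

Note that MPOL_PREFERRED only prefers the given node and silently falls
back to another one when that node runs out of pages, so steps 2)/4)
above would still have to verify where the pages actually landed (as
find_numasocket() does today).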

This approach would improve the failing scenarios caused by limits, but it
would still not fix issues regarding non-contiguous hugepages (in the worst
case each hugepage is a memseg).
The non-contiguous hugepage issues are not as critical now that
mempools can span multiple memsegs/hugepages, but they are still a
problem for any other library requiring big chunks of memory.

Potentially, if we were to add an option such as 'iommu-only' when all
devices are bound to vfio-pci, we could have a reliable way to allocate
hugepages by just requesting the number of pages from each numa node.

Thoughts?

Sergio

On 06/03/2017 09:34, Ilya Maximets wrote:
> Hi all.
>
> So, what about this change?
>
> Best regards, Ilya Maximets.
>
> On 16.02.2017 16:01, Ilya Maximets wrote:
>> Currently EAL allocates hugepages one by one not paying
>> attention from which NUMA node allocation was done.
>>
>> Such behaviour leads to allocation failure if number of
>> available hugepages for application limited by cgroups
>> or hugetlbfs and memory requested not only from the first
>> socket.
>>
>> Example:
>> 	# 90 x 1GB hugepages available in a system
>>
>> 	cgcreate -g hugetlb:/test
>> 	# Limit to 32GB of hugepages
>> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>> 	# Request 4GB from each of 2 sockets
>> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>
>> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
>> 	EAL: Not enough memory available on socket 1!
>> 	     Requested: 4096MB, available: 0MB
>> 	PANIC in rte_eal_init():
>> 	Cannot init memory
>>
>> 	This happens because all allocated pages are
>> 	on socket 0.
>>
>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>> hugepage to one of requested nodes in a round-robin fashion.
>> In this case all allocated pages will be fairly distributed
>> between all requested nodes.
>>
>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>> introduced and disabled by default because of external
>> dependency from libnuma.
>>
>> Cc: <stable@dpdk.org>
>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>
>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>> ---
>>   config/common_base                       |  1 +
>>   lib/librte_eal/Makefile                  |  4 ++
>>   lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>   mk/rte.app.mk                            |  3 ++
>>   4 files changed, 74 insertions(+)
>>


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-03-08 13:46     ` Sergio Gonzalez Monroy
@ 2017-03-09 12:57       ` Ilya Maximets
  2017-03-27 13:01         ` Sergio Gonzalez Monroy
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-03-09 12:57 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, dev, David Marchand
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	stable, Thomas Monjalon, Bruce Richardson

On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
> Hi Ilya,
> 
> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
> 
> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
> 
> Currently at a high level regarding hugepages per numa node:
> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
> 2) Find out numa node of each hugepage.
> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
> 
> Using libnuma we could try to allocate hugepages per numa:
> 1) Try to map as many hugepages from numa 0.
> 2) Check if we have enough hugepages for requested memory in numa 0.
> 3) Try to map as many hugepages from numa 1.
> 4) Check if we have enough hugepages for requested memory in numa 1.
> 
> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
> 
> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
> 
> Thoughts?

Hi Sergio,

Thanks for your attention to this.

For now, as we have some issues with non-contiguous
hugepages, I'm thinking about the following hybrid schema:
1) Allocate essential hugepages:
	1.1) Allocate only as many hugepages from numa N as
	     needed to fit the requested memory for this numa node.
	1.2) Repeat 1.1 for all numa nodes.
2) Try to map all remaining free hugepages in a round-robin
   fashion like in this patch.
3) Sort the pages and choose the most suitable ones.

This solution should decrease the number of issues connected with
non-contiguous memory (a toy sketch of the resulting allocation
order is below).
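
Something like this toy model shows the intended order (not EAL code:
it treats the cgroup limit as a single global page budget, ignores
per-node free page counts and skips the sorting step; the request and
limit values are made up):

#include <stdio.h>

#define NODES 2

int main(void)
{
	unsigned int requested[NODES] = { 1, 2 }; /* pages wanted per node */
	unsigned int limit = 3;                   /* total pages the cgroup allows */
	unsigned int taken[NODES] = { 0, 0 };

	/* Phase 1: essential pages only, node by node, up to each request. */
	for (int n = 0; n < NODES; n++)
		while (taken[n] < requested[n] && limit > 0) {
			taken[n]++;
			limit--;
			printf("phase 1: prefer node %d\n", n);
		}

	/* Phase 2: whatever the limit still allows, in a round-robin fashion. */
	for (int n = 0; limit > 0; n = (n + 1) % NODES) {
		taken[n]++;
		limit--;
		printf("phase 2: prefer node %d\n", n);
	}

	for (int n = 0; n < NODES; n++)
		printf("node %d: %u page(s) taken, %u requested\n",
		       n, taken[n], requested[n]);
	return 0;
}

With the made-up values above (1 and 2 pages requested, 3 pages allowed)
phase 1 already satisfies both nodes, which is exactly the
'--socket-mem=1024,2048' case where the plain round-robin from this
patch comes up short.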

Best regards, Ilya Maximets.

> 
> On 06/03/2017 09:34, Ilya Maximets wrote:
>> Hi all.
>>
>> So, what about this change?
>>
>> Best regards, Ilya Maximets.
>>
>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>> Currently EAL allocates hugepages one by one not paying
>>> attention from which NUMA node allocation was done.
>>>
>>> Such behaviour leads to allocation failure if number of
>>> available hugepages for application limited by cgroups
>>> or hugetlbfs and memory requested not only from the first
>>> socket.
>>>
>>> Example:
>>>     # 90 x 1GB hugepages available in a system
>>>
>>>     cgcreate -g hugetlb:/test
>>>     # Limit to 32GB of hugepages
>>>     cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>     # Request 4GB from each of 2 sockets
>>>     cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>
>>>     EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>     EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>     EAL: Not enough memory available on socket 1!
>>>          Requested: 4096MB, available: 0MB
>>>     PANIC in rte_eal_init():
>>>     Cannot init memory
>>>
>>>     This happens because all allocated pages are
>>>     on socket 0.
>>>
>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>> hugepage to one of requested nodes in a round-robin fashion.
>>> In this case all allocated pages will be fairly distributed
>>> between all requested nodes.
>>>
>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>> introduced and disabled by default because of external
>>> dependency from libnuma.
>>>
>>> Cc: <stable@dpdk.org>
>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>
>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>> ---
>>>   config/common_base                       |  1 +
>>>   lib/librte_eal/Makefile                  |  4 ++
>>>   lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>   mk/rte.app.mk                            |  3 ++
>>>   4 files changed, 74 insertions(+)
>>>
> 
> 
> 
> 


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-03-09 12:57       ` Ilya Maximets
@ 2017-03-27 13:01         ` Sergio Gonzalez Monroy
  2017-03-27 14:43           ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-03-27 13:01 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	stable, Thomas Monjalon, Bruce Richardson

On 09/03/2017 12:57, Ilya Maximets wrote:
> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>> Hi Ilya,
>>
>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>
>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>
>> Currently at a high level regarding hugepages per numa node:
>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>> 2) Find out numa node of each hugepage.
>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>
>> Using libnuma we could try to allocate hugepages per numa:
>> 1) Try to map as many hugepages from numa 0.
>> 2) Check if we have enough hugepages for requested memory in numa 0.
>> 3) Try to map as many hugepages from numa 1.
>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>
>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>
>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>
>> Thoughts?
> Hi Sergio,
>
> Thanks for your attention to this.
>
> For now, as we have some issues with non-contiguous
> hugepages, I'm thinking about following hybrid schema:
> 1) Allocate essential hugepages:
> 	1.1) Allocate as many hugepages from numa N to
> 	     only fit requested memory for this numa.
> 	1.2) repeat 1.1 for all numa nodes.
> 2) Try to map all remaining free hugepages in a round-robin
>     fashion like in this patch.
> 3) Sort pages and choose the most suitable.
>
> This solution should decrease number of issues connected with
> non-contiguous memory.

Sorry for the late reply, I was hoping for more comments from the community.

IMHO this should be the default behavior, which means no config option and
libnuma as an EAL dependency.
I think your proposal is good; could you consider implementing such an
approach in the next release?

Regards.

> Best regards, Ilya Maximets.
>
>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>> Hi all.
>>>
>>> So, what about this change?
>>>
>>> Best regards, Ilya Maximets.
>>>
>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>> Currently EAL allocates hugepages one by one not paying
>>>> attention from which NUMA node allocation was done.
>>>>
>>>> Such behaviour leads to allocation failure if number of
>>>> available hugepages for application limited by cgroups
>>>> or hugetlbfs and memory requested not only from the first
>>>> socket.
>>>>
>>>> Example:
>>>>      # 90 x 1GB hugepages available in a system
>>>>
>>>>      cgcreate -g hugetlb:/test
>>>>      # Limit to 32GB of hugepages
>>>>      cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>      # Request 4GB from each of 2 sockets
>>>>      cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>
>>>>      EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>      EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>      EAL: Not enough memory available on socket 1!
>>>>           Requested: 4096MB, available: 0MB
>>>>      PANIC in rte_eal_init():
>>>>      Cannot init memory
>>>>
>>>>      This happens because all allocated pages are
>>>>      on socket 0.
>>>>
>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>> In this case all allocated pages will be fairly distributed
>>>> between all requested nodes.
>>>>
>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>> introduced and disabled by default because of external
>>>> dependency from libnuma.
>>>>
>>>> Cc:<stable@dpdk.org>
>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>
>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>> ---
>>>>    config/common_base                       |  1 +
>>>>    lib/librte_eal/Makefile                  |  4 ++
>>>>    lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>    mk/rte.app.mk                            |  3 ++
>>>>    4 files changed, 74 insertions(+)

Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-03-27 13:01         ` Sergio Gonzalez Monroy
@ 2017-03-27 14:43           ` Ilya Maximets
  2017-04-07 15:14             ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-03-27 14:43 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, dev, David Marchand
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	stable, Thomas Monjalon, Bruce Richardson

On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
> On 09/03/2017 12:57, Ilya Maximets wrote:
>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>> Hi Ilya,
>>>
>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>
>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>
>>> Currently at a high level regarding hugepages per numa node:
>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>> 2) Find out numa node of each hugepage.
>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>
>>> Using libnuma we could try to allocate hugepages per numa:
>>> 1) Try to map as many hugepages from numa 0.
>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>> 3) Try to map as many hugepages from numa 1.
>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>
>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>
>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>
>>> Thoughts?
>> Hi Sergio,
>>
>> Thanks for your attention to this.
>>
>> For now, as we have some issues with non-contiguous
>> hugepages, I'm thinking about following hybrid schema:
>> 1) Allocate essential hugepages:
>>     1.1) Allocate as many hugepages from numa N to
>>          only fit requested memory for this numa.
>>     1.2) repeat 1.1 for all numa nodes.
>> 2) Try to map all remaining free hugepages in a round-robin
>>     fashion like in this patch.
>> 3) Sort pages and choose the most suitable.
>>
>> This solution should decrease number of issues connected with
>> non-contiguous memory.
> 
> Sorry for late reply, I was hoping for more comments from the community.
> 
> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
> I think your proposal is good, could you consider implementing such approach on next release?

Sure, I can implement this for 17.08 release.

>>
>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>> Hi all.
>>>>
>>>> So, what about this change?
>>>>
>>>> Best regards, Ilya Maximets.
>>>>
>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>> Currently EAL allocates hugepages one by one not paying
>>>>> attention from which NUMA node allocation was done.
>>>>>
>>>>> Such behaviour leads to allocation failure if number of
>>>>> available hugepages for application limited by cgroups
>>>>> or hugetlbfs and memory requested not only from the first
>>>>> socket.
>>>>>
>>>>> Example:
>>>>>      # 90 x 1GB hugepages available in a system
>>>>>
>>>>>      cgcreate -g hugetlb:/test
>>>>>      # Limit to 32GB of hugepages
>>>>>      cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>      # Request 4GB from each of 2 sockets
>>>>>      cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>
>>>>>      EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>      EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>      EAL: Not enough memory available on socket 1!
>>>>>           Requested: 4096MB, available: 0MB
>>>>>      PANIC in rte_eal_init():
>>>>>      Cannot init memory
>>>>>
>>>>>      This happens because all allocated pages are
>>>>>      on socket 0.
>>>>>
>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>> In this case all allocated pages will be fairly distributed
>>>>> between all requested nodes.
>>>>>
>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>> introduced and disabled by default because of external
>>>>> dependency from libnuma.
>>>>>
>>>>> Cc:<stable@dpdk.org>
>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>
>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>>> ---
>>>>>    config/common_base                       |  1 +
>>>>>    lib/librte_eal/Makefile                  |  4 ++
>>>>>    lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>    mk/rte.app.mk                            |  3 ++
>>>>>    4 files changed, 74 insertions(+)
> 
> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>

Thanks.

Best regards, Ilya Maximets.


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-03-27 14:43           ` Ilya Maximets
@ 2017-04-07 15:14             ` Ilya Maximets
  2017-04-07 15:44               ` Thomas Monjalon
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-04-07 15:14 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, dev, David Marchand, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	stable, Bruce Richardson

Hi All.

I wanted to ask (just to clarify the current status):
will this patch be included in the current release (acked by the maintainer),
with me upgrading it to the hybrid logic afterwards, or should I just prepare
a v3 with the hybrid logic for 17.08?

Best regards, Ilya Maximets.


On 27.03.2017 17:43, Ilya Maximets wrote:
> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>> Hi Ilya,
>>>>
>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>
>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>
>>>> Currently at a high level regarding hugepages per numa node:
>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>> 2) Find out numa node of each hugepage.
>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>>
>>>> Using libnuma we could try to allocate hugepages per numa:
>>>> 1) Try to map as many hugepages from numa 0.
>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>> 3) Try to map as many hugepages from numa 1.
>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>>
>>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>
>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>
>>>> Thoughts?
>>> Hi Sergio,
>>>
>>> Thanks for your attention to this.
>>>
>>> For now, as we have some issues with non-contiguous
>>> hugepages, I'm thinking about following hybrid schema:
>>> 1) Allocate essential hugepages:
>>>     1.1) Allocate as many hugepages from numa N to
>>>          only fit requested memory for this numa.
>>>     1.2) repeat 1.1 for all numa nodes.
>>> 2) Try to map all remaining free hugepages in a round-robin
>>>     fashion like in this patch.
>>> 3) Sort pages and choose the most suitable.
>>>
>>> This solution should decrease number of issues connected with
>>> non-contiguous memory.
>>
>> Sorry for late reply, I was hoping for more comments from the community.
>>
>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>> I think your proposal is good, could you consider implementing such approach on next release?
> 
> Sure, I can implement this for 17.08 release.
> 
>>>
>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>> Hi all.
>>>>>
>>>>> So, what about this change?
>>>>>
>>>>> Best regards, Ilya Maximets.
>>>>>
>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>> attention from which NUMA node allocation was done.
>>>>>>
>>>>>> Such behaviour leads to allocation failure if number of
>>>>>> available hugepages for application limited by cgroups
>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>> socket.
>>>>>>
>>>>>> Example:
>>>>>>      # 90 x 1GB hugepages available in a system
>>>>>>
>>>>>>      cgcreate -g hugetlb:/test
>>>>>>      # Limit to 32GB of hugepages
>>>>>>      cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>>      # Request 4GB from each of 2 sockets
>>>>>>      cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>
>>>>>>      EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>>      EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>>      EAL: Not enough memory available on socket 1!
>>>>>>           Requested: 4096MB, available: 0MB
>>>>>>      PANIC in rte_eal_init():
>>>>>>      Cannot init memory
>>>>>>
>>>>>>      This happens because all allocated pages are
>>>>>>      on socket 0.
>>>>>>
>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>> In this case all allocated pages will be fairly distributed
>>>>>> between all requested nodes.
>>>>>>
>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>> introduced and disabled by default because of external
>>>>>> dependency from libnuma.
>>>>>>
>>>>>> Cc:<stable@dpdk.org>
>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>
>>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>>>> ---
>>>>>>    config/common_base                       |  1 +
>>>>>>    lib/librte_eal/Makefile                  |  4 ++
>>>>>>    lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>>    mk/rte.app.mk                            |  3 ++
>>>>>>    4 files changed, 74 insertions(+)
>>
>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
> 
> Thanks.
> 
> Best regards, Ilya Maximets.
> 


* Re: [PATCH] mem: balanced allocation of hugepages
  2017-04-07 15:14             ` Ilya Maximets
@ 2017-04-07 15:44               ` Thomas Monjalon
  2017-04-10  7:11                 ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-04-07 15:44 UTC (permalink / raw)
  To: Ilya Maximets, Sergio Gonzalez Monroy
  Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, stable, Bruce Richardson

2017-04-07 18:14, Ilya Maximets:
> Hi All.
> 
> I wanted to ask (just to clarify current status):
> Will this patch be included in current release (acked by maintainer)
> and then I will upgrade it to hybrid logic or I will just prepare v3
> with hybrid logic for 17.08 ?

What is your preferred option Ilya?
Sergio?


> On 27.03.2017 17:43, Ilya Maximets wrote:
> > On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
> >> On 09/03/2017 12:57, Ilya Maximets wrote:
> >>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
> >>>> Hi Ilya,
> >>>>
> >>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
> >>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
> >>>>
> >>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
> >>>>
> >>>> Currently at a high level regarding hugepages per numa node:
> >>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
> >>>> 2) Find out numa node of each hugepage.
> >>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
> >>>>
> >>>> Using libnuma we could try to allocate hugepages per numa:
> >>>> 1) Try to map as many hugepages from numa 0.
> >>>> 2) Check if we have enough hugepages for requested memory in numa 0.
> >>>> 3) Try to map as many hugepages from numa 1.
> >>>> 4) Check if we have enough hugepages for requested memory in numa 1.
> >>>>
> >>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
> >>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
> >>>>
> >>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
> >>>>
> >>>> Thoughts?
> >>> Hi Sergio,
> >>>
> >>> Thanks for your attention to this.
> >>>
> >>> For now, as we have some issues with non-contiguous
> >>> hugepages, I'm thinking about following hybrid schema:
> >>> 1) Allocate essential hugepages:
> >>>     1.1) Allocate as many hugepages from numa N to
> >>>          only fit requested memory for this numa.
> >>>     1.2) repeat 1.1 for all numa nodes.
> >>> 2) Try to map all remaining free hugepages in a round-robin
> >>>     fashion like in this patch.
> >>> 3) Sort pages and choose the most suitable.
> >>>
> >>> This solution should decrease number of issues connected with
> >>> non-contiguous memory.
> >>
> >> Sorry for late reply, I was hoping for more comments from the community.
> >>
> >> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
> >> I think your proposal is good, could you consider implementing such approach on next release?
> > 
> > Sure, I can implement this for 17.08 release.
> > 
> >>>
> >>>> On 06/03/2017 09:34, Ilya Maximets wrote:
> >>>>> Hi all.
> >>>>>
> >>>>> So, what about this change?
> >>>>>
> >>>>> Best regards, Ilya Maximets.
> >>>>>
> >>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
> >>>>>> Currently EAL allocates hugepages one by one not paying
> >>>>>> attention from which NUMA node allocation was done.
> >>>>>>
> >>>>>> Such behaviour leads to allocation failure if number of
> >>>>>> available hugepages for application limited by cgroups
> >>>>>> or hugetlbfs and memory requested not only from the first
> >>>>>> socket.
> >>>>>>
> >>>>>> Example:
> >>>>>>      # 90 x 1GB hugepages available in a system
> >>>>>>
> >>>>>>      cgcreate -g hugetlb:/test
> >>>>>>      # Limit to 32GB of hugepages
> >>>>>>      cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> >>>>>>      # Request 4GB from each of 2 sockets
> >>>>>>      cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
> >>>>>>
> >>>>>>      EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> >>>>>>      EAL: 32 not 90 hugepages of size 1024 MB allocated
> >>>>>>      EAL: Not enough memory available on socket 1!
> >>>>>>           Requested: 4096MB, available: 0MB
> >>>>>>      PANIC in rte_eal_init():
> >>>>>>      Cannot init memory
> >>>>>>
> >>>>>>      This happens beacause all allocated pages are
> >>>>>>      on socket 0.
> >>>>>>
> >>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> >>>>>> hugepage to one of requested nodes in a round-robin fashion.
> >>>>>> In this case all allocated pages will be fairly distributed
> >>>>>> between all requested nodes.
> >>>>>>
> >>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> >>>>>> introduced and disabled by default because of external
> >>>>>> dependency from libnuma.
> >>>>>>
> >>>>>> Cc:<stable@dpdk.org>
> >>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
> >>>>>>
> >>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
> >>>>>> ---
> >>>>>>    config/common_base                       |  1 +
> >>>>>>    lib/librte_eal/Makefile                  |  4 ++
> >>>>>>    lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
> >>>>>>    mk/rte.app.mk                            |  3 ++
> >>>>>>    4 files changed, 74 insertions(+)
> >>
> >> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
> > 
> > Thanks.
> > 
> > Best regards, Ilya Maximets.
> > 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH] mem: balanced allocation of hugepages
  2017-04-07 15:44               ` Thomas Monjalon
@ 2017-04-10  7:11                 ` Ilya Maximets
  2017-04-10  7:51                   ` Sergio Gonzalez Monroy
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-04-10  7:11 UTC (permalink / raw)
  To: Thomas Monjalon, Sergio Gonzalez Monroy
  Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, stable, Bruce Richardson

On 07.04.2017 18:44, Thomas Monjalon wrote:
> 2017-04-07 18:14, Ilya Maximets:
>> Hi All.
>>
>> I wanted to ask (just to clarify current status):
>> Will this patch be included in current release (acked by maintainer)
>> and then I will upgrade it to hybrid logic or I will just prepare v3
>> with hybrid logic for 17.08 ?
> 
> What is your preferred option Ilya?

I have no strong opinion on this. One thought is that it would be
nice if someone else could test this functionality with the current
release before it is enabled by default in 17.08.

Tomorrow I'm going on vacation, so I'll post a rebased version today
(there are a few fuzzes against the current master) and you and Sergio
can decide what to do.

Best regards, Ilya Maximets.

> Sergio?
> 
> 
>> On 27.03.2017 17:43, Ilya Maximets wrote:
>>> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>>>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>>>> Hi Ilya,
>>>>>>
>>>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>>>
>>>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>>>
>>>>>> Currently at a high level regarding hugepages per numa node:
>>>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>>>> 2) Find out numa node of each hugepage.
>>>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>>>>
>>>>>> Using libnuma we could try to allocate hugepages per numa:
>>>>>> 1) Try to map as many hugepages from numa 0.
>>>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>>>> 3) Try to map as many hugepages from numa 1.
>>>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>>>>
>>>>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>>>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>>>
>>>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>>>
>>>>>> Thoughts?
>>>>> Hi Sergio,
>>>>>
>>>>> Thanks for your attention to this.
>>>>>
>>>>> For now, as we have some issues with non-contiguous
>>>>> hugepages, I'm thinking about following hybrid schema:
>>>>> 1) Allocate essential hugepages:
>>>>>     1.1) Allocate as many hugepages from numa N to
>>>>>          only fit requested memory for this numa.
>>>>>     1.2) repeat 1.1 for all numa nodes.
>>>>> 2) Try to map all remaining free hugepages in a round-robin
>>>>>     fashion like in this patch.
>>>>> 3) Sort pages and choose the most suitable.
>>>>>
>>>>> This solution should decrease number of issues connected with
>>>>> non-contiguous memory.
>>>>
>>>> Sorry for late reply, I was hoping for more comments from the community.
>>>>
>>>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>>>> I think your proposal is good, could you consider implementing such approach on next release?
>>>
>>> Sure, I can implement this for 17.08 release.
>>>
>>>>>
>>>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>>>> Hi all.
>>>>>>>
>>>>>>> So, what about this change?
>>>>>>>
>>>>>>> Best regards, Ilya Maximets.
>>>>>>>
>>>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>>>> attention from which NUMA node allocation was done.
>>>>>>>>
>>>>>>>> Such behaviour leads to allocation failure if number of
>>>>>>>> available hugepages for application limited by cgroups
>>>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>>>> socket.
>>>>>>>>
>>>>>>>> Example:
>>>>>>>>      # 90 x 1GB hugepages availavle in a system
>>>>>>>>
>>>>>>>>      cgcreate -g hugetlb:/test
>>>>>>>>      # Limit to 32GB of hugepages
>>>>>>>>      cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>>>>      # Request 4GB from each of 2 sockets
>>>>>>>>      cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>>>
>>>>>>>>      EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>>>>      EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>>>>      EAL: Not enough memory available on socket 1!
>>>>>>>>           Requested: 4096MB, available: 0MB
>>>>>>>>      PANIC in rte_eal_init():
>>>>>>>>      Cannot init memory
>>>>>>>>
>>>>>>>>      This happens beacause all allocated pages are
>>>>>>>>      on socket 0.
>>>>>>>>
>>>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>>>> In this case all allocated pages will be fairly distributed
>>>>>>>> between all requested nodes.
>>>>>>>>
>>>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>> introduced and disabled by default because of external
>>>>>>>> dependency from libnuma.
>>>>>>>>
>>>>>>>> Cc:<stable@dpdk.org>
>>>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>>>
>>>>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>>>>>> ---
>>>>>>>>    config/common_base                       |  1 +
>>>>>>>>    lib/librte_eal/Makefile                  |  4 ++
>>>>>>>>    lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>>>>    mk/rte.app.mk                            |  3 ++
>>>>>>>>    4 files changed, 74 insertions(+)
>>>>
>>>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>>
>>> Thanks.
>>>
>>> Best regards, Ilya Maximets.
>>>
> 
> 
> 
> 
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH] mem: balanced allocation of hugepages
  2017-04-10  7:11                 ` Ilya Maximets
@ 2017-04-10  7:51                   ` Sergio Gonzalez Monroy
  2017-04-10  8:05                     ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-04-10  7:51 UTC (permalink / raw)
  To: Ilya Maximets, Thomas Monjalon
  Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, stable, Bruce Richardson

On 10/04/2017 08:11, Ilya Maximets wrote:
> On 07.04.2017 18:44, Thomas Monjalon wrote:
>> 2017-04-07 18:14, Ilya Maximets:
>>> Hi All.
>>>
>>> I wanted to ask (just to clarify current status):
>>> Will this patch be included in current release (acked by maintainer)
>>> and then I will upgrade it to hybrid logic or I will just prepare v3
>>> with hybrid logic for 17.08 ?
>> What is your preferred option Ilya?
> I have no strong opinion on this. One thought is that it could be
> nice if someone else could test this functionality with current
> release before enabling it by default in 17.08.
>
> Tomorrow I'm going on vacation. So I'll post rebased version today
> (there are few fuzzes with current master) and you with Sergio may
> decide what to do.
>
> Best regards, Ilya Maximets.
>
>> Sergio?

I would be inclined towards v3 targeting v17.08. IMHO it would be
cleaner this way.

Sergio

>>
>>> On 27.03.2017 17:43, Ilya Maximets wrote:
>>>> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>>>>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>>>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>>>>> Hi Ilya,
>>>>>>>
>>>>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>>>>
>>>>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>>>>
>>>>>>> Currently at a high level regarding hugepages per numa node:
>>>>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>>>>> 2) Find out numa node of each hugepage.
>>>>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>>>>>
>>>>>>> Using libnuma we could try to allocate hugepages per numa:
>>>>>>> 1) Try to map as many hugepages from numa 0.
>>>>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>>>>> 3) Try to map as many hugepages from numa 1.
>>>>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>>>>>
>>>>>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>>>>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>>>>
>>>>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>>>>
>>>>>>> Thoughts?
>>>>>> Hi Sergio,
>>>>>>
>>>>>> Thanks for your attention to this.
>>>>>>
>>>>>> For now, as we have some issues with non-contiguous
>>>>>> hugepages, I'm thinking about following hybrid schema:
>>>>>> 1) Allocate essential hugepages:
>>>>>>      1.1) Allocate as many hugepages from numa N to
>>>>>>           only fit requested memory for this numa.
>>>>>>      1.2) repeat 1.1 for all numa nodes.
>>>>>> 2) Try to map all remaining free hugepages in a round-robin
>>>>>>      fashion like in this patch.
>>>>>> 3) Sort pages and choose the most suitable.
>>>>>>
>>>>>> This solution should decrease number of issues connected with
>>>>>> non-contiguous memory.
>>>>> Sorry for late reply, I was hoping for more comments from the community.
>>>>>
>>>>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>>>>> I think your proposal is good, could you consider implementing such approach on next release?
>>>> Sure, I can implement this for 17.08 release.
>>>>
>>>>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>>>>> Hi all.
>>>>>>>>
>>>>>>>> So, what about this change?
>>>>>>>>
>>>>>>>> Best regards, Ilya Maximets.
>>>>>>>>
>>>>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>>>>> attention from which NUMA node allocation was done.
>>>>>>>>>
>>>>>>>>> Such behaviour leads to allocation failure if number of
>>>>>>>>> available hugepages for application limited by cgroups
>>>>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>>>>> socket.
>>>>>>>>>
>>>>>>>>> Example:
>>>>>>>>>       # 90 x 1GB hugepages availavle in a system
>>>>>>>>>
>>>>>>>>>       cgcreate -g hugetlb:/test
>>>>>>>>>       # Limit to 32GB of hugepages
>>>>>>>>>       cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>>>>>       # Request 4GB from each of 2 sockets
>>>>>>>>>       cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>>>>
>>>>>>>>>       EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>>>>>       EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>>>>>       EAL: Not enough memory available on socket 1!
>>>>>>>>>            Requested: 4096MB, available: 0MB
>>>>>>>>>       PANIC in rte_eal_init():
>>>>>>>>>       Cannot init memory
>>>>>>>>>
>>>>>>>>>       This happens beacause all allocated pages are
>>>>>>>>>       on socket 0.
>>>>>>>>>
>>>>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>>>>> In this case all allocated pages will be fairly distributed
>>>>>>>>> between all requested nodes.
>>>>>>>>>
>>>>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>> introduced and disabled by default because of external
>>>>>>>>> dependency from libnuma.
>>>>>>>>>
>>>>>>>>> Cc:<stable@dpdk.org>
>>>>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>>>>
>>>>>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>>>>>>> ---
>>>>>>>>>     config/common_base                       |  1 +
>>>>>>>>>     lib/librte_eal/Makefile                  |  4 ++
>>>>>>>>>     lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>>>>>     mk/rte.app.mk                            |  3 ++
>>>>>>>>>     4 files changed, 74 insertions(+)
>>>>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>>> Thanks.
>>>>
>>>> Best regards, Ilya Maximets.
>>>>
>>
>>
>>
>>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v2] mem: balanced allocation of hugepages
       [not found]   ` <CGME20170410080425eucas1p27fd424ae58151f13b1a7a3723aa4ad1e@eucas1p2.samsung.com>
@ 2017-04-10  8:04     ` Ilya Maximets
  2017-04-10 10:03       ` Thomas Monjalon
       [not found]       ` <CGME20170606062227eucas1p2c49a95fb0fe11a4cadd5b4ceeb9712b1@eucas1p2.samsung.com>
  0 siblings, 2 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-04-10  8:04 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Currently EAL allocates hugepages one by one, not paying
attention to which NUMA node the allocation was made from.

Such behaviour leads to allocation failures if the number
of hugepages available to the application is limited by
cgroups or hugetlbfs and memory is requested from more
than just the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the MPOL_PREFERRED mempolicy for each
hugepage to one of the requested nodes in a round-robin fashion.
In this case all allocated pages will be fairly distributed
between all requested nodes.
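
For illustration only (not part of the patch), here is a stand-alone
sketch of the same round-robin idea; the node count and the per-node
requests are invented and the program has to be built with -lnuma:

	#include <errno.h>
	#include <limits.h>
	#include <numaif.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_NODES  8
	#define ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)

	int main(void)
	{
		/* invented per-node requests, like --socket-mem=4096,4096 */
		unsigned long socket_mem[MAX_NODES] = { 4096, 4096 };
		unsigned long nodemask[(MAX_NODES + ULONG_BITS - 1) / ULONG_BITS];
		unsigned long maxnode = 0;
		unsigned int i, page;
		int node = -1;

		for (i = 0; i < MAX_NODES; i++)
			if (socket_mem[i])
				maxnode = i + 1;

		for (page = 0; page < 4; page++) {
			/* pick the next node that requested any memory */
			do {
				node = (node + 1) % MAX_NODES;
			} while (!socket_mem[node]);

			memset(nodemask, 0, sizeof(nodemask));
			nodemask[node / ULONG_BITS] = 1UL << (node % ULONG_BITS);

			/*
			 * The patch passes maxnode + 1 to cope with an old
			 * kernel quirk; mirrored here for consistency.
			 */
			if (set_mempolicy(MPOL_PREFERRED, nodemask,
					  maxnode + 1) < 0) {
				fprintf(stderr, "set_mempolicy: %s\n",
					strerror(errno));
				return 1;
			}
			printf("hugepage %u will prefer node %d\n", page, node);
			/* ... mmap() and touch one hugepage here ... */
		}

		/* restore the default policy afterwards */
		if (set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
			fprintf(stderr, "reset failed: %s\n", strerror(errno));
		return 0;
	}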

A new config option, RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES, is
introduced and disabled by default because of the external
dependency on libnuma.
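
To actually try it with this version, the option has to be switched on
at build time, e.g. in config/common_base:

	CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y

and the libnuma development headers have to be installed for the build
to succeed.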

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---

Version 2:
	* rebased (fuzz in Makefile)

 config/common_base                       |  1 +
 lib/librte_eal/Makefile                  |  4 ++
 lib/librte_eal/linuxapp/eal/eal_memory.c | 65 ++++++++++++++++++++++++++++++++
 mk/rte.app.mk                            |  3 ++
 4 files changed, 73 insertions(+)

diff --git a/config/common_base b/config/common_base
index 5f2ad94..09782ff 100644
--- a/config/common_base
+++ b/config/common_base
@@ -102,6 +102,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 # Default driver path (or "" to disable)
 CONFIG_RTE_EAL_PMD_PATH=""
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index 5690bb4..e5f552a 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -37,4 +37,8 @@ DEPDIRS-linuxapp := common
 DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
 DEPDIRS-bsdapp := common
 
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
+
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 657c6f4..8cb7432 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -83,6 +83,9 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -377,6 +380,21 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -393,10 +411,48 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+	unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+	unsigned long maxnode = 0;
+	int node_id = -1;
+
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		if (internal_config.socket_mem[i])
+			maxnode = i + 1;
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+			while (!internal_config.socket_mem[node_id])
+				node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+
+			nodemask[node_id / ULONG_BITS] =
+						1UL << (node_id % ULONG_BITS);
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			/*
+			 * Due to old linux kernel bug (feature?) we have to
+			 * increase maxnode by 1. It will be unconditionally
+			 * decreased back to normal value inside the syscall
+			 * handler.
+			 */
+			if (set_mempolicy(MPOL_PREFERRED,
+					  nodemask, maxnode + 1) < 0) {
+				RTE_LOG(ERR, EAL,
+					"Failed to set policy MPOL_PREFERRED: "
+					"%s\n", strerror(errno));
+				return i;
+			}
+
+			nodemask[node_id / ULONG_BITS] = 0UL;
+		}
+#endif
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -507,6 +563,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+		RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
 	return i;
 }
 
@@ -591,6 +651,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 4c659e9..ca8e5fe 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -173,6 +173,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH] mem: balanced allocation of hugepages
  2017-04-10  7:51                   ` Sergio Gonzalez Monroy
@ 2017-04-10  8:05                     ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-04-10  8:05 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: dev, David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, stable, Bruce Richardson



On 10.04.2017 10:51, Sergio Gonzalez Monroy wrote:
> On 10/04/2017 08:11, Ilya Maximets wrote:
>> On 07.04.2017 18:44, Thomas Monjalon wrote:
>>> 2017-04-07 18:14, Ilya Maximets:
>>>> Hi All.
>>>>
>>>> I wanted to ask (just to clarify current status):
>>>> Will this patch be included in current release (acked by maintainer)
>>>> and then I will upgrade it to hybrid logic or I will just prepare v3
>>>> with hybrid logic for 17.08 ?
>>> What is your preferred option Ilya?
>> I have no strong opinion on this. One thought is that it could be
>> nice if someone else could test this functionality with current
>> release before enabling it by default in 17.08.
>>
>> Tomorrow I'm going on vacation. So I'll post rebased version today
>> (there are few fuzzes with current master) and you with Sergio may
>> decide what to do.
>>
>> Best regards, Ilya Maximets.
>>
>>> Sergio?
> 
> I would be inclined towards v3 targeting v17.08. IMHO it would be more clean this way.

OK.
I've sent a rebased version just in case.

> 
> Sergio
> 
>>>
>>>> On 27.03.2017 17:43, Ilya Maximets wrote:
>>>>> On 27.03.2017 16:01, Sergio Gonzalez Monroy wrote:
>>>>>> On 09/03/2017 12:57, Ilya Maximets wrote:
>>>>>>> On 08.03.2017 16:46, Sergio Gonzalez Monroy wrote:
>>>>>>>> Hi Ilya,
>>>>>>>>
>>>>>>>> I have done similar tests and as you already pointed out, 'numactl --interleave' does not seem to work as expected.
>>>>>>>> I have also checked that the issue can be reproduced with quota limit on hugetlbfs mount point.
>>>>>>>>
>>>>>>>> I would be inclined towards *adding libnuma as dependency* to DPDK to make memory allocation a bit more reliable.
>>>>>>>>
>>>>>>>> Currently at a high level regarding hugepages per numa node:
>>>>>>>> 1) Try to map all free hugepages. The total number of mapped hugepages depends if there were any limits, such as cgroups or quota in mount point.
>>>>>>>> 2) Find out numa node of each hugepage.
>>>>>>>> 3) Check if we have enough hugepages for requested memory in each numa socket/node.
>>>>>>>>
>>>>>>>> Using libnuma we could try to allocate hugepages per numa:
>>>>>>>> 1) Try to map as many hugepages from numa 0.
>>>>>>>> 2) Check if we have enough hugepages for requested memory in numa 0.
>>>>>>>> 3) Try to map as many hugepages from numa 1.
>>>>>>>> 4) Check if we have enough hugepages for requested memory in numa 1.
>>>>>>>>
>>>>>>>> This approach would improve failing scenarios caused by limits but It would still not fix issues regarding non-contiguous hugepages (worst case each hugepage is a memseg).
>>>>>>>> The non-contiguous hugepages issues are not as critical now that mempools can span over multiple memsegs/hugepages, but it is still a problem for any other library requiring big chunks of memory.
>>>>>>>>
>>>>>>>> Potentially if we were to add an option such as 'iommu-only' when all devices are bound to vfio-pci, we could have a reliable way to allocate hugepages by just requesting the number of pages from each numa.
>>>>>>>>
>>>>>>>> Thoughts?
>>>>>>> Hi Sergio,
>>>>>>>
>>>>>>> Thanks for your attention to this.
>>>>>>>
>>>>>>> For now, as we have some issues with non-contiguous
>>>>>>> hugepages, I'm thinking about following hybrid schema:
>>>>>>> 1) Allocate essential hugepages:
>>>>>>>      1.1) Allocate as many hugepages from numa N to
>>>>>>>           only fit requested memory for this numa.
>>>>>>>      1.2) repeat 1.1 for all numa nodes.
>>>>>>> 2) Try to map all remaining free hugepages in a round-robin
>>>>>>>      fashion like in this patch.
>>>>>>> 3) Sort pages and choose the most suitable.
>>>>>>>
>>>>>>> This solution should decrease number of issues connected with
>>>>>>> non-contiguous memory.
>>>>>> Sorry for late reply, I was hoping for more comments from the community.
>>>>>>
>>>>>> IMHO this should be default behavior, which means no config option and libnuma as EAL dependency.
>>>>>> I think your proposal is good, could you consider implementing such approach on next release?
>>>>> Sure, I can implement this for 17.08 release.
>>>>>
>>>>>>>> On 06/03/2017 09:34, Ilya Maximets wrote:
>>>>>>>>> Hi all.
>>>>>>>>>
>>>>>>>>> So, what about this change?
>>>>>>>>>
>>>>>>>>> Best regards, Ilya Maximets.
>>>>>>>>>
>>>>>>>>> On 16.02.2017 16:01, Ilya Maximets wrote:
>>>>>>>>>> Currently EAL allocates hugepages one by one not paying
>>>>>>>>>> attention from which NUMA node allocation was done.
>>>>>>>>>>
>>>>>>>>>> Such behaviour leads to allocation failure if number of
>>>>>>>>>> available hugepages for application limited by cgroups
>>>>>>>>>> or hugetlbfs and memory requested not only from the first
>>>>>>>>>> socket.
>>>>>>>>>>
>>>>>>>>>> Example:
>>>>>>>>>>       # 90 x 1GB hugepages availavle in a system
>>>>>>>>>>
>>>>>>>>>>       cgcreate -g hugetlb:/test
>>>>>>>>>>       # Limit to 32GB of hugepages
>>>>>>>>>>       cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>>>>>>>>       # Request 4GB from each of 2 sockets
>>>>>>>>>>       cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>>>>>>>>
>>>>>>>>>>       EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>>>>>>>>       EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>>>>>>>>       EAL: Not enough memory available on socket 1!
>>>>>>>>>>            Requested: 4096MB, available: 0MB
>>>>>>>>>>       PANIC in rte_eal_init():
>>>>>>>>>>       Cannot init memory
>>>>>>>>>>
>>>>>>>>>>       This happens beacause all allocated pages are
>>>>>>>>>>       on socket 0.
>>>>>>>>>>
>>>>>>>>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each
>>>>>>>>>> hugepage to one of requested nodes in a round-robin fashion.
>>>>>>>>>> In this case all allocated pages will be fairly distributed
>>>>>>>>>> between all requested nodes.
>>>>>>>>>>
>>>>>>>>>> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>>> introduced and disabled by default because of external
>>>>>>>>>> dependency from libnuma.
>>>>>>>>>>
>>>>>>>>>> Cc:<stable@dpdk.org>
>>>>>>>>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>>>>>>>>
>>>>>>>>>> Signed-off-by: Ilya Maximets<i.maximets@samsung.com>
>>>>>>>>>> ---
>>>>>>>>>>     config/common_base                       |  1 +
>>>>>>>>>>     lib/librte_eal/Makefile                  |  4 ++
>>>>>>>>>>     lib/librte_eal/linuxapp/eal/eal_memory.c | 66 ++++++++++++++++++++++++++++++++
>>>>>>>>>>     mk/rte.app.mk                            |  3 ++
>>>>>>>>>>     4 files changed, 74 insertions(+)
>>>>>> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>>>> Thanks.
>>>>>
>>>>> Best regards, Ilya Maximets.
>>>>>
>>>
>>>
>>>
>>>
> 
> 
> 
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v2] mem: balanced allocation of hugepages
  2017-04-10  8:04     ` [PATCH v2] " Ilya Maximets
@ 2017-04-10 10:03       ` Thomas Monjalon
       [not found]       ` <CGME20170606062227eucas1p2c49a95fb0fe11a4cadd5b4ceeb9712b1@eucas1p2.samsung.com>
  1 sibling, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-04-10 10:03 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy
  Cc: Ilya Maximets, dev, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

2017-04-10 11:04, Ilya Maximets:
> Currently EAL allocates hugepages one by one not paying
> attention from which NUMA node allocation was done.
> 
> Such behaviour leads to allocation failure if number of
> available hugepages for application limited by cgroups
> or hugetlbfs and memory requested not only from the first
> socket.
> 
> Example:
> 	# 90 x 1GB hugepages availavle in a system
> 
> 	cgcreate -g hugetlb:/test
> 	# Limit to 32GB of hugepages
> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> 	# Request 4GB from each of 2 sockets
> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
> 
> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
> 	EAL: Not enough memory available on socket 1!
> 	     Requested: 4096MB, available: 0MB
> 	PANIC in rte_eal_init():
> 	Cannot init memory
> 
> 	This happens beacause all allocated pages are
> 	on socket 0.
> 
> Fix this issue by setting mempolicy MPOL_PREFERRED for each
> hugepage to one of requested nodes in a round-robin fashion.
> In this case all allocated pages will be fairly distributed
> between all requested nodes.
> 
> New config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> introduced and disabled by default because of external
> dependency from libnuma.
> 
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>

Status: Changes Requested
per Sergio's advice: "I would be inclined towards v3 targeting v17.08."

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v3 0/2] Balanced allocation of hugepages
       [not found]       ` <CGME20170606062227eucas1p2c49a95fb0fe11a4cadd5b4ceeb9712b1@eucas1p2.samsung.com>
@ 2017-06-06  6:22         ` Ilya Maximets
       [not found]           ` <CGME20170606062232eucas1p11d2c304a28353d32b93ddfbd134d4da9@eucas1p1.samsung.com>
                             ` (2 more replies)
  0 siblings, 3 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06  6:22 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Version 3:
	* Implemented hybrid schema for allocation.
	* Removed an unneeded mempolicy change while remapping (orig == 0).
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                       |  2 +-
 lib/librte_eal/Makefile                  |  2 +
 lib/librte_eal/linuxapp/eal/eal_memory.c | 87 ++++++++++++++++++++++++++++++--
 mk/rte.app.mk                            |  1 +
 4 files changed, 87 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v3 1/2] mem: balanced allocation of hugepages
       [not found]           ` <CGME20170606062232eucas1p11d2c304a28353d32b93ddfbd134d4da9@eucas1p1.samsung.com>
@ 2017-06-06  6:22             ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06  6:22 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Currently EAL allocates hugepages one by one, not paying attention
to which NUMA node the allocation was made from.

Such behaviour leads to allocation failures if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested from more than just the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the MPOL_PREFERRED mempolicy for each hugepage
to one of the requested nodes using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate from NUMA node N only as many hugepages
		     as needed to fit the memory requested for this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
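
For illustration only, a condensed sketch of the selection rule behind
steps 1) and 2); the node count, page size and per-node requests below
are invented and the helper name is not taken from this patch:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_NODES 4

	/* Return the NUMA node to prefer for the next hugepage. */
	static int
	pick_node(uint64_t essential[], const uint64_t requested[],
		  uint64_t page_sz, int prev)
	{
		int i;

		/* 1) a node still missing its essential memory wins */
		for (i = 0; i < MAX_NODES; i++) {
			if (essential[i]) {
				essential[i] = essential[i] > page_sz ?
					       essential[i] - page_sz : 0;
				return i;
			}
		}

		/* 2) otherwise round-robin over the requested nodes */
		do {
			prev = (prev + 1) % MAX_NODES;
		} while (!requested[prev]);

		return prev;
	}

	int main(void)
	{
		const uint64_t GB = 1ULL << 30;
		const uint64_t requested[MAX_NODES] = { 2 * GB, 1 * GB };
		uint64_t essential[MAX_NODES] = { 2 * GB, 1 * GB };
		int page, node = -1;

		for (page = 0; page < 6; page++) {
			node = pick_node(essential, requested, GB, node);
			printf("hugepage %d -> prefer node %d\n", page, node);
		}
		return 0;
	}

With these numbers the first three pages prefer nodes 0, 0 and 1 (the
essential part) and the remaining ones alternate between the two
requested nodes.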

libnuma added as a general dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 lib/librte_eal/Makefile                  |  2 +
 lib/librte_eal/linuxapp/eal/eal_memory.c | 87 ++++++++++++++++++++++++++++++--
 mk/rte.app.mk                            |  1 +
 3 files changed, 86 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index 5690bb4..0a1af3a 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -37,4 +37,6 @@ DEPDIRS-linuxapp := common
 DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
 DEPDIRS-bsdapp := common
 
+LDLIBS += -lnuma
+
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 9c9baf6..35e5bce 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,7 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#include <numaif.h>
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -358,6 +359,19 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -366,18 +380,71 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+	unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+	unsigned long maxnode = 0;
+	int node_id = -1;
+
+	if (orig) {
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == RTE_MAX_NUMA_NODES) {
+				node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= RTE_MAX_NUMA_NODES;
+				}
+			} else {
+				node_id = j;
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			nodemask[node_id / ULONG_BITS] =
+						1UL << (node_id % ULONG_BITS);
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			/*
+			 * Due to old linux kernel bug (feature?) we have to
+			 * increase maxnode by 1. It will be unconditionally
+			 * decreased back to normal value inside the syscall
+			 * handler.
+			 */
+			if (set_mempolicy(MPOL_PREFERRED,
+					  nodemask, maxnode + 1) < 0) {
+				RTE_LOG(ERR, EAL,
+					"Failed to set policy MPOL_PREFERRED: "
+					"%s\n", strerror(errno));
+				return i;
+			}
+
+			nodemask[node_id / ULONG_BITS] = 0UL;
+		}
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -488,6 +555,9 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+		RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+
 	return i;
 }
 
@@ -572,6 +642,9 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
 			}
 		}
 	}
@@ -1010,6 +1083,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1027,7 +1105,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1070,7 +1149,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..b208e88 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,7 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v3 2/2] config: enable vhost numa awareness by default
       [not found]           ` <CGME20170606062237eucas1p1de58fdde1bff816e480e50308804ba7a@eucas1p1.samsung.com>
@ 2017-06-06  6:22             ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06  6:22 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Since libnuma is added as a general dependency for EAL,
it is safe to enable LIBRTE_VHOST_NUMA by default.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/common_base b/config/common_base
index c858769..db4cc1c 100644
--- a/config/common_base
+++ b/config/common_base
@@ -708,7 +708,7 @@ CONFIG_RTE_LIBRTE_PDUMP=y
 # Compile vhost user library
 #
 CONFIG_RTE_LIBRTE_VHOST=n
-CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
 
 #
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v4 0/2] Balanced allocation of hugepages
       [not found]           ` <CGME20170606081359eucas1p2f7eafa1abc346c5bb910c783df1d1520@eucas1p2.samsung.com>
@ 2017-06-06  8:13             ` Ilya Maximets
       [not found]               ` <CGME20170606081403eucas1p20c561b9177a51cfe58dd53b76cbfaaf7@eucas1p2.samsung.com>
                                 ` (2 more replies)
  0 siblings, 3 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06  8:13 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Version 4:
	* Fixed operation on systems without NUMA by adding a check
	  for NUMA support in the kernel.

Version 3:
	* Implemented hybrid schema for allocation.
	* Removed an unneeded mempolicy change while remapping (orig == 0).
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                       |  2 +-
 lib/librte_eal/Makefile                  |  2 +
 lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
 mk/rte.app.mk                            |  1 +
 4 files changed, 94 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v4 1/2] mem: balanced allocation of hugepages
       [not found]               ` <CGME20170606081403eucas1p20c561b9177a51cfe58dd53b76cbfaaf7@eucas1p2.samsung.com>
@ 2017-06-06  8:13                 ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06  8:13 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Currently EAL allocates hugepages one by one, not paying attention
to which NUMA node the allocation was made from.

Such behaviour leads to allocation failures if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested from more than just the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the MPOL_PREFERRED mempolicy for each hugepage
to one of the requested nodes using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate from NUMA node N only as many hugepages
		     as needed to fit the memory requested for this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
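
This version also probes at run time whether the kernel supports NUMA
before changing any policy.  A stand-alone version of that probe (the
helper name is ours, not DPDK's), built with -lnuma, could look like:

	#include <errno.h>
	#include <numaif.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool
	kernel_has_numa(void)
	{
		/*
		 * A no-op get_mempolicy() call fails with ENOSYS on
		 * kernels built without CONFIG_NUMA; any other result
		 * means the syscall is available.
		 */
		if (get_mempolicy(NULL, NULL, 0, NULL, 0) < 0 &&
		    errno == ENOSYS)
			return false;
		return true;
	}

	int main(void)
	{
		printf("kernel NUMA support: %s\n",
		       kernel_has_numa() ? "yes" : "no");
		return 0;
	}

The patch below performs the same check at the start of
map_all_hugepages() and leaves maxnode at 0 (i.e. keeps the old
behaviour) when NUMA is not supported.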

libnuma added as a general dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 lib/librte_eal/Makefile                  |  2 +
 lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
 mk/rte.app.mk                            |  1 +
 3 files changed, 93 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index 5690bb4..0a1af3a 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -37,4 +37,6 @@ DEPDIRS-linuxapp := common
 DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
 DEPDIRS-bsdapp := common
 
+LDLIBS += -lnuma
+
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 9c9baf6..5947434 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,7 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#include <numaif.h>
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -358,6 +359,19 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -366,18 +380,78 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+	unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+	unsigned long maxnode = 0;
+	int node_id = -1;
+	bool numa_available = true;
+
+	/* Check if kernel supports NUMA. */
+	if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		numa_available = false;
+	}
+
+	if (orig && numa_available) {
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == RTE_MAX_NUMA_NODES) {
+				node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= RTE_MAX_NUMA_NODES;
+				}
+			} else {
+				node_id = j;
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			nodemask[node_id / ULONG_BITS] =
+						1UL << (node_id % ULONG_BITS);
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			/*
+			 * Due to old linux kernel bug (feature?) we have to
+			 * increase maxnode by 1. It will be unconditionally
+			 * decreased back to normal value inside the syscall
+			 * handler.
+			 */
+			if (set_mempolicy(MPOL_PREFERRED,
+					  nodemask, maxnode + 1) < 0) {
+				RTE_LOG(ERR, EAL,
+					"Failed to set policy MPOL_PREFERRED: "
+					"%s\n", strerror(errno));
+				return i;
+			}
+
+			nodemask[node_id / ULONG_BITS] = 0UL;
+		}
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -488,6 +562,9 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+		RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+
 	return i;
 }
 
@@ -572,6 +649,9 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
 			}
 		}
 	}
@@ -1010,6 +1090,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1027,7 +1112,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1070,7 +1156,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..b208e88 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,7 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v4 2/2] config: enable vhost numa awareness by default
       [not found]               ` <CGME20170606081409eucas1p2eed4a7dc49f1028c723f8c0a7a61fadf@eucas1p2.samsung.com>
@ 2017-06-06  8:13                 ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06  8:13 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Since libnuma is added as a general dependency for EAL,
it is safe to enable LIBRTE_VHOST_NUMA by default.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/common_base b/config/common_base
index c858769..db4cc1c 100644
--- a/config/common_base
+++ b/config/common_base
@@ -708,7 +708,7 @@ CONFIG_RTE_LIBRTE_PDUMP=y
 # Compile vhost user library
 #
 CONFIG_RTE_LIBRTE_VHOST=n
-CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
 
 #
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v5 0/2] Balanced allocation of hugepages
       [not found]               ` <CGME20170606133348eucas1p1cc5c3c05f88b2101c2ea47b26e0cac24@eucas1p1.samsung.com>
@ 2017-06-06 13:33                 ` Ilya Maximets
       [not found]                   ` <CGME20170606133352eucas1p13d1e860e996057a50a084f9365189e4d@eucas1p1.samsung.com>
                                     ` (3 more replies)
  0 siblings, 4 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06 13:33 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Sorry for respinning the series so frequently.

Version 5:
	* Fixed the shared build. (The automated build test will fail
	  anyway because libnuma-devel is not installed on the build
	  servers.)

Version 4:
	* Fixed operation on systems without NUMA by adding a check
	  for NUMA support in the kernel.

Version 3:
	* Implemented hybrid schema for allocation.
	* Removed an unneeded mempolicy change while remapping (orig == 0).
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                       |  2 +-
 lib/librte_eal/linuxapp/eal/Makefile     |  1 +
 lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
 mk/rte.app.mk                            |  3 +
 4 files changed, 95 insertions(+), 5 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v5 1/2] mem: balanced allocation of hugepages
       [not found]                   ` <CGME20170606133352eucas1p13d1e860e996057a50a084f9365189e4d@eucas1p1.samsung.com>
@ 2017-06-06 13:33                     ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06 13:33 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Currently EAL allocates hugepages one by one, not paying attention
to which NUMA node the allocation was made from.

Such behaviour leads to allocation failures if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested from more than just the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the MPOL_PREFERRED mempolicy for each hugepage
to one of the requested nodes using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate from NUMA node N only as many hugepages
		     as needed to fit the memory requested for this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
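
Independently of DPDK, the result can be double-checked by asking the
kernel which node backs a mapped page.  A small sketch, assuming a
2 MB default hugepage size, at least one free hugepage and a build
with -lnuma:

	#define _GNU_SOURCE
	#include <numaif.h>
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2UL << 20;	/* one 2 MB hugepage (assumed) */
		int node = -1;
		char *p;

		p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		p[0] = 0;	/* fault the page in so it gets a home node */

		/* MPOL_F_NODE | MPOL_F_ADDR yields the node of the page */
		if (get_mempolicy(&node, NULL, 0, p,
				  MPOL_F_NODE | MPOL_F_ADDR) < 0)
			perror("get_mempolicy");
		else
			printf("hugepage is on node %d\n", node);

		munmap(p, len);
		return 0;
	}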

libnuma added as a general dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 lib/librte_eal/linuxapp/eal/Makefile     |  1 +
 lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
 mk/rte.app.mk                            |  3 +
 3 files changed, 94 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..1440fc5 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,7 @@ LDLIBS += -ldl
 LDLIBS += -lpthread
 LDLIBS += -lgcc_s
 LDLIBS += -lrt
+LDLIBS += -lnuma
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 9c9baf6..5947434 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,7 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#include <numaif.h>
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -358,6 +359,19 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -366,18 +380,78 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+	unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+	unsigned long maxnode = 0;
+	int node_id = -1;
+	bool numa_available = true;
+
+	/* Check if kernel supports NUMA. */
+	if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		numa_available = false;
+	}
+
+	if (orig && numa_available) {
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == RTE_MAX_NUMA_NODES) {
+				node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= RTE_MAX_NUMA_NODES;
+				}
+			} else {
+				node_id = j;
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			nodemask[node_id / ULONG_BITS] =
+						1UL << (node_id % ULONG_BITS);
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			/*
+			 * Due to old linux kernel bug (feature?) we have to
+			 * increase maxnode by 1. It will be unconditionally
+			 * decreased back to normal value inside the syscall
+			 * handler.
+			 */
+			if (set_mempolicy(MPOL_PREFERRED,
+					  nodemask, maxnode + 1) < 0) {
+				RTE_LOG(ERR, EAL,
+					"Failed to set policy MPOL_PREFERRED: "
+					"%s\n", strerror(errno));
+				return i;
+			}
+
+			nodemask[node_id / ULONG_BITS] = 0UL;
+		}
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -488,6 +562,9 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+		RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+
 	return i;
 }
 
@@ -572,6 +649,9 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
 			}
 		}
 	}
@@ -1010,6 +1090,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1027,7 +1112,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1070,7 +1156,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..5f370c9 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v5 2/2] config: enable vhost numa awareness by default
       [not found]                   ` <CGME20170606133354eucas1p284ae347e9ff07d6e8ab2bc09344ad1e5@eucas1p2.samsung.com>
@ 2017-06-06 13:33                     ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-06 13:33 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Ilya Maximets

Since libnuma is added as a general dependency for EAL,
it is safe to enable LIBRTE_VHOST_NUMA by default.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/common_base b/config/common_base
index c858769..db4cc1c 100644
--- a/config/common_base
+++ b/config/common_base
@@ -708,7 +708,7 @@ CONFIG_RTE_LIBRTE_PDUMP=y
 # Compile vhost user library
 #
 CONFIG_RTE_LIBRTE_VHOST=n
-CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
 
 #
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-06 13:33                 ` [PATCH v5 0/2] Balanced allocation of hugepages Ilya Maximets
       [not found]                   ` <CGME20170606133352eucas1p13d1e860e996057a50a084f9365189e4d@eucas1p1.samsung.com>
       [not found]                   ` <CGME20170606133354eucas1p284ae347e9ff07d6e8ab2bc09344ad1e5@eucas1p2.samsung.com>
@ 2017-06-08 11:21                   ` Ilya Maximets
  2017-06-08 12:14                     ` Bruce Richardson
       [not found]                   ` <CGME20170621080434eucas1p18d3d4e4133c1cf885c849d022806408d@eucas1p1.samsung.com>
  3 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-08 11:21 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon,
	Bruce Richardson
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei

Hi everyone,

I just want to clarify the current status of these patches.
As I understand it, moving to the new build system (for example,
meson+ninja as proposed[1] by Bruce) is a very long process.
But we have issues with imbalanced memory allocation now, and
IMHO it's better to fix them in the near future.

The latest version (v5) of the balanced allocation patches adds libnuma
as a general, unconditional dependency, which conflicts with the
current DPDK policies.

So, there are 2 options:

	1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	   from the first version of the patch and disable it by default.

	2. Keep patch as it is now and make everyone install libnuma
	   for successful build.

I have no preference between the above options. I'm asking for your opinions.

Bruce, Sergio, Thomas, what do you think?

[1] http://dpdk.org/ml/archives/dev/2017-June/067428.html

Best regards, Ilya Maximets.

On 06.06.2017 16:33, Ilya Maximets wrote:
> Sorry for so frequent respinning of the series.
> 
> Version 5:
> 	* Fixed shared build. (Automated build test will fail
> 	  anyway because libnuma-devel not installed on build servers)
> 
> Version 4:
> 	* Fixed work on systems without NUMA by adding check for NUMA
> 	  support in kernel.
> 
> Version 3:
> 	* Implemented hybrid schema for allocation.
> 	* Fixed not needed mempolicy change while remapping. (orig = 0)
> 	* Added patch to enable VHOST_NUMA by default.
> 
> Version 2:
> 	* rebased (fuzz in Makefile)
> 
> Ilya Maximets (2):
>   mem: balanced allocation of hugepages
>   config: enable vhost numa awareness by default
> 
>  config/common_base                       |  2 +-
>  lib/librte_eal/linuxapp/eal/Makefile     |  1 +
>  lib/librte_eal/linuxapp/eal/eal_memory.c | 94 ++++++++++++++++++++++++++++++--
>  mk/rte.app.mk                            |  3 +
>  4 files changed, 95 insertions(+), 5 deletions(-)
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-08 11:21                   ` [PATCH v5 0/2] Balanced allocation of hugepages Ilya Maximets
@ 2017-06-08 12:14                     ` Bruce Richardson
  2017-06-08 15:44                       ` Sergio Gonzalez Monroy
  0 siblings, 1 reply; 99+ messages in thread
From: Bruce Richardson @ 2017-06-08 12:14 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei

On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
> Hi everyone,
> 
> I just want to clarify current status of these patches.
> As I understand, moving to the new build system (for example,
> meson+ninja as proposed[1] by Bruce) is a very long process.
> But we have issues with imbalanced memory allocation now, and
> IMHO it's better to fix them in a near future.
> 
> Latest version (v5) of balanced allocation patches adds linbuma
> as general unconditional dependency which conflicts with the
> current DPDK policies.
> 
> So, there are 2 option:
> 
> 	1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> 	   from the first version of the patch and disable it by default.
> 
> 	2. Keep patch as it is now and make everyone install libnuma
> 	   for successful build.
> 
> I have no preferences about above options. I'm asking your opinions.
> 
> Bruce, Sergio, Thomas, what do you think?
> 
> [1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
> 
> Best regards, Ilya Maximets.
>
I would be ok with having libnuma as a dependency, so I think I'd prefer
option 2 to 1, assuming libnuma is available in all major Linux distros.

/Bruce

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-08 12:14                     ` Bruce Richardson
@ 2017-06-08 15:44                       ` Sergio Gonzalez Monroy
  2017-06-14  6:11                         ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-06-08 15:44 UTC (permalink / raw)
  To: Bruce Richardson, Ilya Maximets
  Cc: dev, David Marchand, Thomas Monjalon, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

On 08/06/2017 13:14, Bruce Richardson wrote:
> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>> Hi everyone,
>>
>> I just want to clarify current status of these patches.
>> As I understand, moving to the new build system (for example,
>> meson+ninja as proposed[1] by Bruce) is a very long process.
>> But we have issues with imbalanced memory allocation now, and
>> IMHO it's better to fix them in a near future.
>>
>> Latest version (v5) of balanced allocation patches adds linbuma
>> as general unconditional dependency which conflicts with the
>> current DPDK policies.
>>
>> So, there are 2 option:
>>
>> 	1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>> 	   from the first version of the patch and disable it by default.
>>
>> 	2. Keep patch as it is now and make everyone install libnuma
>> 	   for successful build.
>>
>> I have no preferences about above options. I'm asking your opinions.
>>
>> Bruce, Sergio, Thomas, what do you think?
>>
>> [1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
>>
>> Best regards, Ilya Maximets.
>>
> I would be ok with having libnuma as a dependency, so I think I'd prefer
> option 2 to 1, assuming libnuma is available in all major Linux distros.
>
> /Bruce

+1 on option 2 (current patch and libnuma as DPDK dependency).

Sergio

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-08 15:44                       ` Sergio Gonzalez Monroy
@ 2017-06-14  6:11                         ` Ilya Maximets
  2017-06-19 11:10                           ` Hemant Agrawal
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-14  6:11 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, Bruce Richardson
  Cc: dev, David Marchand, Thomas Monjalon, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

On 08.06.2017 18:44, Sergio Gonzalez Monroy wrote:
> On 08/06/2017 13:14, Bruce Richardson wrote:
>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>> Hi everyone,
>>>
>>> I just want to clarify current status of these patches.
>>> As I understand, moving to the new build system (for example,
>>> meson+ninja as proposed[1] by Bruce) is a very long process.
>>> But we have issues with imbalanced memory allocation now, and
>>> IMHO it's better to fix them in a near future.
>>>
>>> Latest version (v5) of balanced allocation patches adds linbuma
>>> as general unconditional dependency which conflicts with the
>>> current DPDK policies.
>>>
>>> So, there are 2 option:
>>>
>>>     1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>        from the first version of the patch and disable it by default.
>>>
>>>     2. Keep patch as it is now and make everyone install libnuma
>>>        for successful build.
>>>
>>> I have no preferences about above options. I'm asking your opinions.
>>>
>>> Bruce, Sergio, Thomas, what do you think?
>>>
>>> [1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
>>>
>>> Best regards, Ilya Maximets.
>>>
>> I would be ok with having libnuma as a dependency, so I think I'd prefer
>> option 2 to 1, assuming libnuma is available in all major Linux distros.
>>
>> /Bruce
> 
> +1 on option 2 (current patch and libnuma as DPDK dependency).
> 
> Sergio
> 

Ok. In this case I'm waiting for review.

Also, someone needs to install the libnuma development package in the
automatic build test environment. Otherwise there will be constant
compilation test failures like this one:
	http://dpdk.org/ml/archives/test-report/2017-June/021437.html

Best regards, Ilya Maximets.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-14  6:11                         ` Ilya Maximets
@ 2017-06-19 11:10                           ` Hemant Agrawal
  2017-06-20 13:07                             ` Thomas Monjalon
  0 siblings, 1 reply; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-19 11:10 UTC (permalink / raw)
  To: Ilya Maximets, Sergio Gonzalez Monroy, Bruce Richardson
  Cc: dev, David Marchand, Thomas Monjalon, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

On 6/14/2017 11:41 AM, Ilya Maximets wrote:
> On 08.06.2017 18:44, Sergio Gonzalez Monroy wrote:
>> On 08/06/2017 13:14, Bruce Richardson wrote:
>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>>> Hi everyone,
>>>>
>>>> I just want to clarify current status of these patches.
>>>> As I understand, moving to the new build system (for example,
>>>> meson+ninja as proposed[1] by Bruce) is a very long process.
>>>> But we have issues with imbalanced memory allocation now, and
>>>> IMHO it's better to fix them in a near future.
>>>>
>>>> Latest version (v5) of balanced allocation patches adds linbuma
>>>> as general unconditional dependency which conflicts with the
>>>> current DPDK policies.
>>>>
>>>> So, there are 2 option:
>>>>
>>>>     1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>        from the first version of the patch and disable it by default.
>>>>
>>>>     2. Keep patch as it is now and make everyone install libnuma
>>>>        for successful build.
>>>>
>>>> I have no preferences about above options. I'm asking your opinions.
>>>>
>>>> Bruce, Sergio, Thomas, what do you think?
>>>>
>>>> [1] http://dpdk.org/ml/archives/dev/2017-June/067428.html
>>>>
>>>> Best regards, Ilya Maximets.
>>>>
>>> I would be ok with having libnuma as a dependency, so I think I'd prefer
>>> option 2 to 1, assuming libnuma is available in all major Linux distros.
>>>
>>> /Bruce
>>
>> +1 on option 2 (current patch and libnuma as DPDK dependency).
>>
>> Sergio
>>
>
> Ok. In this case I'm waiting for review.
>
> And someone need to install libnuma development package in automatic
> build test environment. Otherwise there will be constant compilation
> test failures like this:
> 	http://dpdk.org/ml/archives/test-report/2017-June/021437.html
>
> Best regards, Ilya Maximets.
>
+1 for option 1
It will be an issue and an undesired dependency for SoCs that do not
support a NUMA architecture.

It can be added to the config by whoever desires to use it by default.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-19 11:10                           ` Hemant Agrawal
@ 2017-06-20 13:07                             ` Thomas Monjalon
  2017-06-20 13:58                               ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-20 13:07 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, Hemant Agrawal, Sergio Gonzalez Monroy, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

19/06/2017 13:10, Hemant Agrawal:
> >>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
> >>>> So, there are 2 option:
> >>>>
> >>>>     1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> >>>>        from the first version of the patch and disable it by default.
> >>>>
> >>>>     2. Keep patch as it is now and make everyone install libnuma
> >>>>        for successful build.
> >
> +1 for option 1
> It will be a issue and undesired dependency for SoCs, not supporting 
> NUMA architecture.
> 
> It can be added to the config, who desired to use it by default.

Yes I agree, it cannot be a dependency for architectures which
do not support NUMA.
Please can we rework the patch so that only one node is assumed
if NUMA is disabled for the architecture?

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-20 13:07                             ` Thomas Monjalon
@ 2017-06-20 13:58                               ` Ilya Maximets
  2017-06-20 14:35                                 ` Thomas Monjalon
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-20 13:58 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, Hemant Agrawal, Sergio Gonzalez Monroy, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

On 20.06.2017 16:07, Thomas Monjalon wrote:
> 19/06/2017 13:10, Hemant Agrawal:
>>>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>>>>> So, there are 2 option:
>>>>>>
>>>>>>     1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>        from the first version of the patch and disable it by default.
>>>>>>
>>>>>>     2. Keep patch as it is now and make everyone install libnuma
>>>>>>        for successful build.
>>>
>> +1 for option 1
>> It will be a issue and undesired dependency for SoCs, not supporting 
>> NUMA architecture.
>>
>> It can be added to the config, who desired to use it by default.
> 
> Yes I agree, it cannot be a dependency for architectures which
> do not support NUMA.
> Please can we rework the patch so that only one node is assumed
> if NUMA is disabled for the architecture?

We still don't have a dynamic build-time configuration system.
To make get/set_mempolicy work we need to include <numaif.h>
and have libnuma available for successful linkage.
This means that the only way to avoid libnuma as a dependency
is to bring back the configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
as it was in the first version of the patch.

There is, actually, a third option (besides the 2 already described):

	3. Bring back the config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	   from the first version of the patch and *enable* it by default.
	   In this case anyone who doesn't want libnuma as a dependency
	   will be able to disable the config option manually.

Thomas, what do you think? Bruce? Sergio?

P.S. We could always implement the syscall wrappers by hand without any
     external dependencies, but I don't think that's a good decision.
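
A rough sketch of the compile-time fallback being discussed (the helper
name hugepage_prefer_node() is made up for illustration; with the option
disabled, neither <numaif.h> nor libnuma is needed and the code effectively
behaves as if there were a single node):

	#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	#include <numaif.h>

	/* Ask the kernel to prefer the nodes set in 'nodemask'. */
	static int
	hugepage_prefer_node(unsigned long *nodemask, unsigned long maxnode)
	{
		return set_mempolicy(MPOL_PREFERRED, nodemask, maxnode);
	}
	#else
	static int
	hugepage_prefer_node(unsigned long *nodemask, unsigned long maxnode)
	{
		(void)nodemask;
		(void)maxnode;
		return 0;	/* No NUMA awareness: behave as a single node. */
	}
	#endif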

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-20 13:58                               ` Ilya Maximets
@ 2017-06-20 14:35                                 ` Thomas Monjalon
  2017-06-20 14:58                                   ` Sergio Gonzalez Monroy
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-20 14:35 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, Hemant Agrawal, Sergio Gonzalez Monroy, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

20/06/2017 15:58, Ilya Maximets:
> On 20.06.2017 16:07, Thomas Monjalon wrote:
> > 19/06/2017 13:10, Hemant Agrawal:
> >>>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
> >>>>>> So, there are 2 option:
> >>>>>>
> >>>>>>     1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> >>>>>>        from the first version of the patch and disable it by default.
> >>>>>>
> >>>>>>     2. Keep patch as it is now and make everyone install libnuma
> >>>>>>        for successful build.
> >>>
> >> +1 for option 1
> >> It will be a issue and undesired dependency for SoCs, not supporting 
> >> NUMA architecture.
> >>
> >> It can be added to the config, who desired to use it by default.
> > 
> > Yes I agree, it cannot be a dependency for architectures which
> > do not support NUMA.
> > Please can we rework the patch so that only one node is assumed
> > if NUMA is disabled for the architecture?
> 
> We're still don't have dynamic build time configuration system.
> To make get/set_mempolicy work we need to include <numaif.h>
> and have libnuma for successful linkage.
> This means that the only option to not have libnuma as dependency
> is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> as it was in the first version of the patch.
> 
> There is, actually, the third option (besides 2 already described):
> 
> 	3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> 	   from the first version of the patch and *enable* it by default.
> 	   In this case anyone who doesn't want to have libnuma as dependency
> 	   will be able to disable the config option manually.
> 
> Thomas, what do you think? Bruce? Sergio?

It should be enabled on x86 and ppc, and disabled in other
default configurations (ARM for now).

> P.S. We're always able to implement syscall wrappers by hands without any
>      external dependencies, but I don't think it's a good decision.

I agree to use libnuma instead of re-inventing the wheel.
Let's just make it optional at build time and fall back to one node
if disabled.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-20 14:35                                 ` Thomas Monjalon
@ 2017-06-20 14:58                                   ` Sergio Gonzalez Monroy
  2017-06-20 15:41                                     ` Jerin Jacob
  0 siblings, 1 reply; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-06-20 14:58 UTC (permalink / raw)
  To: Thomas Monjalon, Ilya Maximets
  Cc: dev, Hemant Agrawal, Bruce Richardson, David Marchand,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei

On 20/06/2017 15:35, Thomas Monjalon wrote:
> 20/06/2017 15:58, Ilya Maximets:
>> On 20.06.2017 16:07, Thomas Monjalon wrote:
>>> 19/06/2017 13:10, Hemant Agrawal:
>>>>>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>>>>>>> So, there are 2 option:
>>>>>>>>
>>>>>>>>      1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>         from the first version of the patch and disable it by default.
>>>>>>>>
>>>>>>>>      2. Keep patch as it is now and make everyone install libnuma
>>>>>>>>         for successful build.
>>>> +1 for option 1
>>>> It will be a issue and undesired dependency for SoCs, not supporting
>>>> NUMA architecture.
>>>>
>>>> It can be added to the config, who desired to use it by default.
>>> Yes I agree, it cannot be a dependency for architectures which
>>> do not support NUMA.
>>> Please can we rework the patch so that only one node is assumed
>>> if NUMA is disabled for the architecture?

Ilya, I missed that libnuma is not supported on ARM.

>> We're still don't have dynamic build time configuration system.
>> To make get/set_mempolicy work we need to include <numaif.h>
>> and have libnuma for successful linkage.
>> This means that the only option to not have libnuma as dependency
>> is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>> as it was in the first version of the patch.
>>
>> There is, actually, the third option (besides 2 already described):
>>
>> 	3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>> 	   from the first version of the patch and *enable* it by default.
>> 	   In this case anyone who doesn't want to have libnuma as dependency
>> 	   will be able to disable the config option manually.
>>
>> Thomas, what do you think? Bruce? Sergio?
> It should be enabled on x86 and ppc, and disabled in other
> default configurations (ARM for now).

Agree.

>> P.S. We're always able to implement syscall wrappers by hands without any
>>       external dependencies, but I don't think it's a good decision.
> I agree to use libnuma instead of re-inventing the wheel.
> Let's just make it optional at build time and fallback on one node
> if disabled.

That is the simple way out.

Sergio

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-20 14:58                                   ` Sergio Gonzalez Monroy
@ 2017-06-20 15:41                                     ` Jerin Jacob
  2017-06-20 15:51                                       ` Sergio Gonzalez Monroy
  2017-06-21  8:14                                       ` Hemant Agrawal
  0 siblings, 2 replies; 99+ messages in thread
From: Jerin Jacob @ 2017-06-20 15:41 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy
  Cc: Thomas Monjalon, Ilya Maximets, dev, Hemant Agrawal,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

-----Original Message-----
> Date: Tue, 20 Jun 2017 15:58:50 +0100
> From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
> To: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>  <i.maximets@samsung.com>
> CC: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>, Bruce Richardson
>  <bruce.richardson@intel.com>, David Marchand <david.marchand@6wind.com>,
>  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>,
>  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
> User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
>  Thunderbird/45.1.1
> 
> On 20/06/2017 15:35, Thomas Monjalon wrote:
> > 20/06/2017 15:58, Ilya Maximets:
> > > On 20.06.2017 16:07, Thomas Monjalon wrote:
> > > > 19/06/2017 13:10, Hemant Agrawal:
> > > > > > > > On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
> > > > > > > > > So, there are 2 option:
> > > > > > > > > 
> > > > > > > > >      1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > > > > > > >         from the first version of the patch and disable it by default.
> > > > > > > > > 
> > > > > > > > >      2. Keep patch as it is now and make everyone install libnuma
> > > > > > > > >         for successful build.
> > > > > +1 for option 1
> > > > > It will be a issue and undesired dependency for SoCs, not supporting
> > > > > NUMA architecture.
> > > > > 
> > > > > It can be added to the config, who desired to use it by default.
> > > > Yes I agree, it cannot be a dependency for architectures which
> > > > do not support NUMA.
> > > > Please can we rework the patch so that only one node is assumed
> > > > if NUMA is disabled for the architecture?
> 
> Ilya, I missed that libnuma is not supported on ARM.

It is supported on arm64, and arm64 has NUMA machines (thunderx, thunderx2) too.

[dpdk.org] $ dpkg-query -L libnuma-dev
/.
/usr
/usr/lib
/usr/lib/aarch64-linux-gnu
/usr/lib/aarch64-linux-gnu/libnuma.a
/usr/share
/usr/share/man
/usr/share/man/man3
/usr/share/man/man3/numa.3.gz
/usr/share/doc
/usr/share/doc/libnuma-dev
/usr/share/doc/libnuma-dev/copyright
/usr/include
/usr/include/numaif.h
/usr/include/numa.h
/usr/include/numacompat1.h
/usr/lib/aarch64-linux-gnu/libnuma.so


> 
> > > We're still don't have dynamic build time configuration system.
> > > To make get/set_mempolicy work we need to include <numaif.h>
> > > and have libnuma for successful linkage.
> > > This means that the only option to not have libnuma as dependency
> > > is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > as it was in the first version of the patch.
> > > 
> > > There is, actually, the third option (besides 2 already described):
> > > 
> > > 	3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > 	   from the first version of the patch and *enable* it by default.
> > > 	   In this case anyone who doesn't want to have libnuma as dependency
> > > 	   will be able to disable the config option manually.
> > > 
> > > Thomas, what do you think? Bruce? Sergio?
> > It should be enabled on x86 and ppc, and disabled in other
> > default configurations (ARM for now).
> 
> Agree.
> 
> > > P.S. We're always able to implement syscall wrappers by hands without any
> > >       external dependencies, but I don't think it's a good decision.
> > I agree to use libnuma instead of re-inventing the wheel.
> > Let's just make it optional at build time and fallback on one node
> > if disabled.
> 
> That is the simple way out.
> 
> Sergio

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-20 15:41                                     ` Jerin Jacob
@ 2017-06-20 15:51                                       ` Sergio Gonzalez Monroy
  2017-06-21  8:14                                       ` Hemant Agrawal
  1 sibling, 0 replies; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-06-20 15:51 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ilya Maximets, dev, Hemant Agrawal,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

On 20/06/2017 16:41, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Tue, 20 Jun 2017 15:58:50 +0100
>> From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>> To: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>>   <i.maximets@samsung.com>
>> CC: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>, Bruce Richardson
>>   <bruce.richardson@intel.com>, David Marchand <david.marchand@6wind.com>,
>>   Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>,
>>   Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>>   <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
>> Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
>> User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
>>   Thunderbird/45.1.1
>>
>> On 20/06/2017 15:35, Thomas Monjalon wrote:
>>> 20/06/2017 15:58, Ilya Maximets:
>>>> On 20.06.2017 16:07, Thomas Monjalon wrote:
>>>>> 19/06/2017 13:10, Hemant Agrawal:
>>>>>>>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>>>>>>>>> So, there are 2 option:
>>>>>>>>>>
>>>>>>>>>>       1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>>>          from the first version of the patch and disable it by default.
>>>>>>>>>>
>>>>>>>>>>       2. Keep patch as it is now and make everyone install libnuma
>>>>>>>>>>          for successful build.
>>>>>> +1 for option 1
>>>>>> It will be a issue and undesired dependency for SoCs, not supporting
>>>>>> NUMA architecture.
>>>>>>
>>>>>> It can be added to the config, who desired to use it by default.
>>>>> Yes I agree, it cannot be a dependency for architectures which
>>>>> do not support NUMA.
>>>>> Please can we rework the patch so that only one node is assumed
>>>>> if NUMA is disabled for the architecture?
>> Ilya, I missed that libnuma is not supported on ARM.
> It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
>
> [dpdk.org] $ dpkg-query -L libnuma-dev
> /.
> /usr
> /usr/lib
> /usr/lib/aarch64-linux-gnu
> /usr/lib/aarch64-linux-gnu/libnuma.a
> /usr/share
> /usr/share/man
> /usr/share/man/man3
> /usr/share/man/man3/numa.3.gz
> /usr/share/doc
> /usr/share/doc/libnuma-dev
> /usr/share/doc/libnuma-dev/copyright
> /usr/include
> /usr/include/numaif.h
> /usr/include/numa.h
> /usr/include/numacompat1.h
> /usr/lib/aarch64-linux-gnu/libnuma.so
>

Is ARMv7 then the only supported arch missing libnuma support?

>>>> We're still don't have dynamic build time configuration system.
>>>> To make get/set_mempolicy work we need to include <numaif.h>
>>>> and have libnuma for successful linkage.
>>>> This means that the only option to not have libnuma as dependency
>>>> is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>> as it was in the first version of the patch.
>>>>
>>>> There is, actually, the third option (besides 2 already described):
>>>>
>>>> 	3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>> 	   from the first version of the patch and *enable* it by default.
>>>> 	   In this case anyone who doesn't want to have libnuma as dependency
>>>> 	   will be able to disable the config option manually.
>>>>
>>>> Thomas, what do you think? Bruce? Sergio?
>>> It should be enabled on x86 and ppc, and disabled in other
>>> default configurations (ARM for now).
>> Agree.
>>
>>>> P.S. We're always able to implement syscall wrappers by hands without any
>>>>        external dependencies, but I don't think it's a good decision.
>>> I agree to use libnuma instead of re-inventing the wheel.
>>> Let's just make it optional at build time and fallback on one node
>>> if disabled.
>> That is the simple way out.
>>
>> Sergio

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v6 0/2] Balanced allocation of hugepages
       [not found]                   ` <CGME20170621080434eucas1p18d3d4e4133c1cf885c849d022806408d@eucas1p1.samsung.com>
@ 2017-06-21  8:04                     ` Ilya Maximets
       [not found]                       ` <CGME20170621080441eucas1p2dc01b29e7c8e4c1546ace6cd76ae51ff@eucas1p2.samsung.com>
                                         ` (2 more replies)
  0 siblings, 3 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21  8:04 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Version 6:
	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	  brought back. Enabled by default for x86, ppc and thunderx.

Version 5:
	* Fixed shared build. (The automated build test will fail
	  anyway because libnuma-devel is not installed on the build servers.)

Version 4:
	* Fixed operation on systems without NUMA by adding a check for
	  NUMA support in the kernel.

Version 3:
	* Implemented hybrid schema for allocation.
	* Removed an unneeded mempolicy change while remapping. (orig = 0)
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                           |   1 +
 config/common_linuxapp                       |   3 +
 config/defconfig_arm-armv7a-linuxapp-gcc     |   4 +
 config/defconfig_arm64-armv8a-linuxapp-gcc   |   4 +
 config/defconfig_arm64-thunderx-linuxapp-gcc |   4 +
 lib/librte_eal/linuxapp/eal/Makefile         |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
 mk/rte.app.mk                                |   3 +
 8 files changed, 123 insertions(+), 4 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v6 1/2] mem: balanced allocation of hugepages
       [not found]                       ` <CGME20170621080441eucas1p2dc01b29e7c8e4c1546ace6cd76ae51ff@eucas1p2.samsung.com>
@ 2017-06-21  8:04                         ` Ilya Maximets
  2017-06-21  8:51                           ` Thomas Monjalon
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21  8:04 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Currently, EAL allocates hugepages one by one, paying no attention
to the NUMA node from which each allocation is made.

Such behaviour leads to allocation failures if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested from more than just the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the mempolicy MPOL_PREFERRED for each hugepage
to one of the requested nodes using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate only as many hugepages from NUMA node N
		     as are needed to satisfy the memory requested for
		     this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort the pages and choose the most suitable ones.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
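
For illustration, a simplified standalone version of the per-page
preferred-node hint (not the exact patch code; prefer_socket() and
NODE_MASK_LONGS are made up for this sketch):

	#include <limits.h>
	#include <string.h>
	#include <numaif.h>

	#define NODE_MASK_LONGS 4	/* enough room for the node bitmask */

	/* Hint the kernel to take subsequent hugepage allocations from
	 * 'node'. 'maxnode' is the number of requested nodes; the patch
	 * passes maxnode + 1 to work around an old kernel quirk. */
	static int
	prefer_socket(int node, unsigned long maxnode)
	{
		unsigned long mask[NODE_MASK_LONGS];
		unsigned long bits = sizeof(unsigned long) * CHAR_BIT;

		memset(mask, 0, sizeof(mask));
		mask[node / bits] = 1UL << (node % bits);

		return set_mempolicy(MPOL_PREFERRED, mask, maxnode + 1);
	}

Once all pages are mapped, the default policy is restored with
set_mempolicy(MPOL_DEFAULT, NULL, 0), as the patch below does.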

A new config option, RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES, is introduced
and enabled by default for linuxapp on x86, ppc and thunderx.
Enabling this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base                           |   1 +
 config/common_linuxapp                       |   2 +
 config/defconfig_arm-armv7a-linuxapp-gcc     |   3 +
 config/defconfig_arm64-armv8a-linuxapp-gcc   |   3 +
 config/defconfig_arm64-thunderx-linuxapp-gcc |   3 +
 lib/librte_eal/linuxapp/eal/Makefile         |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
 mk/rte.app.mk                                |   3 +
 8 files changed, 119 insertions(+), 4 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..b9efdf2 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 #
 # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..5eb568b 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,8 @@
 CONFIG_RTE_EXEC_ENV="linuxapp"
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
 CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
 CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..5c5226a 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
 CONFIG_RTE_TOOLCHAIN="gcc"
 CONFIG_RTE_TOOLCHAIN_GCC=y
 
+# NUMA is not supported on ARM
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
 
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 9f32766..d9667d3 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 # to address minimum DMA alignment across all arm64 implementations.
 CONFIG_RTE_CACHE_LINE_SIZE=128
 
+# Most ARMv8 systems doesn't support NUMA
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 CONFIG_RTE_EAL_IGB_UIO=n
 
 CONFIG_RTE_LIBRTE_FM10K_PMD=n
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index f64da4c..e486c1d 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -37,6 +37,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
 CONFIG_RTE_MAX_NUMA_NODES=2
 CONFIG_RTE_MAX_LCORE=96
 
+# ThunderX supports NUMA
+CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
 #
 # Compile PMD for octeontx sso event device
 #
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..bd10489 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
 LDLIBS += -lpthread
 LDLIBS += -lgcc_s
 LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..9a0087c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,9 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -348,6 +351,21 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +374,82 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory __rte_unused, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+	unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+	unsigned long maxnode = 0;
+	int node_id = -1;
+	bool numa_available = true;
+
+	/* Check if kernel supports NUMA. */
+	if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		numa_available = false;
+	}
+
+	if (orig && numa_available) {
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == RTE_MAX_NUMA_NODES) {
+				node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= RTE_MAX_NUMA_NODES;
+				}
+			} else {
+				node_id = j;
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			nodemask[node_id / ULONG_BITS] =
+						1UL << (node_id % ULONG_BITS);
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			/*
+			 * Due to old linux kernel bug (feature?) we have to
+			 * increase maxnode by 1. It will be unconditionally
+			 * decreased back to normal value inside the syscall
+			 * handler.
+			 */
+			if (set_mempolicy(MPOL_PREFERRED,
+					  nodemask, maxnode + 1) < 0) {
+				RTE_LOG(ERR, EAL,
+					"Failed to set policy MPOL_PREFERRED: "
+					"%s\n", strerror(errno));
+				return i;
+			}
+
+			nodemask[node_id / ULONG_BITS] = 0UL;
+		}
+#endif
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -478,6 +560,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+		RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
 	return i;
 }
 
@@ -562,6 +648,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
@@ -1000,6 +1091,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1017,7 +1113,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1157,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..cfc743a 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v6 2/2] config: enable vhost numa awareness by default
       [not found]                       ` <CGME20170621080448eucas1p28951fac6e4910cc599fe88d7edac9734@eucas1p2.samsung.com>
@ 2017-06-21  8:04                         ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21  8:04 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_linuxapp                       | 1 +
 config/defconfig_arm-armv7a-linuxapp-gcc     | 1 +
 config/defconfig_arm64-armv8a-linuxapp-gcc   | 1 +
 config/defconfig_arm64-thunderx-linuxapp-gcc | 1 +
 4 files changed, 4 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 5eb568b..d2658a2 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -36,6 +36,7 @@ CONFIG_RTE_EXEC_ENV="linuxapp"
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
 CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 
 CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 5c5226a..cef6789 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 
 # NUMA is not supported on ARM
 CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index d9667d3..069e4ed 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
 
 # Most ARMv8 systems doesn't support NUMA
 CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 CONFIG_RTE_EAL_IGB_UIO=n
 
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index e486c1d..e54845c 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -39,6 +39,7 @@ CONFIG_RTE_MAX_LCORE=96
 
 # ThunderX supports NUMA
 CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 
 #
 # Compile PMD for octeontx sso event device
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-20 15:41                                     ` Jerin Jacob
  2017-06-20 15:51                                       ` Sergio Gonzalez Monroy
@ 2017-06-21  8:14                                       ` Hemant Agrawal
  2017-06-21  8:25                                         ` Sergio Gonzalez Monroy
  1 sibling, 1 reply; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-21  8:14 UTC (permalink / raw)
  To: Jerin Jacob, Sergio Gonzalez Monroy
  Cc: Thomas Monjalon, Ilya Maximets, dev, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

On 6/20/2017 9:11 PM, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Tue, 20 Jun 2017 15:58:50 +0100
>> From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>> To: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>>  <i.maximets@samsung.com>
>> CC: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>, Bruce Richardson
>>  <bruce.richardson@intel.com>, David Marchand <david.marchand@6wind.com>,
>>  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>,
>>  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
>> Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
>> User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
>>  Thunderbird/45.1.1
>>
>> On 20/06/2017 15:35, Thomas Monjalon wrote:
>>> 20/06/2017 15:58, Ilya Maximets:
>>>> On 20.06.2017 16:07, Thomas Monjalon wrote:
>>>>> 19/06/2017 13:10, Hemant Agrawal:
>>>>>>>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>>>>>>>>> So, there are 2 option:
>>>>>>>>>>
>>>>>>>>>>      1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>>>         from the first version of the patch and disable it by default.
>>>>>>>>>>
>>>>>>>>>>      2. Keep patch as it is now and make everyone install libnuma
>>>>>>>>>>         for successful build.
>>>>>> +1 for option 1
>>>>>> It will be a issue and undesired dependency for SoCs, not supporting
>>>>>> NUMA architecture.
>>>>>>
>>>>>> It can be added to the config, who desired to use it by default.
>>>>> Yes I agree, it cannot be a dependency for architectures which
>>>>> do not support NUMA.
>>>>> Please can we rework the patch so that only one node is assumed
>>>>> if NUMA is disabled for the architecture?
>>
>> Ilya, I missed that libnuma is not supported on ARM.
>
> It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
>
> [dpdk.org] $ dpkg-query -L libnuma-dev
> /.
> /usr
> /usr/lib
> /usr/lib/aarch64-linux-gnu
> /usr/lib/aarch64-linux-gnu/libnuma.a
> /usr/share
> /usr/share/man
> /usr/share/man/man3
> /usr/share/man/man3/numa.3.gz
> /usr/share/doc
> /usr/share/doc/libnuma-dev
> /usr/share/doc/libnuma-dev/copyright
> /usr/include
> /usr/include/numaif.h
> /usr/include/numa.h
> /usr/include/numacompat1.h
> /usr/lib/aarch64-linux-gnu/libnuma.so
>

1. There are many machines (arm/ppc), which do not support NUMA.

https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA

2. I could not locate it by default in Linaro toolchains.

3. Since this is not common across all platforms, this option should
not be added to common_base or the common configs. It can be added to
any architecture configuration which needs it.

Regards,
Hemant

>
>>
>>>> We're still don't have dynamic build time configuration system.
>>>> To make get/set_mempolicy work we need to include <numaif.h>
>>>> and have libnuma for successful linkage.
>>>> This means that the only option to not have libnuma as dependency
>>>> is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>> as it was in the first version of the patch.
>>>>
>>>> There is, actually, the third option (besides 2 already described):
>>>>
>>>> 	3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>> 	   from the first version of the patch and *enable* it by default.
>>>> 	   In this case anyone who doesn't want to have libnuma as dependency
>>>> 	   will be able to disable the config option manually.
>>>>
>>>> Thomas, what do you think? Bruce? Sergio?
>>> It should be enabled on x86 and ppc, and disabled in other
>>> default configurations (ARM for now).
>>
>> Agree.
>>
>>>> P.S. We're always able to implement syscall wrappers by hands without any
>>>>       external dependencies, but I don't think it's a good decision.
>>> I agree to use libnuma instead of re-inventing the wheel.
>>> Let's just make it optional at build time and fallback on one node
>>> if disabled.
>>
>> That is the simple way out.
>>
>> Sergio
>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21  8:14                                       ` Hemant Agrawal
@ 2017-06-21  8:25                                         ` Sergio Gonzalez Monroy
  2017-06-21  8:36                                           ` Ilya Maximets
  2017-06-21  8:41                                           ` Jerin Jacob
  0 siblings, 2 replies; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-06-21  8:25 UTC (permalink / raw)
  To: Hemant Agrawal, Jerin Jacob
  Cc: Thomas Monjalon, Ilya Maximets, dev, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

On 21/06/2017 09:14, Hemant Agrawal wrote:
> On 6/20/2017 9:11 PM, Jerin Jacob wrote:
>> -----Original Message-----
>>> Date: Tue, 20 Jun 2017 15:58:50 +0100
>>> From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>> To: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>>>  <i.maximets@samsung.com>
>>> CC: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>, Bruce 
>>> Richardson
>>>  <bruce.richardson@intel.com>, David Marchand 
>>> <david.marchand@6wind.com>,
>>>  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu 
>>> <yliu@fridaylinux.org>,
>>>  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>>>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
>>> Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
>>> User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
>>>  Thunderbird/45.1.1
>>>
>>> On 20/06/2017 15:35, Thomas Monjalon wrote:
>>>> 20/06/2017 15:58, Ilya Maximets:
>>>>> On 20.06.2017 16:07, Thomas Monjalon wrote:
>>>>>> 19/06/2017 13:10, Hemant Agrawal:
>>>>>>>>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>>>>>>>>>> So, there are 2 option:
>>>>>>>>>>>
>>>>>>>>>>>      1. Return back config option 
>>>>>>>>>>> RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>>>>         from the first version of the patch and disable it 
>>>>>>>>>>> by default.
>>>>>>>>>>>
>>>>>>>>>>>      2. Keep patch as it is now and make everyone install 
>>>>>>>>>>> libnuma
>>>>>>>>>>>         for successful build.
>>>>>>> +1 for option 1
>>>>>>> It will be a issue and undesired dependency for SoCs, not 
>>>>>>> supporting
>>>>>>> NUMA architecture.
>>>>>>>
>>>>>>> It can be added to the config, who desired to use it by default.
>>>>>> Yes I agree, it cannot be a dependency for architectures which
>>>>>> do not support NUMA.
>>>>>> Please can we rework the patch so that only one node is assumed
>>>>>> if NUMA is disabled for the architecture?
>>>
>>> Ilya, I missed that libnuma is not supported on ARM.
>>
>> It is supported on arm64 and arm64 has NUMA machines(thunderx, 
>> thunderx2) too.
>>
>> [dpdk.org] $ dpkg-query -L libnuma-dev
>> /.
>> /usr
>> /usr/lib
>> /usr/lib/aarch64-linux-gnu
>> /usr/lib/aarch64-linux-gnu/libnuma.a
>> /usr/share
>> /usr/share/man
>> /usr/share/man/man3
>> /usr/share/man/man3/numa.3.gz
>> /usr/share/doc
>> /usr/share/doc/libnuma-dev
>> /usr/share/doc/libnuma-dev/copyright
>> /usr/include
>> /usr/include/numaif.h
>> /usr/include/numa.h
>> /usr/include/numacompat1.h
>> /usr/lib/aarch64-linux-gnu/libnuma.so
>>
>
> 1. There are many machines (arm/ppc), which do not support NUMA.
>
> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
>

I did find that link too, last modified 4 years ago.
Despite that, I could not find any ARM references in libnuma sources, 
but Jerin proved that there is support for it.

http://oss.sgi.com/projects/libnuma/
https://github.com/numactl/numactl

> 2. I could not locate it by default in Linaro toolchains.
>
> 3.  Since this is not a common across all platform. This option should 
> not be added to the common_base or common configs. It can be added to 
> any architecture configuration, which needs it.
>

So is thunderx the only arm64 target to enable this feature by default?
I thought the dependency was on libnuma library support itself.

Thanks,
Sergio

> Regards,
> Hemant
>
>>
>>>
>>>>> We're still don't have dynamic build time configuration system.
>>>>> To make get/set_mempolicy work we need to include <numaif.h>
>>>>> and have libnuma for successful linkage.
>>>>> This means that the only option to not have libnuma as dependency
>>>>> is to return back configuration option 
>>>>> RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>> as it was in the first version of the patch.
>>>>>
>>>>> There is, actually, the third option (besides 2 already described):
>>>>>
>>>>>     3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>        from the first version of the patch and *enable* it by 
>>>>> default.
>>>>>        In this case anyone who doesn't want to have libnuma as 
>>>>> dependency
>>>>>        will be able to disable the config option manually.
>>>>>
>>>>> Thomas, what do you think? Bruce? Sergio?
>>>> It should be enabled on x86 and ppc, and disabled in other
>>>> default configurations (ARM for now).
>>>
>>> Agree.
>>>
>>>>> P.S. We're always able to implement syscall wrappers by hands 
>>>>> without any
>>>>>       external dependencies, but I don't think it's a good decision.
>>>> I agree to use libnuma instead of re-inventing the wheel.
>>>> Let's just make it optional at build time and fallback on one node
>>>> if disabled.
>>>
>>> That is the simple way out.
>>>
>>> Sergio
>>
>
>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21  8:25                                         ` Sergio Gonzalez Monroy
@ 2017-06-21  8:36                                           ` Ilya Maximets
  2017-06-21  8:41                                           ` Jerin Jacob
  1 sibling, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21  8:36 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, Hemant Agrawal, Jerin Jacob
  Cc: Thomas Monjalon, dev, Bruce Richardson, David Marchand,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei

On 21.06.2017 11:25, Sergio Gonzalez Monroy wrote:
> On 21/06/2017 09:14, Hemant Agrawal wrote:
>> On 6/20/2017 9:11 PM, Jerin Jacob wrote:
>>> -----Original Message-----
>>>> Date: Tue, 20 Jun 2017 15:58:50 +0100
>>>> From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
>>>> To: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>>>>  <i.maximets@samsung.com>
>>>> CC: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>, Bruce Richardson
>>>>  <bruce.richardson@intel.com>, David Marchand <david.marchand@6wind.com>,
>>>>  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>,
>>>>  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>>>>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
>>>> Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
>>>> User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
>>>>  Thunderbird/45.1.1
>>>>
>>>> On 20/06/2017 15:35, Thomas Monjalon wrote:
>>>>> 20/06/2017 15:58, Ilya Maximets:
>>>>>> On 20.06.2017 16:07, Thomas Monjalon wrote:
>>>>>>> 19/06/2017 13:10, Hemant Agrawal:
>>>>>>>>>>> On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
>>>>>>>>>>>> So, there are 2 option:
>>>>>>>>>>>>
>>>>>>>>>>>>      1. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>>>>>         from the first version of the patch and disable it by default.
>>>>>>>>>>>>
>>>>>>>>>>>>      2. Keep patch as it is now and make everyone install libnuma
>>>>>>>>>>>>         for successful build.
>>>>>>>> +1 for option 1
>>>>>>>> It will be a issue and undesired dependency for SoCs, not supporting
>>>>>>>> NUMA architecture.
>>>>>>>>
>>>>>>>> It can be added to the config, who desired to use it by default.
>>>>>>> Yes I agree, it cannot be a dependency for architectures which
>>>>>>> do not support NUMA.
>>>>>>> Please can we rework the patch so that only one node is assumed
>>>>>>> if NUMA is disabled for the architecture?
>>>>
>>>> Ilya, I missed that libnuma is not supported on ARM.
>>>
>>> It is supported on arm64 and arm64 has NUMA machines(thunderx, thunderx2) too.
>>>
>>> [dpdk.org] $ dpkg-query -L libnuma-dev
>>> /.
>>> /usr
>>> /usr/lib
>>> /usr/lib/aarch64-linux-gnu
>>> /usr/lib/aarch64-linux-gnu/libnuma.a
>>> /usr/share
>>> /usr/share/man
>>> /usr/share/man/man3
>>> /usr/share/man/man3/numa.3.gz
>>> /usr/share/doc
>>> /usr/share/doc/libnuma-dev
>>> /usr/share/doc/libnuma-dev/copyright
>>> /usr/include
>>> /usr/include/numaif.h
>>> /usr/include/numa.h
>>> /usr/include/numacompat1.h
>>> /usr/lib/aarch64-linux-gnu/libnuma.so
>>>
>>
>> 1. There are many machines (arm/ppc), which do not support NUMA.
>>
>> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
>>
> 
> I did find that link too, last modified 4 years ago.
> Despite that, I could not find any ARM references in libnuma sources, but Jerin proved that there is support for it.
> 
> http://oss.sgi.com/projects/libnuma/
> https://github.com/numactl/numactl
> 
>> 2. I could not locate it by default in Linaro toolchains.
>>
>> 3.  Since this is not a common across all platform. This option should not be added to the common_base or common configs. It can be added to any architecture configuration, which needs it.
>>
> 
> So is it thunderx the only arm64 to enable this feature by default?
> I thought the dependency was the libnuma library support itself.

ARMv7 is the only architecture without a libnuma package in common distros.
So, in v6 I enabled this feature by default for x86, ppc and thunderx.
I didn't enable it for the whole of ARMv8 just because thunderx is the only
platform which supports NUMA and has a special defconfig in the DPDK repository.

Best regards, Ilya Maximets.

>>>>
>>>>>> We're still don't have dynamic build time configuration system.
>>>>>> To make get/set_mempolicy work we need to include <numaif.h>
>>>>>> and have libnuma for successful linkage.
>>>>>> This means that the only option to not have libnuma as dependency
>>>>>> is to return back configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>> as it was in the first version of the patch.
>>>>>>
>>>>>> There is, actually, the third option (besides 2 already described):
>>>>>>
>>>>>>     3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>        from the first version of the patch and *enable* it by default.
>>>>>>        In this case anyone who doesn't want to have libnuma as dependency
>>>>>>        will be able to disable the config option manually.
>>>>>>
>>>>>> Thomas, what do you think? Bruce? Sergio?
>>>>> It should be enabled on x86 and ppc, and disabled in other
>>>>> default configurations (ARM for now).
>>>>
>>>> Agree.
>>>>
>>>>>> P.S. We're always able to implement syscall wrappers by hands without any
>>>>>>       external dependencies, but I don't think it's a good decision.
>>>>> I agree to use libnuma instead of re-inventing the wheel.
>>>>> Let's just make it optional at build time and fallback on one node
>>>>> if disabled.
>>>>
>>>> That is the simple way out.
>>>>
>>>> Sergio
>>>
>>
>>
> 
> 
> 
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21  8:25                                         ` Sergio Gonzalez Monroy
  2017-06-21  8:36                                           ` Ilya Maximets
@ 2017-06-21  8:41                                           ` Jerin Jacob
  2017-06-21  8:49                                             ` Thomas Monjalon
  1 sibling, 1 reply; 99+ messages in thread
From: Jerin Jacob @ 2017-06-21  8:41 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy
  Cc: Hemant Agrawal, Thomas Monjalon, Ilya Maximets, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

-----Original Message-----
> Date: Wed, 21 Jun 2017 09:25:25 +0100
> From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
> To: Hemant Agrawal <hemant.agrawal@nxp.com>, Jerin Jacob
>  <jerin.jacob@caviumnetworks.com>
> CC: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>  <i.maximets@samsung.com>, dev@dpdk.org, Bruce Richardson
>  <bruce.richardson@intel.com>, David Marchand <david.marchand@6wind.com>,
>  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>,
>  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
>  Thunderbird/45.1.1
> 
> On 21/06/2017 09:14, Hemant Agrawal wrote:
> > On 6/20/2017 9:11 PM, Jerin Jacob wrote:
> > > -----Original Message-----
> > > > Date: Tue, 20 Jun 2017 15:58:50 +0100
> > > > From: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
> > > > To: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
> > > >  <i.maximets@samsung.com>
> > > > CC: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>, Bruce
> > > > Richardson
> > > >  <bruce.richardson@intel.com>, David Marchand
> > > > <david.marchand@6wind.com>,
> > > >  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu
> > > > <yliu@fridaylinux.org>,
> > > >  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
> > > >  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
> > > > Subject: Re: [dpdk-dev] [PATCH v5 0/2] Balanced allocation of hugepages
> > > > User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; rv:45.0) Gecko/20100101
> > > >  Thunderbird/45.1.1
> > > > 
> > > > On 20/06/2017 15:35, Thomas Monjalon wrote:
> > > > > 20/06/2017 15:58, Ilya Maximets:
> > > > > > On 20.06.2017 16:07, Thomas Monjalon wrote:
> > > > > > > 19/06/2017 13:10, Hemant Agrawal:
> > > > > > > > > > > On Thu, Jun 08, 2017 at 02:21:58PM +0300, Ilya Maximets wrote:
> > > > > > > > > > > > So, there are 2 option:
> > > > > > > > > > > > 
> > > > > > > > > > > >      1. Return back config
> > > > > > > > > > > > option
> > > > > > > > > > > > RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > > > > > > > > > >         from the first version
> > > > > > > > > > > > of the patch and disable it by
> > > > > > > > > > > > default.
> > > > > > > > > > > > 
> > > > > > > > > > > >      2. Keep patch as it is now
> > > > > > > > > > > > and make everyone install
> > > > > > > > > > > > libnuma
> > > > > > > > > > > >         for successful build.
> > > > > > > > +1 for option 1
> > > > > > > > It will be a issue and undesired dependency for
> > > > > > > > SoCs, not supporting
> > > > > > > > NUMA architecture.
> > > > > > > > 
> > > > > > > > It can be added to the config, who desired to use it by default.
> > > > > > > Yes I agree, it cannot be a dependency for architectures which
> > > > > > > do not support NUMA.
> > > > > > > Please can we rework the patch so that only one node is assumed
> > > > > > > if NUMA is disabled for the architecture?
> > > > 
> > > > Ilya, I missed that libnuma is not supported on ARM.
> > > 
> > > It is supported on arm64 and arm64 has NUMA machines(thunderx,
> > > thunderx2) too.
> > > 
> > > [dpdk.org] $ dpkg-query -L libnuma-dev
> > > /.
> > > /usr
> > > /usr/lib
> > > /usr/lib/aarch64-linux-gnu
> > > /usr/lib/aarch64-linux-gnu/libnuma.a
> > > /usr/share
> > > /usr/share/man
> > > /usr/share/man/man3
> > > /usr/share/man/man3/numa.3.gz
> > > /usr/share/doc
> > > /usr/share/doc/libnuma-dev
> > > /usr/share/doc/libnuma-dev/copyright
> > > /usr/include
> > > /usr/include/numaif.h
> > > /usr/include/numa.h
> > > /usr/include/numacompat1.h
> > > /usr/lib/aarch64-linux-gnu/libnuma.so
> > > 
> > 
> > 1. There are many machines (arm/ppc), which do not support NUMA.
> > 
> > https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> > 
> 
> I did find that link too, last modified 4 years ago.
> Despite that, I could not find any ARM references in libnuma sources, but
> Jerin proved that there is support for it.
> 
> http://oss.sgi.com/projects/libnuma/
> https://github.com/numactl/numactl

Those Linaro links are very old. ARM64 NUMA support was added in the 4.7 kernel.
I guess we are talking about a build-time dependency on libnuma here.
Correct? I think even with an old arm64 kernel (< 4.6) you can build against
libnuma if it is present in the rootfs. It is just that at runtime it will
report that NUMA support is not available. Correct?

How hard is it to detect the presence of "numaif.h" if the existing build
system does not support it? If it is trivial, we can enable
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES if the build environment has "numaif.h".

An example from the Linux kernel build system:
http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
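
For illustration, such a probe could be a tiny shell helper modelled on the
gcc-goto.sh script above. This is only a hypothetical sketch, not an existing
DPDK script; the file name and usage are made up here:

#!/bin/sh
# has-numaif.sh (hypothetical): print "y" if the given compiler can
# preprocess <numaif.h>, "n" otherwise.  Usage: ./has-numaif.sh "$CC"
CC="${1:-cc}"
if echo '#include <numaif.h>' | $CC -E -x c - >/dev/null 2>&1; then
	echo y
else
	echo n
fi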

> 
> > 2. I could not locate it by default in Linaro toolchains.
> > 
> > 3.  Since this is not a common across all platform. This option should
> > not be added to the common_base or common configs. It can be added to
> > any architecture configuration, which needs it.
> > 
> 
> So is it thunderx the only arm64 to enable this feature by default?
> I thought the dependency was the libnuma library support itself.
> 
> Thanks,
> Sergio
> 
> > Regards,
> > Hemant
> > 
> > > 
> > > > 
> > > > > > We're still don't have dynamic build time configuration system.
> > > > > > To make get/set_mempolicy work we need to include <numaif.h>
> > > > > > and have libnuma for successful linkage.
> > > > > > This means that the only option to not have libnuma as dependency
> > > > > > is to return back configuration option
> > > > > > RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > > > > as it was in the first version of the patch.
> > > > > > 
> > > > > > There is, actually, the third option (besides 2 already described):
> > > > > > 
> > > > > >     3. Return back config option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > > > >        from the first version of the patch and *enable*
> > > > > > it by default.
> > > > > >        In this case anyone who doesn't want to have
> > > > > > libnuma as dependency
> > > > > >        will be able to disable the config option manually.
> > > > > > 
> > > > > > Thomas, what do you think? Bruce? Sergio?
> > > > > It should be enabled on x86 and ppc, and disabled in other
> > > > > default configurations (ARM for now).
> > > > 
> > > > Agree.
> > > > 
> > > > > > P.S. We're always able to implement syscall wrappers by
> > > > > > hands without any
> > > > > >       external dependencies, but I don't think it's a good decision.
> > > > > I agree to use libnuma instead of re-inventing the wheel.
> > > > > Let's just make it optional at build time and fallback on one node
> > > > > if disabled.
> > > > 
> > > > That is the simple way out.
> > > > 
> > > > Sergio
> > > 
> > 
> > 
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21  8:41                                           ` Jerin Jacob
@ 2017-06-21  8:49                                             ` Thomas Monjalon
  2017-06-21  9:27                                               ` Jerin Jacob
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-21  8:49 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Sergio Gonzalez Monroy, Hemant Agrawal, Ilya Maximets, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

21/06/2017 10:41, Jerin Jacob:
> > > 1. There are many machines (arm/ppc), which do not support NUMA.
> > > 
> > > https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> > > 
> > 
> > I did find that link too, last modified 4 years ago.
> > Despite that, I could not find any ARM references in libnuma sources, but
> > Jerin proved that there is support for it.
> > 
> > http://oss.sgi.com/projects/libnuma/
> > https://github.com/numactl/numactl
> 
> Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
> I guess we are talking about build time time dependency with libnuma here.
> Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
> libnuma if it is present in rootfs. Just that at runtime, it will return
> NUMA support not available. Correct?
> 
> How hard is detect the presence of "numaif.h" if existing build system does not
> support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> if build environment has "numaif.h".
> 
> Some example in linux kernel build system:
> http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh

I think we should not try to detect numaif.h, because it should be
an error on a platform supporting NUMA.
If someone really wants to build DPDK without NUMA for x86, he should
disable it in the build config file.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v6 1/2] mem: balanced allocation of hugepages
  2017-06-21  8:04                         ` [PATCH v6 1/2] mem: balanced " Ilya Maximets
@ 2017-06-21  8:51                           ` Thomas Monjalon
  2017-06-21  8:58                             ` Bruce Richardson
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-21  8:51 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Heetae Ahn,
	Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

21/06/2017 10:04, Ilya Maximets:
> +CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n

We can stop inserting LIBRTE in our config options.
CONFIG_RTE_EAL_ is long enough :)

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v6 1/2] mem: balanced allocation of hugepages
  2017-06-21  8:51                           ` Thomas Monjalon
@ 2017-06-21  8:58                             ` Bruce Richardson
  2017-06-21  9:25                               ` Ilya Maximets
  2017-06-21  9:28                               ` Thomas Monjalon
  0 siblings, 2 replies; 99+ messages in thread
From: Bruce Richardson @ 2017-06-21  8:58 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Jerin Jacob

On Wed, Jun 21, 2017 at 10:51:58AM +0200, Thomas Monjalon wrote:
> 21/06/2017 10:04, Ilya Maximets:
> > +CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
> 
> We can stop inserting LIBRTE in our config options.
> CONFIG_RTE_EAL_ is long enough :)
> 
Consistency. While I agree it's unneeded, we should have it in all or none,
and unless we change a bunch of existing ones, I think that means we have
it in all.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v6 1/2] mem: balanced allocation of hugepages
  2017-06-21  8:58                             ` Bruce Richardson
@ 2017-06-21  9:25                               ` Ilya Maximets
  2017-06-21  9:34                                 ` Bruce Richardson
  2017-06-21  9:28                               ` Thomas Monjalon
  1 sibling, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21  9:25 UTC (permalink / raw)
  To: Bruce Richardson, Thomas Monjalon
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Heetae Ahn,
	Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei, Jerin Jacob

On 21.06.2017 11:58, Bruce Richardson wrote:
> On Wed, Jun 21, 2017 at 10:51:58AM +0200, Thomas Monjalon wrote:
>> 21/06/2017 10:04, Ilya Maximets:
>>> +CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
>>
>> We can stop inserting LIBRTE in our config options.
>> CONFIG_RTE_EAL_ is long enough :)
>>
> Consistency. While I agree it's unneeded should have it in all or none,
> and unless we change a bunch of existing ones, I think it means we have
> it in all.

Hmm. There are a few options named CONFIG_RTE_EAL_* and CONFIG_RTE_LIBRTE_EAL_*.
Also there is one strange CONFIG_RTE_LIBEAL_(USE_HPET).

Maybe we can prepare a patch to unify all those options later?
Or should I rename CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES to
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES in this series?

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21  8:49                                             ` Thomas Monjalon
@ 2017-06-21  9:27                                               ` Jerin Jacob
  2017-06-21  9:58                                                 ` Thomas Monjalon
  0 siblings, 1 reply; 99+ messages in thread
From: Jerin Jacob @ 2017-06-21  9:27 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Sergio Gonzalez Monroy, Hemant Agrawal, Ilya Maximets, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

-----Original Message-----
> Date: Wed, 21 Jun 2017 10:49:14 +0200
> From: Thomas Monjalon <thomas@monjalon.net>
> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
>  Marchand <david.marchand@6wind.com>, Heetae Ahn
>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
>  Pei <yulong.pei@intel.com>
> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> 
> 21/06/2017 10:41, Jerin Jacob:
> > > > 1. There are many machines (arm/ppc), which do not support NUMA.
> > > > 
> > > > https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> > > > 
> > > 
> > > I did find that link too, last modified 4 years ago.
> > > Despite that, I could not find any ARM references in libnuma sources, but
> > > Jerin proved that there is support for it.
> > > 
> > > http://oss.sgi.com/projects/libnuma/
> > > https://github.com/numactl/numactl
> > 
> > Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
> > I guess we are talking about build time time dependency with libnuma here.
> > Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
> > libnuma if it is present in rootfs. Just that at runtime, it will return
> > NUMA support not available. Correct?
> > 
> > How hard is detect the presence of "numaif.h" if existing build system does not
> > support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > if build environment has "numaif.h".
> > 
> > Some example in linux kernel build system:
> > http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
> 
> I think we should not try to detect numaif.h, because it should be
> an error on platform supporting NUMA.

I have installed libnuma on a NUMA and a non-NUMA machine,
compiled and ran the following code on those machines, and it could detect
the NUMA availability. Could you add more details on the "error on
a platform supporting NUMA"?

➜ 83xx [ctest] $ cat main.c
#include <numaif.h>
#include <stdio.h>
#include <errno.h>

int main()
{
        /* Check if kernel supports NUMA. */
        if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
                printf("NUMA is not supported.\n");
        } else {
                printf("NUMA is supported.\n");
        }

}
➜ 83xx [ctest] $ gcc -Wall main.c -lnuma
# On non numa system
➜ 83xx [ctest] $ ./a.out
NUMA is not supported.

# On numa machine
➜ GB-2S [~] $ ./a.out
NUMA is supported.


> If someone really wants to build DPDK without NUMA for x86, he should
> disable it in the build config file.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v6 1/2] mem: balanced allocation of hugepages
  2017-06-21  8:58                             ` Bruce Richardson
  2017-06-21  9:25                               ` Ilya Maximets
@ 2017-06-21  9:28                               ` Thomas Monjalon
  1 sibling, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-21  9:28 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Jerin Jacob

21/06/2017 10:58, Bruce Richardson:
> On Wed, Jun 21, 2017 at 10:51:58AM +0200, Thomas Monjalon wrote:
> > 21/06/2017 10:04, Ilya Maximets:
> > > +CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
> > 
> > We can stop inserting LIBRTE in our config options.
> > CONFIG_RTE_EAL_ is long enough :)
> > 
> Consistency. While I agree it's unneeded should have it in all or none,
> and unless we change a bunch of existing ones, I think it means we have
> it in all.

It is already inconsistent.
It could be cleaned up in the next release.
For now, we have to decide which one we prefer.
I prefer CONFIG_RTE_EAL_ and CONFIG_RTE_PMD_ for drivers.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v6 1/2] mem: balanced allocation of hugepages
  2017-06-21  9:25                               ` Ilya Maximets
@ 2017-06-21  9:34                                 ` Bruce Richardson
  0 siblings, 0 replies; 99+ messages in thread
From: Bruce Richardson @ 2017-06-21  9:34 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: Thomas Monjalon, dev, David Marchand, Sergio Gonzalez Monroy,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Jerin Jacob

On Wed, Jun 21, 2017 at 12:25:51PM +0300, Ilya Maximets wrote:
> On 21.06.2017 11:58, Bruce Richardson wrote:
> > On Wed, Jun 21, 2017 at 10:51:58AM +0200, Thomas Monjalon wrote:
> >> 21/06/2017 10:04, Ilya Maximets:
> >>> +CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES=n
> >>
> >> We can stop inserting LIBRTE in our config options.
> >> CONFIG_RTE_EAL_ is long enough :)
> >>
> > Consistency. While I agree it's unneeded should have it in all or none,
> > and unless we change a bunch of existing ones, I think it means we have
> > it in all.
> 
> Hmm. There are few options named CONFIG_RTE_EAL_* and CONFIG_RTE_LIBRTE_EAL_*.
> Also there is one strange CONFIG_RTE_LIBEAL_(USE_HPET).
> 
> Maybe we can prepare the patch to unify all that options later?
> Or should I rename CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES to
> CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES in this series?
Sure, if it's already inconsistent for EAL, then use the shorter name.
We can fix up the others later. These options may move or disappear if
we look to move to a different build system, e.g. [1], so that may be a good
opportunity to adjust some names.

/Bruce

[1] http://dpdk.org/dev/patchwork/patch/25104/

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21  9:27                                               ` Jerin Jacob
@ 2017-06-21  9:58                                                 ` Thomas Monjalon
  2017-06-21 10:29                                                   ` Jerin Jacob
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-21  9:58 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Sergio Gonzalez Monroy, Hemant Agrawal, Ilya Maximets, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

21/06/2017 11:27, Jerin Jacob:
> -----Original Message-----
> > Date: Wed, 21 Jun 2017 10:49:14 +0200
> > From: Thomas Monjalon <thomas@monjalon.net>
> > To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> > Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
> >  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
> >  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
> >  Marchand <david.marchand@6wind.com>, Heetae Ahn
> >  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
> >  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
> >  Pei <yulong.pei@intel.com>
> > Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> > 
> > 21/06/2017 10:41, Jerin Jacob:
> > > > > 1. There are many machines (arm/ppc), which do not support NUMA.
> > > > > 
> > > > > https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> > > > > 
> > > > 
> > > > I did find that link too, last modified 4 years ago.
> > > > Despite that, I could not find any ARM references in libnuma sources, but
> > > > Jerin proved that there is support for it.
> > > > 
> > > > http://oss.sgi.com/projects/libnuma/
> > > > https://github.com/numactl/numactl
> > > 
> > > Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
> > > I guess we are talking about build time time dependency with libnuma here.
> > > Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
> > > libnuma if it is present in rootfs. Just that at runtime, it will return
> > > NUMA support not available. Correct?
> > > 
> > > How hard is detect the presence of "numaif.h" if existing build system does not
> > > support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > if build environment has "numaif.h".
> > > 
> > > Some example in linux kernel build system:
> > > http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
> > 
> > I think we should not try to detect numaif.h, because it should be
> > an error on platform supporting NUMA.
> 
> I have installed libnuma on a NUMA and non NUMA machine.
> Compiled and ran following code on those machine and it could detect
> the numa availability. Could you add more details on the "error on
> platform supporting NUMA".

I was saying that we do not need to detect NUMA.
If we are building DPDK for a NUMA architecture and libnuma is not
available, then it will be a problem that the user must catch.
The easiest way to catch it is to fail on the include of numaif.h.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v7 0/2] Balanced allocation of hugepages
       [not found]                       ` <CGME20170621100837eucas1p1c570092cac733a66d939ca7ff04ac9e6@eucas1p1.samsung.com>
@ 2017-06-21 10:08                         ` Ilya Maximets
       [not found]                           ` <CGME20170621100841eucas1p1114078b1d8a38920c3633e9bddbabc02@eucas1p1.samsung.com>
                                             ` (3 more replies)
  0 siblings, 4 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21 10:08 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Version 7:
	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	  returned. Enabled by default for x86, ppc and thunderx.

Version 5:
	* Fixed shared build. (The automated build test will fail
	  anyway because libnuma-devel is not installed on build servers.)

Version 4:
	* Fixed operation on systems without NUMA by adding a check for
	  NUMA support in the kernel.

Version 3:
	* Implemented hybrid schema for allocation.
	* Fixed an unneeded mempolicy change while remapping (orig = 0).
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                           |   1 +
 config/common_linuxapp                       |   3 +
 config/defconfig_arm-armv7a-linuxapp-gcc     |   4 +
 config/defconfig_arm64-armv8a-linuxapp-gcc   |   4 +
 config/defconfig_arm64-thunderx-linuxapp-gcc |   4 +
 lib/librte_eal/linuxapp/eal/Makefile         |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
 mk/rte.app.mk                                |   3 +
 8 files changed, 123 insertions(+), 4 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v7 1/2] mem: balanced allocation of hugepages
       [not found]                           ` <CGME20170621100841eucas1p1114078b1d8a38920c3633e9bddbabc02@eucas1p1.samsung.com>
@ 2017-06-21 10:08                             ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21 10:08 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Currently EAL allocates hugepages one by one, not paying attention
to which NUMA node the allocation was made from.

Such behaviour leads to allocation failures if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested not only from the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the mempolicy MPOL_PREFERRED for each hugepage
to one of the requested nodes using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate only as many hugepages from NUMA node N
		     as are needed to fit the memory requested for
		     this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
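
For illustration only (not part of the diff below), the per-page node
selection implied by steps 1) and 2) can be condensed into a sketch like
the following; the names 'essential' and 'socket_mem' are assumptions made
for this illustration, and the real implementation lives in
map_all_hugepages() in the patch:

	/* Hypothetical sketch: pick the NUMA node for the next hugepage.
	 * 'essential[]' is the per-node memory still to be satisfied,
	 * 'socket_mem[]' is the per-node request (at least one entry is
	 * assumed to be non-zero). */
	static int
	pick_node(uint64_t *essential, const uint64_t *socket_mem,
		  uint64_t page_sz, int prev, int nb_nodes)
	{
		int j;

		/* 1) first serve nodes that still need essential memory */
		for (j = 0; j < nb_nodes; j++) {
			if (essential[j] == 0)
				continue;
			if (essential[j] < page_sz)
				essential[j] = 0;
			else
				essential[j] -= page_sz;
			return j;
		}

		/* 2) then spread the rest round-robin over requested nodes */
		j = (prev + 1) % nb_nodes;
		while (socket_mem[j] == 0)
			j = (j + 1) % nb_nodes;
		return j;
	}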

A new config option, RTE_EAL_NUMA_AWARE_HUGEPAGES, is introduced and
enabled by default for linuxapp on x86, ppc and thunderx.
Enabling this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base                           |   1 +
 config/common_linuxapp                       |   2 +
 config/defconfig_arm-armv7a-linuxapp-gcc     |   3 +
 config/defconfig_arm64-armv8a-linuxapp-gcc   |   3 +
 config/defconfig_arm64-thunderx-linuxapp-gcc |   3 +
 lib/librte_eal/linuxapp/eal/Makefile         |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
 mk/rte.app.mk                                |   3 +
 8 files changed, 119 insertions(+), 4 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 #
 # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..050526f 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,8 @@
 CONFIG_RTE_EXEC_ENV="linuxapp"
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
 CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
 CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
 CONFIG_RTE_TOOLCHAIN="gcc"
 CONFIG_RTE_TOOLCHAIN_GCC=y
 
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
 
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 9f32766..2c67cdc 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 # to address minimum DMA alignment across all arm64 implementations.
 CONFIG_RTE_CACHE_LINE_SIZE=128
 
+# Most ARMv8 systems doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 CONFIG_RTE_EAL_IGB_UIO=n
 
 CONFIG_RTE_LIBRTE_FM10K_PMD=n
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index f64da4c..3e79fa8 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -37,6 +37,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
 CONFIG_RTE_MAX_NUMA_NODES=2
 CONFIG_RTE_MAX_LCORE=96
 
+# ThunderX supports NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
 #
 # Compile PMD for octeontx sso event device
 #
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
 LDLIBS += -lpthread
 LDLIBS += -lgcc_s
 LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..ceadca7 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,9 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -348,6 +351,21 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +374,82 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory __rte_unused, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+	unsigned long maxnode = 0;
+	int node_id = -1;
+	bool numa_available = true;
+
+	/* Check if kernel supports NUMA. */
+	if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		numa_available = false;
+	}
+
+	if (orig && numa_available) {
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == RTE_MAX_NUMA_NODES) {
+				node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= RTE_MAX_NUMA_NODES;
+				}
+			} else {
+				node_id = j;
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			nodemask[node_id / ULONG_BITS] =
+						1UL << (node_id % ULONG_BITS);
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			/*
+			 * Due to old linux kernel bug (feature?) we have to
+			 * increase maxnode by 1. It will be unconditionally
+			 * decreased back to normal value inside the syscall
+			 * handler.
+			 */
+			if (set_mempolicy(MPOL_PREFERRED,
+					  nodemask, maxnode + 1) < 0) {
+				RTE_LOG(ERR, EAL,
+					"Failed to set policy MPOL_PREFERRED: "
+					"%s\n", strerror(errno));
+				return i;
+			}
+
+			nodemask[node_id / ULONG_BITS] = 0UL;
+		}
+#endif
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -478,6 +560,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+		RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
 	return i;
 }
 
@@ -562,6 +648,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
@@ -1000,6 +1091,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1017,7 +1113,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1157,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v7 2/2] config: enable vhost numa awareness by default
       [not found]                           ` <CGME20170621100845eucas1p2a457b1694d20de8e2d8126df679c43ae@eucas1p2.samsung.com>
@ 2017-06-21 10:08                             ` Ilya Maximets
  2017-06-27  9:20                               ` Hemant Agrawal
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21 10:08 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_linuxapp                       | 1 +
 config/defconfig_arm-armv7a-linuxapp-gcc     | 1 +
 config/defconfig_arm64-armv8a-linuxapp-gcc   | 1 +
 config/defconfig_arm64-thunderx-linuxapp-gcc | 1 +
 4 files changed, 4 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 050526f..2e44434 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -43,6 +43,7 @@ CONFIG_RTE_KNI_KMOD=y
 CONFIG_RTE_LIBRTE_KNI=y
 CONFIG_RTE_LIBRTE_PMD_KNI=y
 CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_PMD_VHOST=y
 CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
 CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 
 # NUMA is not supported on ARM
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 2c67cdc..d190afb 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
 
 # Most ARMv8 systems doesn't support NUMA
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 CONFIG_RTE_EAL_IGB_UIO=n
 
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index 3e79fa8..7b07b7d 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -39,6 +39,7 @@ CONFIG_RTE_MAX_LCORE=96
 
 # ThunderX supports NUMA
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 
 #
 # Compile PMD for octeontx sso event device
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21  9:58                                                 ` Thomas Monjalon
@ 2017-06-21 10:29                                                   ` Jerin Jacob
  2017-06-21 10:36                                                     ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Jerin Jacob @ 2017-06-21 10:29 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Sergio Gonzalez Monroy, Hemant Agrawal, Ilya Maximets, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

-----Original Message-----
> Date: Wed, 21 Jun 2017 11:58:12 +0200
> From: Thomas Monjalon <thomas@monjalon.net>
> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
>  Marchand <david.marchand@6wind.com>, Heetae Ahn
>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
>  Pei <yulong.pei@intel.com>
> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> 
> 21/06/2017 11:27, Jerin Jacob:
> > -----Original Message-----
> > > Date: Wed, 21 Jun 2017 10:49:14 +0200
> > > From: Thomas Monjalon <thomas@monjalon.net>
> > > To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> > > Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
> > >  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
> > >  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
> > >  Marchand <david.marchand@6wind.com>, Heetae Ahn
> > >  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
> > >  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
> > >  Pei <yulong.pei@intel.com>
> > > Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> > > 
> > > 21/06/2017 10:41, Jerin Jacob:
> > > > > > 1. There are many machines (arm/ppc), which do not support NUMA.
> > > > > > 
> > > > > > https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> > > > > > 
> > > > > 
> > > > > I did find that link too, last modified 4 years ago.
> > > > > Despite that, I could not find any ARM references in libnuma sources, but
> > > > > Jerin proved that there is support for it.
> > > > > 
> > > > > http://oss.sgi.com/projects/libnuma/
> > > > > https://github.com/numactl/numactl
> > > > 
> > > > Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
> > > > I guess we are talking about build time time dependency with libnuma here.
> > > > Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
> > > > libnuma if it is present in rootfs. Just that at runtime, it will return
> > > > NUMA support not available. Correct?
> > > > 
> > > > How hard is detect the presence of "numaif.h" if existing build system does not
> > > > support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > > > if build environment has "numaif.h".
> > > > 
> > > > Some example in linux kernel build system:
> > > > http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
> > > 
> > > I think we should not try to detect numaif.h, because it should be
> > > an error on platform supporting NUMA.
> > 
> > I have installed libnuma on a NUMA and non NUMA machine.
> > Compiled and ran following code on those machine and it could detect
> > the numa availability. Could you add more details on the "error on
> > platform supporting NUMA".
> 
> I was saying that we do not need to detect NUMA.
> If we are building DPDK for a NUMA architecture and libnuma is not
> available, then it will be a problem that the user must catch.
> The easiest way to catch it, is to fail on the include of numaif.h.

libnuma is not really _architecture_ dependent.

Ilya Maximets' patch disables NUMA support in the common arm64 config. I
think it is not correct; we should not disable it in any arch's generic config.

IMO, it should be enabled by default in the common config and then we can
detect the presence of numaif.h; if it is not available OR a target explicitly
does not need it, proceed with disabling
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think that is more portable.

No strong opinion on "failing the build" vs "printing a warning" in the
absence of numaif.h
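
A hypothetical sketch of how that could look with plain GNU make, assuming
the probe and option wiring below (none of this is existing DPDK build code
and the variable names are made up):

# Auto-disable the option when the target compiler cannot see <numaif.h>.
# Purely illustrative.
HAS_NUMAIF := $(shell $(CC) -fsyntax-only -include numaif.h \
		-x c /dev/null >/dev/null 2>&1 && echo y || echo n)
ifneq ($(HAS_NUMAIF),y)
CONFIG_RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES := n
$(warning numaif.h not found, NUMA aware hugepages disabled)
endif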

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21 10:29                                                   ` Jerin Jacob
@ 2017-06-21 10:36                                                     ` Ilya Maximets
  2017-06-21 11:22                                                       ` Jerin Jacob
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-21 10:36 UTC (permalink / raw)
  To: Jerin Jacob, Thomas Monjalon
  Cc: Sergio Gonzalez Monroy, Hemant Agrawal, dev, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

On 21.06.2017 13:29, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Wed, 21 Jun 2017 11:58:12 +0200
>> From: Thomas Monjalon <thomas@monjalon.net>
>> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
>> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
>>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
>>  Marchand <david.marchand@6wind.com>, Heetae Ahn
>>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
>>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
>>  Pei <yulong.pei@intel.com>
>> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
>>
>> 21/06/2017 11:27, Jerin Jacob:
>>> -----Original Message-----
>>>> Date: Wed, 21 Jun 2017 10:49:14 +0200
>>>> From: Thomas Monjalon <thomas@monjalon.net>
>>>> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
>>>> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>>>>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
>>>>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
>>>>  Marchand <david.marchand@6wind.com>, Heetae Ahn
>>>>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
>>>>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
>>>>  Pei <yulong.pei@intel.com>
>>>> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
>>>>
>>>> 21/06/2017 10:41, Jerin Jacob:
>>>>>>> 1. There are many machines (arm/ppc), which do not support NUMA.
>>>>>>>
>>>>>>> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
>>>>>>>
>>>>>>
>>>>>> I did find that link too, last modified 4 years ago.
>>>>>> Despite that, I could not find any ARM references in libnuma sources, but
>>>>>> Jerin proved that there is support for it.
>>>>>>
>>>>>> http://oss.sgi.com/projects/libnuma/
>>>>>> https://github.com/numactl/numactl
>>>>>
>>>>> Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
>>>>> I guess we are talking about build time time dependency with libnuma here.
>>>>> Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
>>>>> libnuma if it is present in rootfs. Just that at runtime, it will return
>>>>> NUMA support not available. Correct?
>>>>>
>>>>> How hard is detect the presence of "numaif.h" if existing build system does not
>>>>> support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>> if build environment has "numaif.h".
>>>>>
>>>>> Some example in linux kernel build system:
>>>>> http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
>>>>
>>>> I think we should not try to detect numaif.h, because it should be
>>>> an error on platform supporting NUMA.
>>>
>>> I have installed libnuma on a NUMA and non NUMA machine.
>>> Compiled and ran following code on those machine and it could detect
>>> the numa availability. Could you add more details on the "error on
>>> platform supporting NUMA".
>>
>> I was saying that we do not need to detect NUMA.
>> If we are building DPDK for a NUMA architecture and libnuma is not
>> available, then it will be a problem that the user must catch.
>> The easiest way to catch it, is to fail on the include of numaif.h.
> 
> libnuma is not really _architecture_ depended.
> 
> Ilya Maximets patch disables NUMA support in common arm64 config.I
> think, It is not correct, We should not disable on any archs generic config.
> 
> IMO, It should be enabled by default in common config and then we can
> detect the presence of numaif.h, if not available OR a target does not need it
> explicitly, proceed with disabling
> RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.

Detecting headers is not possible until DPDK has a dynamic build
configuration system like autotools, CMake or meson.
Right now we just can't do that.
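
(For illustration: a configure-style build system would typically compile a
trivial probe such as the hypothetical conftest.c below with
"cc conftest.c -lnuma" and set RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES from the
result; the current static make-based config has no step where this could
run automatically.)

/* conftest.c - hypothetical compile/link probe for libnuma. */
#include <numa.h>
#include <numaif.h>

int main(void)
{
	/* Referencing a libnuma symbol also verifies that -lnuma links. */
	return numa_available() < 0;
}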

> No strong opinion on "failing the build" vs "printing a warning" in the
> absence of numaif.h

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21 10:36                                                     ` Ilya Maximets
@ 2017-06-21 11:22                                                       ` Jerin Jacob
  2017-06-21 11:29                                                         ` Thomas Monjalon
  2017-06-27  9:13                                                         ` Hemant Agrawal
  0 siblings, 2 replies; 99+ messages in thread
From: Jerin Jacob @ 2017-06-21 11:22 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: Thomas Monjalon, Sergio Gonzalez Monroy, Hemant Agrawal, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

-----Original Message-----
> Date: Wed, 21 Jun 2017 13:36:58 +0300
> From: Ilya Maximets <i.maximets@samsung.com>
> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>, Thomas Monjalon
>  <thomas@monjalon.net>
> CC: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>  Agrawal <hemant.agrawal@nxp.com>, dev@dpdk.org, Bruce Richardson
>  <bruce.richardson@intel.com>, David Marchand <david.marchand@6wind.com>,
>  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>,
>  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101
>  Thunderbird/45.8.0
> 
> On 21.06.2017 13:29, Jerin Jacob wrote:
> > -----Original Message-----
> >> Date: Wed, 21 Jun 2017 11:58:12 +0200
> >> From: Thomas Monjalon <thomas@monjalon.net>
> >> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> >> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
> >>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
> >>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
> >>  Marchand <david.marchand@6wind.com>, Heetae Ahn
> >>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
> >>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
> >>  Pei <yulong.pei@intel.com>
> >> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> >>
> >> 21/06/2017 11:27, Jerin Jacob:
> >>> -----Original Message-----
> >>>> Date: Wed, 21 Jun 2017 10:49:14 +0200
> >>>> From: Thomas Monjalon <thomas@monjalon.net>
> >>>> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> >>>> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
> >>>>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
> >>>>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
> >>>>  Marchand <david.marchand@6wind.com>, Heetae Ahn
> >>>>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
> >>>>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
> >>>>  Pei <yulong.pei@intel.com>
> >>>> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
> >>>>
> >>>> 21/06/2017 10:41, Jerin Jacob:
> >>>>>>> 1. There are many machines (arm/ppc), which do not support NUMA.
> >>>>>>>
> >>>>>>> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> >>>>>>>
> >>>>>>
> >>>>>> I did find that link too, last modified 4 years ago.
> >>>>>> Despite that, I could not find any ARM references in libnuma sources, but
> >>>>>> Jerin proved that there is support for it.
> >>>>>>
> >>>>>> http://oss.sgi.com/projects/libnuma/
> >>>>>> https://github.com/numactl/numactl
> >>>>>
> >>>>> Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
> >>>>> I guess we are talking about build time time dependency with libnuma here.
> >>>>> Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
> >>>>> libnuma if it is present in rootfs. Just that at runtime, it will return
> >>>>> NUMA support not available. Correct?
> >>>>>
> >>>>> How hard is detect the presence of "numaif.h" if existing build system does not
> >>>>> support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> >>>>> if build environment has "numaif.h".
> >>>>>
> >>>>> Some example in linux kernel build system:
> >>>>> http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
> >>>>
> >>>> I think we should not try to detect numaif.h, because it should be
> >>>> an error on platform supporting NUMA.
> >>>
> >>> I have installed libnuma on a NUMA and non NUMA machine.
> >>> Compiled and ran following code on those machine and it could detect
> >>> the numa availability. Could you add more details on the "error on
> >>> platform supporting NUMA".
> >>
> >> I was saying that we do not need to detect NUMA.
> >> If we are building DPDK for a NUMA architecture and libnuma is not
> >> available, then it will be a problem that the user must catch.
> >> The easiest way to catch it, is to fail on the include of numaif.h.
> > 
> > libnuma is not really _architecture_ depended.
> > 
> > Ilya Maximets patch disables NUMA support in common arm64 config.I
> > think, It is not correct, We should not disable on any archs generic config.
> > 
> > IMO, It should be enabled by default in common config and then we can
> > detect the presence of numaif.h, if not available OR a target does not need it
> > explicitly, proceed with disabling
> > RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
> 
> Detecting of headers is impossible until dpdk doesn't have dynamic build
> configuration system like autotools, CMake or meson.
> Right now we just can't do that.

I agree, unless we do something like the linux kernel does below:
http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh

Either way, I think you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in the
generic arm64 config and disable it in defconfig_arm64-dpaa2-linuxapp-gcc (as
Hemant requested) or in any sub-arch target that does not need
RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.

> 
> > No strong opinion on "failing the build" vs "printing a warning" in the
> > absence of numaif.h

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21 11:22                                                       ` Jerin Jacob
@ 2017-06-21 11:29                                                         ` Thomas Monjalon
  2017-06-27  9:13                                                         ` Hemant Agrawal
  1 sibling, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-21 11:29 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: Jerin Jacob, Sergio Gonzalez Monroy, Hemant Agrawal, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

21/06/2017 13:22, Jerin Jacob:
> From: Ilya Maximets <i.maximets@samsung.com>
> > On 21.06.2017 13:29, Jerin Jacob wrote:
> > > From: Thomas Monjalon <thomas@monjalon.net>
> > >> 21/06/2017 11:27, Jerin Jacob:
> > >>> From: Thomas Monjalon <thomas@monjalon.net>
> > >>>> 21/06/2017 10:41, Jerin Jacob:
> > >>>>>>> 1. There are many machines (arm/ppc), which do not support NUMA.
> > >>>>>>>
> > >>>>>>> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> > >>>>>>>
> > >>>>>>
> > >>>>>> I did find that link too, last modified 4 years ago.
> > >>>>>> Despite that, I could not find any ARM references in libnuma sources, but
> > >>>>>> Jerin proved that there is support for it.
> > >>>>>>
> > >>>>>> http://oss.sgi.com/projects/libnuma/
> > >>>>>> https://github.com/numactl/numactl
> > >>>>>
> > >>>>> Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
> > >>>>> I guess we are talking about build time time dependency with libnuma here.
> > >>>>> Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
> > >>>>> libnuma if it is present in rootfs. Just that at runtime, it will return
> > >>>>> NUMA support not available. Correct?
> > >>>>>
> > >>>>> How hard is detect the presence of "numaif.h" if existing build system does not
> > >>>>> support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > >>>>> if build environment has "numaif.h".
> > >>>>>
> > >>>>> Some example in linux kernel build system:
> > >>>>> http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
> > >>>>
> > >>>> I think we should not try to detect numaif.h, because it should be
> > >>>> an error on platform supporting NUMA.
> > >>>
> > >>> I have installed libnuma on a NUMA and non NUMA machine.
> > >>> Compiled and ran following code on those machine and it could detect
> > >>> the numa availability. Could you add more details on the "error on
> > >>> platform supporting NUMA".
> > >>
> > >> I was saying that we do not need to detect NUMA.
> > >> If we are building DPDK for a NUMA architecture and libnuma is not
> > >> available, then it will be a problem that the user must catch.
> > >> The easiest way to catch it, is to fail on the include of numaif.h.
> > > 
> > > libnuma is not really _architecture_ depended.
> > > 
> > > Ilya Maximets patch disables NUMA support in common arm64 config.I
> > > think, It is not correct, We should not disable on any archs generic config.
> > > 
> > > IMO, It should be enabled by default in common config and then we can
> > > detect the presence of numaif.h, if not available OR a target does not need it
> > > explicitly, proceed with disabling
> > > RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
> > 
> > Detecting of headers is impossible until dpdk doesn't have dynamic build
> > configuration system like autotools, CMake or meson.
> > Right now we just can't do that.
> 
> I agree. Unless if we do something like linux kernel does it below
> http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
> 
> Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
> generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
> any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.

In this case, you can enable it in common_base and disable it only for
armv7 and dpaa2.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v7 0/2] Balanced allocation of hugepages
  2017-06-21 10:08                         ` [PATCH v7 0/2] Balanced allocation of hugepages Ilya Maximets
       [not found]                           ` <CGME20170621100841eucas1p1114078b1d8a38920c3633e9bddbabc02@eucas1p1.samsung.com>
       [not found]                           ` <CGME20170621100845eucas1p2a457b1694d20de8e2d8126df679c43ae@eucas1p2.samsung.com>
@ 2017-06-26 10:44                           ` Ilya Maximets
  2017-06-26 14:07                             ` Jerin Jacob
  2017-06-26 15:33                             ` Sergio Gonzalez Monroy
       [not found]                           ` <CGME20170627084632eucas1p28133ee4b425b3938e2564fca03e1140b@eucas1p2.samsung.com>
  3 siblings, 2 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-26 10:44 UTC (permalink / raw)
  To: dev, Sergio Gonzalez Monroy, Thomas Monjalon, Bruce Richardson
  Cc: David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, Jerin Jacob

So, what do you think about this version?
Is it ready for merge, or are some additional changes needed?

Best regards, Ilya Maximets.

On 21.06.2017 13:08, Ilya Maximets wrote:
> Version 7:
> 	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
> 
> Version 6:
> 	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> 	  returned. Enabled by default for x86, ppc and thunderx.
> 
> Version 5:
> 	* Fixed shared build. (Automated build test will fail
> 	  anyway because libnuma-devel not installed on build servers)
> 
> Version 4:
> 	* Fixed work on systems without NUMA by adding check for NUMA
> 	  support in kernel.
> 
> Version 3:
> 	* Implemented hybrid schema for allocation.
> 	* Fixed not needed mempolicy change while remapping. (orig = 0)
> 	* Added patch to enable VHOST_NUMA by default.
> 
> Version 2:
> 	* rebased (fuzz in Makefile)
> 
> Ilya Maximets (2):
>   mem: balanced allocation of hugepages
>   config: enable vhost numa awareness by default
> 
>  config/common_base                           |   1 +
>  config/common_linuxapp                       |   3 +
>  config/defconfig_arm-armv7a-linuxapp-gcc     |   4 +
>  config/defconfig_arm64-armv8a-linuxapp-gcc   |   4 +
>  config/defconfig_arm64-thunderx-linuxapp-gcc |   4 +
>  lib/librte_eal/linuxapp/eal/Makefile         |   3 +
>  lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
>  mk/rte.app.mk                                |   3 +
>  8 files changed, 123 insertions(+), 4 deletions(-)
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v7 0/2] Balanced allocation of hugepages
  2017-06-26 10:44                           ` [PATCH v7 0/2] Balanced allocation of hugepages Ilya Maximets
@ 2017-06-26 14:07                             ` Jerin Jacob
  2017-06-26 15:33                             ` Sergio Gonzalez Monroy
  1 sibling, 0 replies; 99+ messages in thread
From: Jerin Jacob @ 2017-06-26 14:07 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, Sergio Gonzalez Monroy, Thomas Monjalon, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

-----Original Message-----
> Date: Mon, 26 Jun 2017 13:44:08 +0300
> From: Ilya Maximets <i.maximets@samsung.com>
> To: dev@dpdk.org, Sergio Gonzalez Monroy
>  <sergio.gonzalez.monroy@intel.com>, Thomas Monjalon <thomas@monjalon.net>,
>  Bruce Richardson <bruce.richardson@intel.com>
> CC: David Marchand <david.marchand@6wind.com>, Heetae Ahn
>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
>  Pei <yulong.pei@intel.com>, Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Subject: Re: [PATCH v7 0/2] Balanced allocation of hugepages
> User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101
>  Thunderbird/45.8.0
> 
> So, what do you think about this version?
> Is it ready for merge or some additional changes needed?

Looks like the following comment has not been addressed:
http://dpdk.org/ml/archives/dev/2017-June/068398.html

> 
> Best regards, Ilya Maximets.
> 
> On 21.06.2017 13:08, Ilya Maximets wrote:
> > Version 7:
> > 	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
> > 
> > Version 6:
> > 	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> > 	  returned. Enabled by default for x86, ppc and thunderx.
> > 
> > Version 5:
> > 	* Fixed shared build. (Automated build test will fail
> > 	  anyway because libnuma-devel not installed on build servers)
> > 
> > Version 4:
> > 	* Fixed work on systems without NUMA by adding check for NUMA
> > 	  support in kernel.
> > 
> > Version 3:
> > 	* Implemented hybrid schema for allocation.
> > 	* Fixed not needed mempolicy change while remapping. (orig = 0)
> > 	* Added patch to enable VHOST_NUMA by default.
> > 
> > Version 2:
> > 	* rebased (fuzz in Makefile)
> > 
> > Ilya Maximets (2):
> >   mem: balanced allocation of hugepages
> >   config: enable vhost numa awareness by default
> > 
> >  config/common_base                           |   1 +
> >  config/common_linuxapp                       |   3 +
> >  config/defconfig_arm-armv7a-linuxapp-gcc     |   4 +
> >  config/defconfig_arm64-armv8a-linuxapp-gcc   |   4 +
> >  config/defconfig_arm64-thunderx-linuxapp-gcc |   4 +
> >  lib/librte_eal/linuxapp/eal/Makefile         |   3 +
> >  lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
> >  mk/rte.app.mk                                |   3 +
> >  8 files changed, 123 insertions(+), 4 deletions(-)
> > 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v7 0/2] Balanced allocation of hugepages
  2017-06-26 10:44                           ` [PATCH v7 0/2] Balanced allocation of hugepages Ilya Maximets
  2017-06-26 14:07                             ` Jerin Jacob
@ 2017-06-26 15:33                             ` Sergio Gonzalez Monroy
  2017-06-27  8:42                               ` Ilya Maximets
  1 sibling, 1 reply; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-06-26 15:33 UTC (permalink / raw)
  To: Ilya Maximets, dev, Thomas Monjalon, Bruce Richardson
  Cc: David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, Jerin Jacob

On 26/06/2017 11:44, Ilya Maximets wrote:
> So, what do you think about this version?
> Is it ready for merge or some additional changes needed?

I was just having another look at it and was wondering whether we should
re-set the old policy instead of MPOL_DEFAULT?

Also noticed that we should probably increase essential_memory by
hugepage_sz in case of SIGBUS; I think there is an issue if we have more
than one page size.
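
(A rough sketch of the "re-set the old policy" idea, using libnuma helpers
to size the nodemask -- essentially what the v8 patch further down in the
thread ends up doing; the function names here are illustrative, not the
actual EAL code.)

#include <numa.h>
#include <numaif.h>

static int oldpolicy;
static struct bitmask *oldmask;

/* Remember the task's current memory policy before switching to
 * MPOL_PREFERRED for hugepage mapping. */
static void save_mempolicy(void)
{
	oldmask = numa_allocate_nodemask();
	if (get_mempolicy(&oldpolicy, oldmask->maskp, oldmask->size + 1,
			  0, 0) < 0)
		oldpolicy = MPOL_DEFAULT;	/* best-effort fallback */
}

/* Put back whatever policy was active before, instead of forcing
 * MPOL_DEFAULT unconditionally. */
static void restore_mempolicy(void)
{
	if (oldpolicy == MPOL_DEFAULT)
		numa_set_localalloc();
	else if (set_mempolicy(oldpolicy, oldmask->maskp,
			       oldmask->size + 1) < 0)
		numa_set_localalloc();
	numa_free_nodemask(oldmask);
}

int main(void)
{
	if (numa_available() < 0)
		return 0;		/* nothing to do without NUMA */
	save_mempolicy();
	numa_set_preferred(0);		/* e.g. map pages preferring node 0 */
	/* ... mmap() hugepages here ... */
	restore_mempolicy();
	return 0;
}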

Thanks,
Sergio

> Best regards, Ilya Maximets.
>
> On 21.06.2017 13:08, Ilya Maximets wrote:
>> Version 7:
>> 	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
>>
>> Version 6:
>> 	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>> 	  returned. Enabled by default for x86, ppc and thunderx.
>>
>> Version 5:
>> 	* Fixed shared build. (Automated build test will fail
>> 	  anyway because libnuma-devel not installed on build servers)
>>
>> Version 4:
>> 	* Fixed work on systems without NUMA by adding check for NUMA
>> 	  support in kernel.
>>
>> Version 3:
>> 	* Implemented hybrid schema for allocation.
>> 	* Fixed not needed mempolicy change while remapping. (orig = 0)
>> 	* Added patch to enable VHOST_NUMA by default.
>>
>> Version 2:
>> 	* rebased (fuzz in Makefile)
>>
>> Ilya Maximets (2):
>>    mem: balanced allocation of hugepages
>>    config: enable vhost numa awareness by default
>>
>>   config/common_base                           |   1 +
>>   config/common_linuxapp                       |   3 +
>>   config/defconfig_arm-armv7a-linuxapp-gcc     |   4 +
>>   config/defconfig_arm64-armv8a-linuxapp-gcc   |   4 +
>>   config/defconfig_arm64-thunderx-linuxapp-gcc |   4 +
>>   lib/librte_eal/linuxapp/eal/Makefile         |   3 +
>>   lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
>>   mk/rte.app.mk                                |   3 +
>>   8 files changed, 123 insertions(+), 4 deletions(-)
>>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v7 0/2] Balanced allocation of hugepages
  2017-06-26 15:33                             ` Sergio Gonzalez Monroy
@ 2017-06-27  8:42                               ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27  8:42 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, dev, Thomas Monjalon, Bruce Richardson
  Cc: David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, Jerin Jacob

On 26.06.2017 18:33, Sergio Gonzalez Monroy wrote:
> On 26/06/2017 11:44, Ilya Maximets wrote:
>> So, what do you think about this version?
>> Is it ready for merge or some additional changes needed?
> 
> I was just having another look at it and was wondering if we should re-set the old policy instead of DEFAULT?

Yes. I tried to do that previously, but it requires some manipulation to
get the maximum nodemask size supported by the kernel. So, I've implemented
this behaviour with the help of libnuma, which performs a lot of checks
during library initialisation (in its constructor). I'll send v8 with that
soon.

> Also noticed that we probably should increase essential_memory by hugepage_sz in
> case of SIGBUS? I think there is an issue if we have more than one size.

Good catch. Also fixed in v8. Additionally, I found that we need to restore
the old mempolicy in case of any error, so I replaced all the 'return i'
statements with a 'goto out' to a proper termination point.
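
(Heavily simplified sketch of that control-flow change, with hypothetical
helper names; the real hunks are in the v8 patch below. Every early
'return i' becomes a jump to a common exit that restores the policy.)

#include <stdio.h>

/* Stand-ins for the real save/restore helpers sketched earlier. */
static void save_mempolicy(void) { }
static void restore_mempolicy(void) { }
/* Hypothetical per-page mapping step; fails on the 4th page here. */
static int map_one_page(unsigned int i) { return i < 3 ? 0 : -1; }

static unsigned int map_pages(unsigned int nr_pages)
{
	unsigned int i;

	save_mempolicy();
	for (i = 0; i < nr_pages; i++) {
		if (map_one_page(i) < 0)
			goto out;	/* was: return i; */
	}
out:
	restore_mempolicy();		/* now runs on every exit path */
	return i;
}

int main(void)
{
	printf("mapped %u pages\n", map_pages(8));
	return 0;
}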

> 
> Thanks,
> Sergio
> 
>> Best regards, Ilya Maximets.
>>
>> On 21.06.2017 13:08, Ilya Maximets wrote:
>>> Version 7:
>>>     * RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
>>>
>>> Version 6:
>>>     * Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>       returned. Enabled by default for x86, ppc and thunderx.
>>>
>>> Version 5:
>>>     * Fixed shared build. (Automated build test will fail
>>>       anyway because libnuma-devel not installed on build servers)
>>>
>>> Version 4:
>>>     * Fixed work on systems without NUMA by adding check for NUMA
>>>       support in kernel.
>>>
>>> Version 3:
>>>     * Implemented hybrid schema for allocation.
>>>     * Fixed not needed mempolicy change while remapping. (orig = 0)
>>>     * Added patch to enable VHOST_NUMA by default.
>>>
>>> Version 2:
>>>     * rebased (fuzz in Makefile)
>>>
>>> Ilya Maximets (2):
>>>    mem: balanced allocation of hugepages
>>>    config: enable vhost numa awareness by default
>>>
>>>   config/common_base                           |   1 +
>>>   config/common_linuxapp                       |   3 +
>>>   config/defconfig_arm-armv7a-linuxapp-gcc     |   4 +
>>>   config/defconfig_arm64-armv8a-linuxapp-gcc   |   4 +
>>>   config/defconfig_arm64-thunderx-linuxapp-gcc |   4 +
>>>   lib/librte_eal/linuxapp/eal/Makefile         |   3 +
>>>   lib/librte_eal/linuxapp/eal/eal_memory.c     | 105 ++++++++++++++++++++++++++-
>>>   mk/rte.app.mk                                |   3 +
>>>   8 files changed, 123 insertions(+), 4 deletions(-)
>>>
> 
> 
> 
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v8 0/2] Balanced allocation of hugepages
       [not found]                           ` <CGME20170627084632eucas1p28133ee4b425b3938e2564fca03e1140b@eucas1p2.samsung.com>
@ 2017-06-27  8:46                             ` Ilya Maximets
       [not found]                               ` <CGME20170627084637eucas1p2c591db905fa9f143fa5dbb3c08fae82f@eucas1p2.samsung.com>
                                                 ` (2 more replies)
  0 siblings, 3 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27  8:46 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Version 8:
	* helper functions from libnuma used to set mempolicy and
	  work with cpu mask.
	* Function now restores previous mempolicy instead of MPOL_DEFAULT.
	* Fixed essential_memory on SIGBUS.
	* Fixed restoring of mempolicy in case of errors (goto out).
	* Enabled by default for all linuxapp except armv7 and dpaa2.

Version 7:
	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	  returned. Enabled by default for x86, ppc and thunderx.

Version 5:
	* Fixed shared build. (Automated build test will fail
	  anyway because libnuma-devel not installed on build servers)

Version 4:
	* Fixed work on systems without NUMA by adding check for NUMA
	  support in kernel.

Version 3:
	* Implemented hybrid schema for allocation.
	* Fixed not needed mempolicy change while remapping. (orig = 0)
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                        |   1 +
 config/common_linuxapp                    |   2 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   4 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   4 +
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 117 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 7 files changed, 126 insertions(+), 8 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v8 1/2] mem: balanced allocation of hugepages
       [not found]                               ` <CGME20170627084637eucas1p2c591db905fa9f143fa5dbb3c08fae82f@eucas1p2.samsung.com>
@ 2017-06-27  8:46                                 ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27  8:46 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Currently EAL allocates hugepages one by one, not paying attention
to the NUMA node from which the allocation was done.

Such behaviour leads to allocation failure if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested not only from the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the mempolicy MPOL_PREFERRED for each hugepage
to one of the requested nodes using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate only as many hugepages from NUMA node N
		     as are needed to fit the memory requested for
		     this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort the pages and choose the most suitable ones.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

A new config option, RTE_EAL_NUMA_AWARE_HUGEPAGES, is introduced and
enabled by default for linuxapp, except on armv7 and dpaa2.
Enabling this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base                        |   1 +
 config/common_linuxapp                    |   1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 117 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 7 files changed, 123 insertions(+), 8 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 #
 # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
 CONFIG_RTE_EXEC_ENV="linuxapp"
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
 CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
 CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
 CONFIG_RTE_TOOLCHAIN="gcc"
 CONFIG_RTE_TOOLCHAIN_GCC=y
 
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
 
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
 
 CONFIG_RTE_PKTMBUF_HEADROOM=256
 
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 #
 # Compile Support Libraries for DPAA2
 #
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
 LDLIBS += -lpthread
 LDLIBS += -lgcc_s
 LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..6d2b199 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,77 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory __rte_unused, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	int node_id = -1;
+	int essential_prev = 0;
+	int oldpolicy;
+	struct bitmask *oldmask = numa_allocate_nodemask();
+	bool have_numa = true;
+	unsigned long maxnode = 0;
+
+	/* Check if kernel supports NUMA. */
+	if (numa_available() != 0) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		have_numa = false;
+	}
+
+	if (orig && have_numa) {
+		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+		if (get_mempolicy(&oldpolicy,
+				  oldmask->maskp, oldmask->size + 1, 0, 0) < 0) {
+			RTE_LOG(ERR, EAL,
+				"Failed to get current mempolicy: %s. "
+				"Assuming MPOL_DEFAULT.\n", strerror(errno));
+			oldpolicy = MPOL_DEFAULT;
+		}
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < maxnode; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == maxnode) {
+				node_id = (node_id + 1) % maxnode;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= maxnode;
+				}
+			} else {
+				node_id = j;
+				essential_prev = essential_memory[j];
+
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			numa_set_preferred(node_id);
+		}
+#endif
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +493,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		if (fd < 0) {
 			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
 					strerror(errno));
-			return i;
+			goto out;
 		}
 
 		/* map the segment, and populate page tables,
@@ -433,7 +504,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
 					strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		if (orig) {
@@ -458,7 +529,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 				munmap(virtaddr, hugepage_sz);
 				close(fd);
 				unlink(hugepg_tbl[i].filepath);
-				return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				essential_memory[node_id] = essential_prev;
+#endif
+				goto out;
 			}
 			*(int *)virtaddr = 0;
 		}
@@ -469,7 +543,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
 				__func__, strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		close(fd);
@@ -478,6 +552,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode) {
+		RTE_LOG(DEBUG, EAL,
+			"Restoring previous memory policy: %d\n", oldpolicy);
+		if (oldpolicy == MPOL_DEFAULT) {
+			numa_set_localalloc();
+		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
+					 oldmask->size + 1) < 0) {
+			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+				strerror(errno));
+			numa_set_localalloc();
+		}
+	}
+	numa_free_cpumask(oldmask);
+#endif
 	return i;
 }
 
@@ -562,6 +652,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
@@ -1000,6 +1095,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1017,7 +1117,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1161,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v8 2/2] config: enable vhost numa awareness by default
       [not found]                               ` <CGME20170627084641eucas1p182cac065efef74445ffa234a6dcbb23d@eucas1p1.samsung.com>
@ 2017-06-27  8:46                                 ` Ilya Maximets
  2017-06-27  9:18                                   ` Hemant Agrawal
  2017-06-27  9:19                                   ` Thomas Monjalon
  0 siblings, 2 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27  8:46 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_linuxapp                    | 1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
 3 files changed, 3 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
 CONFIG_RTE_LIBRTE_KNI=y
 CONFIG_RTE_LIBRTE_PMD_KNI=y
 CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_PMD_VHOST=y
 CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
 CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 
 # NUMA is not supported on ARM
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
 
 # Doesn't support NUMA
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 #
 # Compile Support Libraries for DPAA2
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-21 11:22                                                       ` Jerin Jacob
  2017-06-21 11:29                                                         ` Thomas Monjalon
@ 2017-06-27  9:13                                                         ` Hemant Agrawal
  2017-06-27  9:26                                                           ` Thomas Monjalon
  1 sibling, 1 reply; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-27  9:13 UTC (permalink / raw)
  To: Jerin Jacob, Ilya Maximets
  Cc: Thomas Monjalon, Sergio Gonzalez Monroy, dev, Bruce Richardson,
	David Marchand, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei

On 6/21/2017 4:52 PM, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Wed, 21 Jun 2017 13:36:58 +0300
>> From: Ilya Maximets <i.maximets@samsung.com>
>> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>, Thomas Monjalon
>>  <thomas@monjalon.net>
>> CC: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>>  Agrawal <hemant.agrawal@nxp.com>, dev@dpdk.org, Bruce Richardson
>>  <bruce.richardson@intel.com>, David Marchand <david.marchand@6wind.com>,
>>  Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>,
>>  Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>
>> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
>> User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101
>>  Thunderbird/45.8.0
>>
>> On 21.06.2017 13:29, Jerin Jacob wrote:
>>> -----Original Message-----
>>>> Date: Wed, 21 Jun 2017 11:58:12 +0200
>>>> From: Thomas Monjalon <thomas@monjalon.net>
>>>> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
>>>> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>>>>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
>>>>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
>>>>  Marchand <david.marchand@6wind.com>, Heetae Ahn
>>>>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
>>>>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
>>>>  Pei <yulong.pei@intel.com>
>>>> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
>>>>
>>>> 21/06/2017 11:27, Jerin Jacob:
>>>>> -----Original Message-----
>>>>>> Date: Wed, 21 Jun 2017 10:49:14 +0200
>>>>>> From: Thomas Monjalon <thomas@monjalon.net>
>>>>>> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
>>>>>> Cc: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Hemant
>>>>>>  Agrawal <hemant.agrawal@nxp.com>, Ilya Maximets <i.maximets@samsung.com>,
>>>>>>  dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>, David
>>>>>>  Marchand <david.marchand@6wind.com>, Heetae Ahn
>>>>>>  <heetae82.ahn@samsung.com>, Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng
>>>>>>  Tan <jianfeng.tan@intel.com>, Neil Horman <nhorman@tuxdriver.com>, Yulong
>>>>>>  Pei <yulong.pei@intel.com>
>>>>>> Subject: Re: [PATCH v5 0/2] Balanced allocation of hugepages
>>>>>>
>>>>>> 21/06/2017 10:41, Jerin Jacob:
>>>>>>>>> 1. There are many machines (arm/ppc), which do not support NUMA.
>>>>>>>>>
>>>>>>>>> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
>>>>>>>>>
>>>>>>>>
>>>>>>>> I did find that link too, last modified 4 years ago.
>>>>>>>> Despite that, I could not find any ARM references in libnuma sources, but
>>>>>>>> Jerin proved that there is support for it.
>>>>>>>>
>>>>>>>> http://oss.sgi.com/projects/libnuma/
>>>>>>>> https://github.com/numactl/numactl
>>>>>>>
>>>>>>> Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
>>>>>>> I guess we are talking about build time time dependency with libnuma here.
>>>>>>> Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
>>>>>>> libnuma if it is present in rootfs. Just that at runtime, it will return
>>>>>>> NUMA support not available. Correct?
>>>>>>>
>>>>>>> How hard is detect the presence of "numaif.h" if existing build system does not
>>>>>>> support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>> if build environment has "numaif.h".
>>>>>>>
>>>>>>> Some example in linux kernel build system:
>>>>>>> http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
>>>>>>
>>>>>> I think we should not try to detect numaif.h, because it should be
>>>>>> an error on platform supporting NUMA.
>>>>>
>>>>> I have installed libnuma on a NUMA and non NUMA machine.
>>>>> Compiled and ran following code on those machine and it could detect
>>>>> the numa availability. Could you add more details on the "error on
>>>>> platform supporting NUMA".
>>>>
>>>> I was saying that we do not need to detect NUMA.
>>>> If we are building DPDK for a NUMA architecture and libnuma is not
>>>> available, then it will be a problem that the user must catch.
>>>> The easiest way to catch it, is to fail on the include of numaif.h.
>>>
>>> libnuma is not really _architecture_ depended.
>>>
>>> Ilya Maximets patch disables NUMA support in common arm64 config.I
>>> think, It is not correct, We should not disable on any archs generic config.
>>>
>>> IMO, It should be enabled by default in common config and then we can
>>> detect the presence of numaif.h, if not available OR a target does not need it
>>> explicitly, proceed with disabling
>>> RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
>>
>> Detecting of headers is impossible until dpdk doesn't have dynamic build
>> configuration system like autotools, CMake or meson.
>> Right now we just can't do that.
>
> I agree. Unless if we do something like linux kernel does it below
> http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
>
> Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
> generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
> any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.

No, this is not acceptable. It should not be enabled in the generic arm64
config. It can be enabled for specific ARM platforms which support the
NUMA architecture.
We also use the generic ARM config on various of our platforms when running
with non-dpaa and/or virtio-net, so enabling it will break all those
platforms.


>
>>
>>> No strong opinion on "failing the build" vs "printing a warning" in the
>>> absence of numaif.h
>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  8:46                                 ` [PATCH v8 2/2] config: enable vhost numa awareness by default Ilya Maximets
@ 2017-06-27  9:18                                   ` Hemant Agrawal
  2017-06-27  9:21                                     ` Thomas Monjalon
  2017-06-27  9:19                                   ` Thomas Monjalon
  1 sibling, 1 reply; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-27  9:18 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 6/27/2017 2:16 PM, Ilya Maximets wrote:
> It is safe to enable LIBRTE_VHOST_NUMA by default for all
> configurations where libnuma is already a default dependency.
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  config/common_linuxapp                    | 1 +
>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>  3 files changed, 3 insertions(+)
>
> diff --git a/config/common_linuxapp b/config/common_linuxapp
> index 64bef87..74c7d64 100644
> --- a/config/common_linuxapp
> +++ b/config/common_linuxapp
> @@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
>  CONFIG_RTE_LIBRTE_KNI=y
>  CONFIG_RTE_LIBRTE_PMD_KNI=y
>  CONFIG_RTE_LIBRTE_VHOST=y
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=y
>  CONFIG_RTE_LIBRTE_PMD_VHOST=y
>  CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
>  CONFIG_RTE_LIBRTE_PMD_TAP=y
> diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
> index e06b1d4..00bc2ab 100644
> --- a/config/defconfig_arm-armv7a-linuxapp-gcc
> +++ b/config/defconfig_arm-armv7a-linuxapp-gcc
> @@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
>
>  # NUMA is not supported on ARM
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>
>  # ARM doesn't have support for vmware TSC map
>  CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
> diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> index f78449d..b061fb0 100644
> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
>
>  # Doesn't support NUMA
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>
>  #
>  # Compile Support Libraries for DPAA2
>

-1
It should also be disabled for generic ARM64. This patch is breaking
generic arm64 config tests on our platforms and creating an unnecessary
dependency.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  8:46                                 ` [PATCH v8 2/2] config: enable vhost numa awareness by default Ilya Maximets
  2017-06-27  9:18                                   ` Hemant Agrawal
@ 2017-06-27  9:19                                   ` Thomas Monjalon
  2017-06-27 10:26                                     ` Ilya Maximets
  1 sibling, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-27  9:19 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Heetae Ahn,
	Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

27/06/2017 10:46, Ilya Maximets:
> It is safe to enable LIBRTE_VHOST_NUMA by default for all
> configurations where libnuma is already a default dependency.
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  config/common_linuxapp                    | 1 +
>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>  3 files changed, 3 insertions(+)

I forgot to ask you to update devtools/test-build.sh.
DPDK_DEP_NUMA can be removed.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v7 2/2] config: enable vhost numa awareness by default
  2017-06-21 10:08                             ` [PATCH v7 2/2] config: enable vhost numa awareness by default Ilya Maximets
@ 2017-06-27  9:20                               ` Hemant Agrawal
  0 siblings, 0 replies; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-27  9:20 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 6/21/2017 3:38 PM, Ilya Maximets wrote:
> It is safe to enable LIBRTE_VHOST_NUMA by default for all
> configurations where libnuma is already a default dependency.
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  config/common_linuxapp                       | 1 +
>  config/defconfig_arm-armv7a-linuxapp-gcc     | 1 +
>  config/defconfig_arm64-armv8a-linuxapp-gcc   | 1 +
>  config/defconfig_arm64-thunderx-linuxapp-gcc | 1 +
>  4 files changed, 4 insertions(+)
>
> diff --git a/config/common_linuxapp b/config/common_linuxapp
> index 050526f..2e44434 100644
> --- a/config/common_linuxapp
> +++ b/config/common_linuxapp
> @@ -43,6 +43,7 @@ CONFIG_RTE_KNI_KMOD=y
>  CONFIG_RTE_LIBRTE_KNI=y
>  CONFIG_RTE_LIBRTE_PMD_KNI=y
>  CONFIG_RTE_LIBRTE_VHOST=y
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=y
>  CONFIG_RTE_LIBRTE_PMD_VHOST=y
>  CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
>  CONFIG_RTE_LIBRTE_PMD_TAP=y
> diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
> index e06b1d4..00bc2ab 100644
> --- a/config/defconfig_arm-armv7a-linuxapp-gcc
> +++ b/config/defconfig_arm-armv7a-linuxapp-gcc
> @@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
>
>  # NUMA is not supported on ARM
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>
>  # ARM doesn't have support for vmware TSC map
>  CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
> diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
> index 2c67cdc..d190afb 100644
> --- a/config/defconfig_arm64-armv8a-linuxapp-gcc
> +++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
> @@ -49,6 +49,7 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
>
>  # Most ARMv8 systems doesn't support NUMA
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>
>  CONFIG_RTE_EAL_IGB_UIO=n
>
> diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
> index 3e79fa8..7b07b7d 100644
> --- a/config/defconfig_arm64-thunderx-linuxapp-gcc
> +++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
> @@ -39,6 +39,7 @@ CONFIG_RTE_MAX_LCORE=96
>
>  # ThunderX supports NUMA
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=y
>
>  #
>  # Compile PMD for octeontx sso event device
>
This particular version of the patch is:
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  9:18                                   ` Hemant Agrawal
@ 2017-06-27  9:21                                     ` Thomas Monjalon
  2017-06-27  9:41                                       ` Hemant Agrawal
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-27  9:21 UTC (permalink / raw)
  To: Hemant Agrawal
  Cc: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

27/06/2017 11:18, Hemant Agrawal:
> On 6/27/2017 2:16 PM, Ilya Maximets wrote:
> > It is safe to enable LIBRTE_VHOST_NUMA by default for all
> > configurations where libnuma is already a default dependency.
> >
> > Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> > ---
> >  config/common_linuxapp                    | 1 +
> >  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
> >  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
> >  3 files changed, 3 insertions(+)
[...]
> > --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> > +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> > @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
> >
> >  # Doesn't support NUMA
> >  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
> > +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
> >
> >  #
> >  # Compile Support Libraries for DPAA2
> >
> 
> -1
> It should also be disabled for generic ARM64. This patch is breaking 
> generic arm64 config tests on our platforms and creating a unnecessary 
> dependency.

What do you mean? Which ARM64 platform is it breaking?
We can specifically disable it on more platforms.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-27  9:13                                                         ` Hemant Agrawal
@ 2017-06-27  9:26                                                           ` Thomas Monjalon
  2017-06-27  9:48                                                             ` Hemant Agrawal
  0 siblings, 1 reply; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-27  9:26 UTC (permalink / raw)
  To: Hemant Agrawal
  Cc: Jerin Jacob, Ilya Maximets, Sergio Gonzalez Monroy, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

27/06/2017 11:13, Hemant Agrawal:
> On 6/21/2017 4:52 PM, Jerin Jacob wrote:
> > From: Ilya Maximets <i.maximets@samsung.com>
> >>> From: Thomas Monjalon <thomas@monjalon.net>
> >>>>>> 21/06/2017 10:41, Jerin Jacob:
> >>>>>>>>> 1. There are many machines (arm/ppc), which do not support NUMA.
> >>>>>>>>>
> >>>>>>>>> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
> >>>>>>>>>
> >>>>>>>>
> >>>>>>>> I did find that link too, last modified 4 years ago.
> >>>>>>>> Despite that, I could not find any ARM references in libnuma sources, but
> >>>>>>>> Jerin proved that there is support for it.
> >>>>>>>>
> >>>>>>>> http://oss.sgi.com/projects/libnuma/
> >>>>>>>> https://github.com/numactl/numactl
> >>>>>>>
> >>>>>>> Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
> >>>>>>> I guess we are talking about build time time dependency with libnuma here.
> >>>>>>> Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
> >>>>>>> libnuma if it is present in rootfs. Just that at runtime, it will return
> >>>>>>> NUMA support not available. Correct?
> >>>>>>>
> >>>>>>> How hard is detect the presence of "numaif.h" if existing build system does not
> >>>>>>> support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> >>>>>>> if build environment has "numaif.h".
> >>>>>>>
> >>>>>>> Some example in linux kernel build system:
> >>>>>>> http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
> >>>>>>
> >>>>>> I think we should not try to detect numaif.h, because it should be
> >>>>>> an error on platform supporting NUMA.
> >>>>>
> >>>>> I have installed libnuma on a NUMA and non NUMA machine.
> >>>>> Compiled and ran following code on those machine and it could detect
> >>>>> the numa availability. Could you add more details on the "error on
> >>>>> platform supporting NUMA".
> >>>>
> >>>> I was saying that we do not need to detect NUMA.
> >>>> If we are building DPDK for a NUMA architecture and libnuma is not
> >>>> available, then it will be a problem that the user must catch.
> >>>> The easiest way to catch it, is to fail on the include of numaif.h.
> >>>
> >>> libnuma is not really _architecture_ depended.
> >>>
> >>> Ilya Maximets patch disables NUMA support in common arm64 config.I
> >>> think, It is not correct, We should not disable on any archs generic config.
> >>>
> >>> IMO, It should be enabled by default in common config and then we can
> >>> detect the presence of numaif.h, if not available OR a target does not need it
> >>> explicitly, proceed with disabling
> >>> RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
> >>
> >> Detecting of headers is impossible until dpdk doesn't have dynamic build
> >> configuration system like autotools, CMake or meson.
> >> Right now we just can't do that.
> >
> > I agree. Unless if we do something like linux kernel does it below
> > http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
> >
> > Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
> > generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
> > any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.
> 
> No, this is not acceptable. it should not be enabled in generic arm64. 
> It can be enabled in specific ARM platforms, which support NUMA 
> architecture.
> We also use generic ARM code on various of our platform when running 
> with non-dpaa and/or virtio-net. So enabling it will break all those 
> platforms.

Which platforms?
It is your non-upstreamed code. You have to deal with it.
You should disable NUMA in the config of these platforms.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  9:21                                     ` Thomas Monjalon
@ 2017-06-27  9:41                                       ` Hemant Agrawal
  2017-06-27  9:59                                         ` Thomas Monjalon
  2017-06-27  9:59                                         ` Jerin Jacob
  0 siblings, 2 replies; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-27  9:41 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 6/27/2017 2:51 PM, Thomas Monjalon wrote:
> 27/06/2017 11:18, Hemant Agrawal:
>> On 6/27/2017 2:16 PM, Ilya Maximets wrote:
>>> It is safe to enable LIBRTE_VHOST_NUMA by default for all
>>> configurations where libnuma is already a default dependency.
>>>
>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>> ---
>>>  config/common_linuxapp                    | 1 +
>>>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>>>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>>>  3 files changed, 3 insertions(+)
> [...]
>>> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>> @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
>>>
>>>  # Doesn't support NUMA
>>>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>>> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>>>
>>>  #
>>>  # Compile Support Libraries for DPAA2
>>>
>>
>> -1
>> It should also be disabled for generic ARM64. This patch is breaking
>> generic arm64 config tests on our platforms and creating a unnecessary
>> dependency.
>
> What do you mean? Which ARM64 platform is it breaking?
> We can specifically disable it on more platforms.
>
Unlike x86, ARM only represents a core architecture.
Different platforms can integrate these cores differently in their SoCs.
To my knowledge, the stock ARM v8 cores do not provide support for NUMA.
Some vendors have modified ARM cores (e.g. Cavium) to support a NUMA
architecture. However, that is not a common phenomenon.
NUMA config should not be the default for the generic ARM config. It
should be enabled only for architectures supporting it.

So, the *arm64-armv8a-linuxapp-gcc* config is being used by several
vendors, including NXP. E.g. we use this config on several of our low-end
systems (non-dpaa). Also, we use it when running in a VM with virtio
interfaces on all of our different platforms (non-dpaa, dpaa1, dpaa2 etc).

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v5 0/2] Balanced allocation of hugepages
  2017-06-27  9:26                                                           ` Thomas Monjalon
@ 2017-06-27  9:48                                                             ` Hemant Agrawal
  0 siblings, 0 replies; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-27  9:48 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Jerin Jacob, Ilya Maximets, Sergio Gonzalez Monroy, dev,
	Bruce Richardson, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei

On 6/27/2017 2:56 PM, Thomas Monjalon wrote:
> 27/06/2017 11:13, Hemant Agrawal:
>> On 6/21/2017 4:52 PM, Jerin Jacob wrote:
>>> From: Ilya Maximets <i.maximets@samsung.com>
>>>>> From: Thomas Monjalon <thomas@monjalon.net>
>>>>>>>> 21/06/2017 10:41, Jerin Jacob:
>>>>>>>>>>> 1. There are many machines (arm/ppc), which do not support NUMA.
>>>>>>>>>>>
>>>>>>>>>>> https://wiki.linaro.org/LEG/Engineering/Kernel/NUMA
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> I did find that link too, last modified 4 years ago.
>>>>>>>>>> Despite that, I could not find any ARM references in libnuma sources, but
>>>>>>>>>> Jerin proved that there is support for it.
>>>>>>>>>>
>>>>>>>>>> http://oss.sgi.com/projects/libnuma/
>>>>>>>>>> https://github.com/numactl/numactl
>>>>>>>>>
>>>>>>>>> Those Linaro links are very old. ARM64 NUMA supported has been added in 4.7 kernel.
>>>>>>>>> I guess we are talking about build time time dependency with libnuma here.
>>>>>>>>> Correct? I think, Even with old arm64 kernel(< 4.6), You can build against
>>>>>>>>> libnuma if it is present in rootfs. Just that at runtime, it will return
>>>>>>>>> NUMA support not available. Correct?
>>>>>>>>>
>>>>>>>>> How hard is detect the presence of "numaif.h" if existing build system does not
>>>>>>>>> support it? If it trivial, we can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>>>>>>>>> if build environment has "numaif.h".
>>>>>>>>>
>>>>>>>>> Some example in linux kernel build system:
>>>>>>>>> http://lxr.linux.no/linux+v4.10.1/scripts/gcc-goto.sh
>>>>>>>>
>>>>>>>> I think we should not try to detect numaif.h, because it should be
>>>>>>>> an error on platform supporting NUMA.
>>>>>>>
>>>>>>> I have installed libnuma on a NUMA and non NUMA machine.
>>>>>>> Compiled and ran following code on those machine and it could detect
>>>>>>> the numa availability. Could you add more details on the "error on
>>>>>>> platform supporting NUMA".
>>>>>>
>>>>>> I was saying that we do not need to detect NUMA.
>>>>>> If we are building DPDK for a NUMA architecture and libnuma is not
>>>>>> available, then it will be a problem that the user must catch.
>>>>>> The easiest way to catch it, is to fail on the include of numaif.h.
>>>>>
>>>>> libnuma is not really _architecture_ depended.
>>>>>
>>>>> Ilya Maximets patch disables NUMA support in common arm64 config.I
>>>>> think, It is not correct, We should not disable on any archs generic config.
>>>>>
>>>>> IMO, It should be enabled by default in common config and then we can
>>>>> detect the presence of numaif.h, if not available OR a target does not need it
>>>>> explicitly, proceed with disabling
>>>>> RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES. I think, That is more portable.
>>>>
>>>> Detecting of headers is impossible until dpdk doesn't have dynamic build
>>>> configuration system like autotools, CMake or meson.
>>>> Right now we just can't do that.
>>>
>>> I agree. Unless if we do something like linux kernel does it below
>>> http://elixir.free-electrons.com/linux/latest/source/scripts/kconfig/lxdialog/check-lxdialog.sh
>>>
>>> Either way, I think, you can enable RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES in
>>> generic arm64 config and disable on defconfig_arm64-dpaa2-linuxapp-gcc(as Hemant requested) or
>>> any sub arch target that does not need in RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES.
>>
>> No, this is not acceptable. it should not be enabled in generic arm64.
>> It can be enabled in specific ARM platforms, which support NUMA
>> architecture.
>> We also use generic ARM code on various of our platform when running
>> with non-dpaa and/or virtio-net. So enabling it will break all those
>> platforms.
>
> Which platforms?
> It is your non-upstreamed code. You have to deal with it.
> You should disable NUMA in the config of these platforms.
>
>
See my reply in the other thread. This has nothing to do with upstreaming.

For all NXP low-end non-dpaa platforms, which don't have any
platform-specific code, we use "arm64-armv8a-linuxapp-gcc" as the build config.

There is no need to create special configs for these platforms.
Creating a "non-NUMA" generic config would be overkill.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  9:41                                       ` Hemant Agrawal
@ 2017-06-27  9:59                                         ` Thomas Monjalon
  2017-06-27  9:59                                         ` Jerin Jacob
  1 sibling, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-27  9:59 UTC (permalink / raw)
  To: Hemant Agrawal, Jerin Jacob, Jianbo Liu
  Cc: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson

27/06/2017 11:41, Hemant Agrawal:
> On 6/27/2017 2:51 PM, Thomas Monjalon wrote:
> > 27/06/2017 11:18, Hemant Agrawal:
> >> On 6/27/2017 2:16 PM, Ilya Maximets wrote:
> >>> It is safe to enable LIBRTE_VHOST_NUMA by default for all
> >>> configurations where libnuma is already a default dependency.
> >>>
> >>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> >>> ---
> >>>  config/common_linuxapp                    | 1 +
> >>>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
> >>>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
> >>>  3 files changed, 3 insertions(+)
> > [...]
> >>> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> >>> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> >>> @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
> >>>
> >>>  # Doesn't support NUMA
> >>>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> >>> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
> >>>
> >>>  #
> >>>  # Compile Support Libraries for DPAA2
> >>>
> >>
> >> -1
> >> It should also be disabled for generic ARM64. This patch is breaking
> >> generic arm64 config tests on our platforms and creating a unnecessary
> >> dependency.
> >
> > What do you mean? Which ARM64 platform is it breaking?
> > We can specifically disable it on more platforms.
> >
> Unlike x86, ARM only represent a core architecture.
> Different platforms can integrate these cores differently in their SoCs.
> The stock ARM v8 cores do not provide support for NUMA in my knowledge.
> Some vendors have modified ARM cores (e.g. Cavium) to support NUMA 
> architecture. However that is not a common phenomena.
> NUMA config should not be default for generic ARM config. It should be 
> enabled only for architecture supporting it.
> 
> So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors 
> include NXP. e.g. We use this config on several of our low end systems 
> (non-dpaa). Also, we use it when running in VM with virtio interfaces on 
> all of our different platforms (non-dpaa, dpaa1, dpaa2 etc).

We need more opinions from ARM and Cavium.

The general idea in the DPDK config is to enable as many features as we can.
That conflicts with NUMA not being generally available on ARMv8.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  9:41                                       ` Hemant Agrawal
  2017-06-27  9:59                                         ` Thomas Monjalon
@ 2017-06-27  9:59                                         ` Jerin Jacob
  2017-06-27 12:17                                           ` Hemant Agrawal
  1 sibling, 1 reply; 99+ messages in thread
From: Jerin Jacob @ 2017-06-27  9:59 UTC (permalink / raw)
  To: Hemant Agrawal
  Cc: Thomas Monjalon, Ilya Maximets, dev, David Marchand,
	Sergio Gonzalez Monroy, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, Bruce Richardson

-----Original Message-----
> Date: Tue, 27 Jun 2017 15:11:07 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: Thomas Monjalon <thomas@monjalon.net>
> CC: Ilya Maximets <i.maximets@samsung.com>, dev@dpdk.org, David Marchand
>  <david.marchand@6wind.com>, Sergio Gonzalez Monroy
>  <sergio.gonzalez.monroy@intel.com>, Heetae Ahn <heetae82.ahn@samsung.com>,
>  Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>,
>  Neil Horman <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>,
>  Bruce Richardson <bruce.richardson@intel.com>, Jerin Jacob
>  <jerin.jacob@caviumnetworks.com>
> Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
> User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
>  Thunderbird/45.8.0
> 
> On 6/27/2017 2:51 PM, Thomas Monjalon wrote:
> > 27/06/2017 11:18, Hemant Agrawal:
> > > On 6/27/2017 2:16 PM, Ilya Maximets wrote:
> > > > It is safe to enable LIBRTE_VHOST_NUMA by default for all
> > > > configurations where libnuma is already a default dependency.
> > > > 
> > > > Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> > > > ---
> > > >  config/common_linuxapp                    | 1 +
> > > >  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
> > > >  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
> > > >  3 files changed, 3 insertions(+)
> > [...]
> > > > --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> > > > +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> > > > @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
> > > > 
> > > >  # Doesn't support NUMA
> > > >  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> > > > +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
> > > > 
> > > >  #
> > > >  # Compile Support Libraries for DPAA2
> > > > 
> > > 
> > > -1
> > > It should also be disabled for generic ARM64. This patch is breaking
> > > generic arm64 config tests on our platforms and creating a unnecessary
> > > dependency.
> > 
> > What do you mean? Which ARM64 platform is it breaking?
> > We can specifically disable it on more platforms.
> > 
> Unlike x86, ARM only represent a core architecture.
> Different platforms can integrate these cores differently in their SoCs.
> The stock ARM v8 cores do not provide support for NUMA in my knowledge.

A72 is just _an_ implementation of armv8, not the ARMv8 specification
itself. By specification it is NUMA capable and there are NUMA
implementations too.

> Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
> architecture. However that is not a common phenomena.
> NUMA config should not be default for generic ARM config. It should be
> enabled only for architecture supporting it.

It is just a build-time dependency. Right? If you provide the libnuma
package, it will work on non-NUMA systems as well. Right? An ARM64 libnuma
package is already available for major distributions.

My point is, I don't want to make the arm64 generic config an exceptional
case. If the DPDK common config creates a libnuma dependency then there is
no reason for arm64 not to have it. It is the same for x86 and powerpc,
non-NUMA systems too. Right?
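
As a minimal illustrative sketch (not DPDK code; names and values are made
up), this is the kind of runtime check that libnuma enables: the binary
links against libnuma at build time, and numa_available() reports at run
time whether the kernel actually provides NUMA, so the same build runs
unchanged on non-NUMA machines. Build with -lnuma.

/* Illustrative only -- not part of the patch or of DPDK. */
#include <stdio.h>
#include <numa.h>

int main(void)
{
	if (numa_available() < 0) {
		/* Kernel without NUMA support: fall back to default allocation. */
		printf("no NUMA support in this kernel\n");
		return 0;
	}
	printf("NUMA available, %d node(s) configured\n",
	       numa_num_configured_nodes());
	return 0;
}
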

> 
> So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
> include NXP. e.g. We use this config on several of our low end systems
> (non-dpaa). Also, we use it when running in VM with virtio interfaces on all
> of our different platforms (non-dpaa, dpaa1, dpaa2 etc).

On the same note, arm64-armv8a-linuxapp-gcc is used by other vendors for
server machines with NUMA, and if we want to keep creating new targets
there is no end to it.

How hard is it to install libnuma in a VM? There is already a package for it.


> 
> 
> 
> 
> 
> 
> 
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v9 0/2] Balanced allocation of hugepages
       [not found]                               ` <CGME20170627102447eucas1p15a57bbaaf46944c0935d4ef71b55cd83@eucas1p1.samsung.com>
@ 2017-06-27 10:24                                 ` Ilya Maximets
       [not found]                                   ` <CGME20170627102451eucas1p2254d8679f70e261b9db9d2123aa80091@eucas1p2.samsung.com>
                                                     ` (2 more replies)
  0 siblings, 3 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27 10:24 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Version 9:
	* Removed DPDK_DEP_NUMA from test-build.sh. Not needed
	  anymore.
	* Fixed an out-of-bounds write to essential_memory in the case
	  where socket-mem is not specified and SIGBUS occurred.

Version 8:
	* Helper functions from libnuma are now used to set mempolicy and
	  work with the cpu mask.
	* The function now restores the previous mempolicy instead of
	  MPOL_DEFAULT (see the save/restore sketch after this list).
	* Fixed essential_memory on SIGBUS.
	* Fixed restoring of mempolicy in case of errors (goto out).
	* Enabled by default for all linuxapp except armv7 and dpaa2.

Version 7:
	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	  returned. Enabled by default for x86, ppc and thunderx.

Version 5:
	* Fixed shared build. (Automated build test will fail
	  anyway because libnuma-devel not installed on build servers)

Version 4:
	* Fixed work on systems without NUMA by adding check for NUMA
	  support in kernel.

Version 3:
	* Implemented a hybrid schema for allocation.
	* Fixed an unneeded mempolicy change while remapping (orig = 0).
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)
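
Below is a small illustrative sketch (not the patch code itself) of the
mempolicy save/restore pattern mentioned in the v8 notes above: remember
the caller's policy with get_mempolicy(), switch temporarily, and put the
old policy back afterwards, falling back to local allocation when the old
policy was MPOL_DEFAULT, as the patch does. Build with -lnuma.

/* Illustrative sketch only -- not the patch code. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <numa.h>
#include <numaif.h>

int main(void)
{
	int oldpolicy;
	struct bitmask *oldmask;

	if (numa_available() < 0)
		return 1;                      /* kernel without NUMA support */

	oldmask = numa_allocate_nodemask();
	if (get_mempolicy(&oldpolicy, oldmask->maskp,
			  oldmask->size + 1, 0, 0) < 0) {
		fprintf(stderr, "get_mempolicy: %s\n", strerror(errno));
		oldpolicy = MPOL_DEFAULT;      /* assume the default policy */
	}

	numa_set_preferred(0);                 /* temporary policy; do the work here */

	if (oldpolicy == MPOL_DEFAULT) {
		numa_set_localalloc();         /* same fallback the patch uses */
	} else if (set_mempolicy(oldpolicy, oldmask->maskp,
				 oldmask->size + 1) < 0) {
		fprintf(stderr, "set_mempolicy: %s\n", strerror(errno));
		numa_set_localalloc();
	}
	numa_free_nodemask(oldmask);
	return 0;
}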

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                        |   1 +
 config/common_linuxapp                    |   2 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   4 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   4 +
 devtools/test-build.sh                    |   4 -
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 8 files changed, 129 insertions(+), 12 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v9 1/2] mem: balanced allocation of hugepages
       [not found]                                   ` <CGME20170627102451eucas1p2254d8679f70e261b9db9d2123aa80091@eucas1p2.samsung.com>
@ 2017-06-27 10:24                                     ` Ilya Maximets
  2017-06-28 10:30                                       ` Sergio Gonzalez Monroy
  2017-06-29  5:32                                       ` Hemant Agrawal
  0 siblings, 2 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27 10:24 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

Currently EAL allocates hugepages one by one, not paying attention
to which NUMA node the allocation was made from.

Such behaviour leads to allocation failure if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested from more than just the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting the mempolicy MPOL_PREFERRED for each hugepage
to one of the requested nodes, using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate only as many hugepages from NUMA node N
		     as needed to fit the memory requested for this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort the pages and choose the most suitable ones.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
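
The following is a small standalone sketch of the mechanism described
above, not the patch code itself (the real logic lives in
map_all_hugepages() in the diff below); the node list and sizes are made-up
illustrative values. It sets MPOL_PREFERRED to a different requested node
before each mapping is faulted in, so pages spread round-robin across the
nodes. Build with -lnuma.

/* Illustrative sketch only; nodes and sizes are made up. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <numa.h>

int main(void)
{
	int requested[] = { 0, 1 };  /* pretend --socket-mem asked for nodes 0 and 1 */
	int nr_nodes = 2, idx = 0;
	size_t sz = 2 * 1024 * 1024; /* stand-in for one hugepage */

	if (numa_available() < 0)
		return 1;            /* kernel without NUMA support */

	for (int i = 0; i < 8; i++) {
		int node = requested[idx];

		idx = (idx + 1) % nr_nodes;
		numa_set_preferred(node);  /* MPOL_PREFERRED for this node */
		void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		memset(p, 0, sz);          /* fault pages in on the preferred node */
		printf("mapping %d preferred on node %d\n", i, node);
	}
	numa_set_localalloc();             /* do not leave the preferred policy set */
	return 0;
}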

A new config option, RTE_EAL_NUMA_AWARE_HUGEPAGES, is introduced and
enabled by default for linuxapp, except on armv7 and dpaa2.
Enabling this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_base                        |   1 +
 config/common_linuxapp                    |   1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 7 files changed, 126 insertions(+), 8 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 #
 # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
 CONFIG_RTE_EXEC_ENV="linuxapp"
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
 CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
 CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
 CONFIG_RTE_TOOLCHAIN="gcc"
 CONFIG_RTE_TOOLCHAIN_GCC=y
 
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
 
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
 
 CONFIG_RTE_PKTMBUF_HEADROOM=256
 
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 #
 # Compile Support Libraries for DPAA2
 #
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
 LDLIBS += -lpthread
 LDLIBS += -lgcc_s
 LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory __rte_unused, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	int node_id = -1;
+	int essential_prev = 0;
+	int oldpolicy;
+	struct bitmask *oldmask = numa_allocate_nodemask();
+	bool have_numa = true;
+	unsigned long maxnode = 0;
+
+	/* Check if kernel supports NUMA. */
+	if (numa_available() != 0) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		have_numa = false;
+	}
+
+	if (orig && have_numa) {
+		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+		if (get_mempolicy(&oldpolicy, oldmask->maskp,
+				  oldmask->size + 1, 0, 0) < 0) {
+			RTE_LOG(ERR, EAL,
+				"Failed to get current mempolicy: %s. "
+				"Assuming MPOL_DEFAULT.\n", strerror(errno));
+			oldpolicy = MPOL_DEFAULT;
+		}
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < maxnode; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == maxnode) {
+				node_id = (node_id + 1) % maxnode;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= maxnode;
+				}
+				essential_prev = 0;
+			} else {
+				node_id = j;
+				essential_prev = essential_memory[j];
+
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			numa_set_preferred(node_id);
+		}
+#endif
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		if (fd < 0) {
 			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
 					strerror(errno));
-			return i;
+			goto out;
 		}
 
 		/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
 					strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 				munmap(virtaddr, hugepage_sz);
 				close(fd);
 				unlink(hugepg_tbl[i].filepath);
-				return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				if (maxnode)
+					essential_memory[node_id] =
+						essential_prev;
+#endif
+				goto out;
 			}
 			*(int *)virtaddr = 0;
 		}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
 				__func__, strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode) {
+		RTE_LOG(DEBUG, EAL,
+			"Restoring previous memory policy: %d\n", oldpolicy);
+		if (oldpolicy == MPOL_DEFAULT) {
+			numa_set_localalloc();
+		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
+					 oldmask->size + 1) < 0) {
+			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+				strerror(errno));
+			numa_set_localalloc();
+		}
+	}
+	numa_free_cpumask(oldmask);
+#endif
 	return i;
 }
 
@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v9 2/2] config: enable vhost numa awareness by default
       [not found]                                   ` <CGME20170627102454eucas1p14b2a1024d77158ad0bf40d62e6ad4365@eucas1p1.samsung.com>
@ 2017-06-27 10:24                                     ` Ilya Maximets
  2017-06-29  5:31                                       ` Hemant Agrawal
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27 10:24 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Ilya Maximets

It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

DPDK_DEP_NUMA is not needed anymore.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 config/common_linuxapp                    | 1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
 devtools/test-build.sh                    | 4 ----
 4 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
 CONFIG_RTE_LIBRTE_KNI=y
 CONFIG_RTE_LIBRTE_PMD_KNI=y
 CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_PMD_VHOST=y
 CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
 CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 
 # NUMA is not supported on ARM
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
 
 # Doesn't support NUMA
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 #
 # Compile Support Libraries for DPAA2
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7..0dbc04a 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -41,7 +41,6 @@ default_path=$PATH
 # - DPDK_DEP_ISAL_CRYPTO (y/[n])
 # - DPDK_DEP_LDFLAGS
 # - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
 # - DPDK_DEP_PCAP (y/[n])
 # - DPDK_DEP_SSL (y/[n])
 # - DPDK_DEP_SZE (y/[n])
@@ -124,7 +123,6 @@ reset_env ()
 	unset DPDK_DEP_ISAL_CRYPTO
 	unset DPDK_DEP_LDFLAGS
 	unset DPDK_DEP_MOFED
-	unset DPDK_DEP_NUMA
 	unset DPDK_DEP_PCAP
 	unset DPDK_DEP_SSL
 	unset DPDK_DEP_SZE
@@ -163,8 +161,6 @@ config () # <directory> <target> <options>
 		sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )
 
 		# Automatic configuration
-		test "$DPDK_DEP_NUMA" != y || \
-		sed -ri               's,(NUMA=)n,\1y,' $1/.config
 		sed -ri    's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
 		sed -ri             's,(BYPASS=)n,\1y,' $1/.config
 		test "$DPDK_DEP_ARCHIVE" != y || \
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  9:19                                   ` Thomas Monjalon
@ 2017-06-27 10:26                                     ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-27 10:26 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Heetae Ahn,
	Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 27.06.2017 12:19, Thomas Monjalon wrote:
> 27/06/2017 10:46, Ilya Maximets:
>> It is safe to enable LIBRTE_VHOST_NUMA by default for all
>> configurations where libnuma is already a default dependency.
>>
>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>> ---
>>  config/common_linuxapp                    | 1 +
>>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>>  3 files changed, 3 insertions(+)
> 
> I forgot to ask you to update devtools/test-build.sh.
> DPDK_DEP_NUMA can be removed.

Ok. Fixed in v9.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27  9:59                                         ` Jerin Jacob
@ 2017-06-27 12:17                                           ` Hemant Agrawal
  2017-06-27 12:45                                             ` Jerin Jacob
  0 siblings, 1 reply; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-27 12:17 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ilya Maximets, dev, David Marchand,
	Sergio Gonzalez Monroy, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, Bruce Richardson

On 6/27/2017 3:29 PM, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Tue, 27 Jun 2017 15:11:07 +0530
>> From: Hemant Agrawal <hemant.agrawal@nxp.com>
>> To: Thomas Monjalon <thomas@monjalon.net>
>> CC: Ilya Maximets <i.maximets@samsung.com>, dev@dpdk.org, David Marchand
>>  <david.marchand@6wind.com>, Sergio Gonzalez Monroy
>>  <sergio.gonzalez.monroy@intel.com>, Heetae Ahn <heetae82.ahn@samsung.com>,
>>  Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>,
>>  Neil Horman <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>,
>>  Bruce Richardson <bruce.richardson@intel.com>, Jerin Jacob
>>  <jerin.jacob@caviumnetworks.com>
>> Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
>> User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
>>  Thunderbird/45.8.0
>>
>> On 6/27/2017 2:51 PM, Thomas Monjalon wrote:
>>> 27/06/2017 11:18, Hemant Agrawal:
>>>> On 6/27/2017 2:16 PM, Ilya Maximets wrote:
>>>>> It is safe to enable LIBRTE_VHOST_NUMA by default for all
>>>>> configurations where libnuma is already a default dependency.
>>>>>
>>>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>>>> ---
>>>>>  config/common_linuxapp                    | 1 +
>>>>>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>>>>>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>>>>>  3 files changed, 3 insertions(+)
>>> [...]
>>>>> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>>>> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>>>> @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
>>>>>
>>>>>  # Doesn't support NUMA
>>>>>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>>>>> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>>>>>
>>>>>  #
>>>>>  # Compile Support Libraries for DPAA2
>>>>>
>>>>
>>>> -1
>>>> It should also be disabled for generic ARM64. This patch is breaking
>>>> generic arm64 config tests on our platforms and creating a unnecessary
>>>> dependency.
>>>
>>> What do you mean? Which ARM64 platform is it breaking?
>>> We can specifically disable it on more platforms.
>>>
>> Unlike x86, ARM only represent a core architecture.
>> Different platforms can integrate these cores differently in their SoCs.
>> The stock ARM v8 cores do not provide support for NUMA in my knowledge.
>
> A72 is just _an_ implementation of armv8. Not ARMv8 specification
> itself. By specification it is NUMA capable and there are NUMA
> implementation too.
>
>> Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
>> architecture. However that is not a common phenomena.
>> NUMA config should not be default for generic ARM config. It should be
>> enabled only for architecture supporting it.
>
> It just an build time dependency. Right? If you feed the libnuma package,
> it will NON NUMA as well. Right? ARM64 libnuma package is already
> available for major distributions.

yes, libnuma will work for non-NUMA.
>
> My point is, I don't want to make arm64 generic config an exceptional case,
> If DPDK common config creates libnuma dependency then there is no reason
> for arm64 not have it. It is same for x86 and powerpc, non numa systems
> too. Right?

x86 and powerpc configs are single-vendor based.
Common should be common and generic.

Why create an unnecessary dependency when we know that the support is
not uniform? It adds difficulties, e.g. for ARM cross compilation we will
also have to cross-compile libnuma-dev, and the Makefile will need a way
to specify the lib and include paths for libnuma and numa.h.


>
>>
>> So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
>> include NXP. e.g. We use this config on several of our low end systems
>> (non-dpaa). Also, we use it when running in VM with virtio interfaces on all
>> of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
>
> On the same note, arm64-armv8a-linuxapp-gcc used by other vendors for Server machines
> with NUMA and if want to keep creating new targets there is no end to it.
>
> How hard is to install libnuma on VM? There is already package for it.
>
>
>>
>>
>>
>>
>>
>>
>>
>>
>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27 12:17                                           ` Hemant Agrawal
@ 2017-06-27 12:45                                             ` Jerin Jacob
  2017-06-27 13:00                                               ` Hemant Agrawal
  0 siblings, 1 reply; 99+ messages in thread
From: Jerin Jacob @ 2017-06-27 12:45 UTC (permalink / raw)
  To: Hemant Agrawal
  Cc: Thomas Monjalon, Ilya Maximets, dev, David Marchand,
	Sergio Gonzalez Monroy, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, Bruce Richardson

-----Original Message-----
> Date: Tue, 27 Jun 2017 17:47:44 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> CC: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>  <i.maximets@samsung.com>, dev@dpdk.org, David Marchand
>  <david.marchand@6wind.com>, Sergio Gonzalez Monroy
>  <sergio.gonzalez.monroy@intel.com>, Heetae Ahn <heetae82.ahn@samsung.com>,
>  Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>,
>  Neil Horman <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>,
>  Bruce Richardson <bruce.richardson@intel.com>
> Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
> User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
>  Thunderbird/45.8.0
> 
> On 6/27/2017 3:29 PM, Jerin Jacob wrote:
> > -----Original Message-----
> > > Date: Tue, 27 Jun 2017 15:11:07 +0530
> > > From: Hemant Agrawal <hemant.agrawal@nxp.com>
> > > To: Thomas Monjalon <thomas@monjalon.net>
> > > CC: Ilya Maximets <i.maximets@samsung.com>, dev@dpdk.org, David Marchand
> > >  <david.marchand@6wind.com>, Sergio Gonzalez Monroy
> > >  <sergio.gonzalez.monroy@intel.com>, Heetae Ahn <heetae82.ahn@samsung.com>,
> > >  Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>,
> > >  Neil Horman <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>,
> > >  Bruce Richardson <bruce.richardson@intel.com>, Jerin Jacob
> > >  <jerin.jacob@caviumnetworks.com>
> > > Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
> > > User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
> > >  Thunderbird/45.8.0
> > > 
> > > On 6/27/2017 2:51 PM, Thomas Monjalon wrote:
> > > > 27/06/2017 11:18, Hemant Agrawal:
> > > > > On 6/27/2017 2:16 PM, Ilya Maximets wrote:
> > > > > > It is safe to enable LIBRTE_VHOST_NUMA by default for all
> > > > > > configurations where libnuma is already a default dependency.
> > > > > > 
> > > > > > Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> > > > > > ---
> > > > > >  config/common_linuxapp                    | 1 +
> > > > > >  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
> > > > > >  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
> > > > > >  3 files changed, 3 insertions(+)
> > > > [...]
> > > > > > --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> > > > > > +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> > > > > > @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
> > > > > > 
> > > > > >  # Doesn't support NUMA
> > > > > >  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> > > > > > +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
> > > > > > 
> > > > > >  #
> > > > > >  # Compile Support Libraries for DPAA2
> > > > > > 
> > > > > 
> > > > > -1
> > > > > It should also be disabled for generic ARM64. This patch is breaking
> > > > > generic arm64 config tests on our platforms and creating a unnecessary
> > > > > dependency.
> > > > 
> > > > What do you mean? Which ARM64 platform is it breaking?
> > > > We can specifically disable it on more platforms.
> > > > 
> > > Unlike x86, ARM only represent a core architecture.
> > > Different platforms can integrate these cores differently in their SoCs.
> > > The stock ARM v8 cores do not provide support for NUMA in my knowledge.
> > 
> > A72 is just _an_ implementation of armv8. Not ARMv8 specification
> > itself. By specification it is NUMA capable and there are NUMA
> > implementation too.
> > 
> > > Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
> > > architecture. However that is not a common phenomena.
> > > NUMA config should not be default for generic ARM config. It should be
> > > enabled only for architecture supporting it.
> > 
> > It just an build time dependency. Right? If you feed the libnuma package,
> > it will NON NUMA as well. Right? ARM64 libnuma package is already
> > available for major distributions.
> 
> yes, libnuma will work for non-NUMA.
> > 
> > My point is, I don't want to make arm64 generic config an exceptional case,
> > If DPDK common config creates libnuma dependency then there is no reason
> > for arm64 not have it. It is same for x86 and powerpc, non numa systems
> > too. Right?
> 
> x86 and powerpc configs are single vendor based.
> Common should be common and generic.

Yes. What I understand by common is that it should work functionally on _all_ the
armv8 targets. If you don't include NUMA then there will be functionality issues
on NUMA targets.

The ARM64 Linux kernel took a similar approach. The default config has all
options, and NUMA is _enabled_ even though it is not supported on A72.

http://elixir.free-electrons.com/linux/latest/source/arch/arm64/configs/defconfig#L77


> 
> Why to create a unnecessary dependency, when we know that the support is not
> uniform?  It adds difficulties e.g. For the ARM cross compilation, will also
> have to cross compile libnuma-dev. Makefile will need a path for specifying
> the lib and include paths for libnuma and numa.h.

Yes. I agree. Cross compilation needs an additional step. On the other
hand, if we don't include NUMA in the common config, we need to add new targets
for all new SoCs (like thunderx2). IMO, in order to reduce the number of configs,
this is the better way (and it is not hard to disable NUMA for cross
compilation if not interested).

> 
> 
> > 
> > > 
> > > So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
> > > include NXP. e.g. We use this config on several of our low end systems
> > > (non-dpaa). Also, we use it when running in VM with virtio interfaces on all
> > > of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
> > 
> > On the same note, arm64-armv8a-linuxapp-gcc used by other vendors for Server machines
> > with NUMA and if want to keep creating new targets there is no end to it.
> > 
> > How hard is to install libnuma on VM? There is already package for it.
> > 
> > 
> > > 
> > > 
> > > 
> > > 
> > > 
> > > 
> > > 
> > > 
> > 
> 
> 

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
  2017-06-27 12:45                                             ` Jerin Jacob
@ 2017-06-27 13:00                                               ` Hemant Agrawal
  0 siblings, 0 replies; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-27 13:00 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ilya Maximets, dev, David Marchand,
	Sergio Gonzalez Monroy, Heetae Ahn, Yuanhan Liu, Jianfeng Tan,
	Neil Horman, Yulong Pei, Bruce Richardson

On 6/27/2017 6:15 PM, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Tue, 27 Jun 2017 17:47:44 +0530
>> From: Hemant Agrawal <hemant.agrawal@nxp.com>
>> To: Jerin Jacob <jerin.jacob@caviumnetworks.com>
>> CC: Thomas Monjalon <thomas@monjalon.net>, Ilya Maximets
>>  <i.maximets@samsung.com>, dev@dpdk.org, David Marchand
>>  <david.marchand@6wind.com>, Sergio Gonzalez Monroy
>>  <sergio.gonzalez.monroy@intel.com>, Heetae Ahn <heetae82.ahn@samsung.com>,
>>  Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>,
>>  Neil Horman <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>,
>>  Bruce Richardson <bruce.richardson@intel.com>
>> Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
>> User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
>>  Thunderbird/45.8.0
>>
>> On 6/27/2017 3:29 PM, Jerin Jacob wrote:
>>> -----Original Message-----
>>>> Date: Tue, 27 Jun 2017 15:11:07 +0530
>>>> From: Hemant Agrawal <hemant.agrawal@nxp.com>
>>>> To: Thomas Monjalon <thomas@monjalon.net>
>>>> CC: Ilya Maximets <i.maximets@samsung.com>, dev@dpdk.org, David Marchand
>>>>  <david.marchand@6wind.com>, Sergio Gonzalez Monroy
>>>>  <sergio.gonzalez.monroy@intel.com>, Heetae Ahn <heetae82.ahn@samsung.com>,
>>>>  Yuanhan Liu <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>,
>>>>  Neil Horman <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>,
>>>>  Bruce Richardson <bruce.richardson@intel.com>, Jerin Jacob
>>>>  <jerin.jacob@caviumnetworks.com>
>>>> Subject: Re: [PATCH v8 2/2] config: enable vhost numa awareness by default
>>>> User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101
>>>>  Thunderbird/45.8.0
>>>>
>>>> On 6/27/2017 2:51 PM, Thomas Monjalon wrote:
>>>>> 27/06/2017 11:18, Hemant Agrawal:
>>>>>> On 6/27/2017 2:16 PM, Ilya Maximets wrote:
>>>>>>> It is safe to enable LIBRTE_VHOST_NUMA by default for all
>>>>>>> configurations where libnuma is already a default dependency.
>>>>>>>
>>>>>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>>>>>> ---
>>>>>>>  config/common_linuxapp                    | 1 +
>>>>>>>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>>>>>>>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>>>>>>>  3 files changed, 3 insertions(+)
>>>>> [...]
>>>>>>> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>>>>>> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>>>>>> @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
>>>>>>>
>>>>>>>  # Doesn't support NUMA
>>>>>>>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>>>>>>> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>>>>>>>
>>>>>>>  #
>>>>>>>  # Compile Support Libraries for DPAA2
>>>>>>>
>>>>>>
>>>>>> -1
>>>>>> It should also be disabled for generic ARM64. This patch is breaking
>>>>>> generic arm64 config tests on our platforms and creating a unnecessary
>>>>>> dependency.
>>>>>
>>>>> What do you mean? Which ARM64 platform is it breaking?
>>>>> We can specifically disable it on more platforms.
>>>>>
>>>> Unlike x86, ARM only represent a core architecture.
>>>> Different platforms can integrate these cores differently in their SoCs.
>>>> The stock ARM v8 cores do not provide support for NUMA in my knowledge.
>>>
>>> A72 is just _an_ implementation of armv8. Not ARMv8 specification
>>> itself. By specification it is NUMA capable and there are NUMA
>>> implementation too.
>>>
>>>> Some vendors have modified ARM cores (e.g. Cavium) to support NUMA
>>>> architecture. However that is not a common phenomena.
>>>> NUMA config should not be default for generic ARM config. It should be
>>>> enabled only for architecture supporting it.
>>>
>>> It just an build time dependency. Right? If you feed the libnuma package,
>>> it will NON NUMA as well. Right? ARM64 libnuma package is already
>>> available for major distributions.
>>
>> yes, libnuma will work for non-NUMA.
>>>
>>> My point is, I don't want to make arm64 generic config an exceptional case,
>>> If DPDK common config creates libnuma dependency then there is no reason
>>> for arm64 not have it. It is same for x86 and powerpc, non numa systems
>>> too. Right?
>>
>> x86 and powerpc configs are single vendor based.
>> Common should be common and generic.
>
> Yes. What I understand by common is that it should work on functionality on _all_ the
> armv8 targets. If you don't include NUMA then it will have functionality issue
> with NUMA targets.
>
> The ARM64 Linux kernel took the similar approach. The default config has all
> options and NUMA is _enabled_ even it is not supported on A72.
>
> http://elixir.free-electrons.com/linux/latest/source/arch/arm64/configs/defconfig#L77
>

Ok!  Not able to think of any other issue for now.

>
>>
>> Why to create a unnecessary dependency, when we know that the support is not
>> uniform?  It adds difficulties e.g. For the ARM cross compilation, will also
>> have to cross compile libnuma-dev. Makefile will need a path for specifying
>> the lib and include paths for libnuma and numa.h.
>
> Yes. I agree. Cross compilation needs additional step. On the other
> hand, If we don't include NUMA in common config, We need to add new targets on
> all new SoCs(like thunderx2). IMO, In order to reduce the config, I think,
> this is the better way.(and it is not hard to disable NUMA for cross
> compilation mode if not interested)
>
>>
>>
>>>
>>>>
>>>> So, *arm64-armv8a-linuxapp-gcc* config is being used by several vendors
>>>> include NXP. e.g. We use this config on several of our low end systems
>>>> (non-dpaa). Also, we use it when running in VM with virtio interfaces on all
>>>> of our different platforms (non-dpaa, dpaa1, dpaa2 etc).
>>>
>>> On the same note, arm64-armv8a-linuxapp-gcc used by other vendors for Server machines
>>> with NUMA and if want to keep creating new targets there is no end to it.
>>>
>>> How hard is to install libnuma on VM? There is already package for it.
>>>
>>>
>>>>
>>>>
>>>>
>>>>
>>>>
>>>>
>>>>
>>>>
>>>
>>
>>
>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v9 1/2] mem: balanced allocation of hugepages
  2017-06-27 10:24                                     ` [PATCH v9 1/2] mem: balanced " Ilya Maximets
@ 2017-06-28 10:30                                       ` Sergio Gonzalez Monroy
  2017-06-29  5:32                                       ` Hemant Agrawal
  1 sibling, 0 replies; 99+ messages in thread
From: Sergio Gonzalez Monroy @ 2017-06-28 10:30 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 27/06/2017 11:24, Ilya Maximets wrote:
> Currently EAL allocates hugepages one by one not paying attention
> from which NUMA node allocation was done.
>
> Such behaviour leads to allocation failure if number of available
> hugepages for application limited by cgroups or hugetlbfs and
> memory requested not only from the first socket.
>
> Example:
> 	# 90 x 1GB hugepages availavle in a system
>
> 	cgcreate -g hugetlb:/test
> 	# Limit to 32GB of hugepages
> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> 	# Request 4GB from each of 2 sockets
> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>
> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
> 	EAL: Not enough memory available on socket 1!
> 	     Requested: 4096MB, available: 0MB
> 	PANIC in rte_eal_init():
> 	Cannot init memory
>
> 	This happens beacause all allocated pages are
> 	on socket 0.
>
> Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
> to one of requested nodes using following schema:
>
> 	1) Allocate essential hugepages:
> 		1.1) Allocate as many hugepages from numa N to
> 		     only fit requested memory for this numa.
> 		1.2) repeat 1.1 for all numa nodes.
> 	2) Try to map all remaining free hugepages in a round-robin
> 	   fashion.
> 	3) Sort pages and choose the most suitable.
>
> In this case all essential memory will be allocated and all remaining
> pages will be fairly distributed between all requested nodes.
>
> New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
> enabled by default for linuxapp except armv7 and dpaa2.
> Enabling of this option adds libnuma as a dependency for EAL.
>
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>   config/common_base                        |   1 +
>   config/common_linuxapp                    |   1 +
>   config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
>   config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
>   lib/librte_eal/linuxapp/eal/Makefile      |   3 +
>   lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
>   mk/rte.app.mk                             |   3 +
>   7 files changed, 126 insertions(+), 8 deletions(-)

Good stuff Ilya!

Hemant, Jerin, could you also ack the patch if you are happy with it? 
Thanks.

Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v9 2/2] config: enable vhost numa awareness by default
  2017-06-27 10:24                                     ` [PATCH v9 2/2] config: enable vhost numa awareness by default Ilya Maximets
@ 2017-06-29  5:31                                       ` Hemant Agrawal
  0 siblings, 0 replies; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-29  5:31 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 6/27/2017 3:54 PM, Ilya Maximets wrote:
> It is safe to enable LIBRTE_VHOST_NUMA by default for all
> configurations where libnuma is already a default dependency.
>
> DPDK_DEP_NUMA is not needed anymore.
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  config/common_linuxapp                    | 1 +
>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>  devtools/test-build.sh                    | 4 ----
>  4 files changed, 3 insertions(+), 4 deletions(-)
>
> diff --git a/config/common_linuxapp b/config/common_linuxapp
> index 64bef87..74c7d64 100644
> --- a/config/common_linuxapp
> +++ b/config/common_linuxapp
> @@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
>  CONFIG_RTE_LIBRTE_KNI=y
>  CONFIG_RTE_LIBRTE_PMD_KNI=y
>  CONFIG_RTE_LIBRTE_VHOST=y
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=y
>  CONFIG_RTE_LIBRTE_PMD_VHOST=y
>  CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
>  CONFIG_RTE_LIBRTE_PMD_TAP=y
> diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
> index e06b1d4..00bc2ab 100644
> --- a/config/defconfig_arm-armv7a-linuxapp-gcc
> +++ b/config/defconfig_arm-armv7a-linuxapp-gcc
> @@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
>
>  # NUMA is not supported on ARM
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>
>  # ARM doesn't have support for vmware TSC map
>  CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
> diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> index f78449d..b061fb0 100644
> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> @@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
>
>  # Doesn't support NUMA
>  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
> +CONFIG_RTE_LIBRTE_VHOST_NUMA=n
>
>  #
>  # Compile Support Libraries for DPAA2
> diff --git a/devtools/test-build.sh b/devtools/test-build.sh
> index 61bdce7..0dbc04a 100755
> --- a/devtools/test-build.sh
> +++ b/devtools/test-build.sh
> @@ -41,7 +41,6 @@ default_path=$PATH
>  # - DPDK_DEP_ISAL_CRYPTO (y/[n])
>  # - DPDK_DEP_LDFLAGS
>  # - DPDK_DEP_MOFED (y/[n])
> -# - DPDK_DEP_NUMA (y/[n])
>  # - DPDK_DEP_PCAP (y/[n])
>  # - DPDK_DEP_SSL (y/[n])
>  # - DPDK_DEP_SZE (y/[n])
> @@ -124,7 +123,6 @@ reset_env ()
>  	unset DPDK_DEP_ISAL_CRYPTO
>  	unset DPDK_DEP_LDFLAGS
>  	unset DPDK_DEP_MOFED
> -	unset DPDK_DEP_NUMA
>  	unset DPDK_DEP_PCAP
>  	unset DPDK_DEP_SSL
>  	unset DPDK_DEP_SZE
> @@ -163,8 +161,6 @@ config () # <directory> <target> <options>
>  		sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )
>
>  		# Automatic configuration
> -		test "$DPDK_DEP_NUMA" != y || \
> -		sed -ri               's,(NUMA=)n,\1y,' $1/.config
>  		sed -ri    's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
>  		sed -ri             's,(BYPASS=)n,\1y,' $1/.config
>  		test "$DPDK_DEP_ARCHIVE" != y || \
>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v9 1/2] mem: balanced allocation of hugepages
  2017-06-27 10:24                                     ` [PATCH v9 1/2] mem: balanced " Ilya Maximets
  2017-06-28 10:30                                       ` Sergio Gonzalez Monroy
@ 2017-06-29  5:32                                       ` Hemant Agrawal
  2017-06-29  5:48                                         ` Ilya Maximets
  1 sibling, 1 reply; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-29  5:32 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 6/27/2017 3:54 PM, Ilya Maximets wrote:
> Currently EAL allocates hugepages one by one, not paying attention
> to which NUMA node the allocation was made from.
>
> Such behaviour leads to allocation failures if the number of hugepages
> available to the application is limited by cgroups or hugetlbfs and
> memory is requested from more than just the first socket.
>
> Example:
> 	# 90 x 1GB hugepages available in a system
>
> 	cgcreate -g hugetlb:/test
> 	# Limit to 32GB of hugepages
> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> 	# Request 4GB from each of 2 sockets
> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>
> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
> 	EAL: Not enough memory available on socket 1!
> 	     Requested: 4096MB, available: 0MB
> 	PANIC in rte_eal_init():
> 	Cannot init memory
>
> 	This happens because all allocated pages are
> 	on socket 0.
>
> Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
> to one of requested nodes using following schema:
>
> 	1) Allocate essential hugepages:
> 		1.1) Allocate only as many hugepages from NUMA node N as
> 		     are needed to fit the memory requested for this node.
> 		1.2) Repeat 1.1 for all NUMA nodes.
> 	2) Try to map all remaining free hugepages in a round-robin
> 	   fashion.
> 	3) Sort pages and choose the most suitable.
>
> In this case all essential memory will be allocated and all remaining
> pages will be fairly distributed between all requested nodes.
>
> New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
> enabled by default for linuxapp except armv7 and dpaa2.
> Enabling of this option adds libnuma as a dependency for EAL.
>
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> ---
>  config/common_base                        |   1 +
>  config/common_linuxapp                    |   1 +
>  config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
>  config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
>  lib/librte_eal/linuxapp/eal/Makefile      |   3 +
>  lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
>  mk/rte.app.mk                             |   3 +
>  7 files changed, 126 insertions(+), 8 deletions(-)
>
> diff --git a/config/common_base b/config/common_base
> index f6aafd1..660588a 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
>  CONFIG_RTE_EAL_IGB_UIO=n
>  CONFIG_RTE_EAL_VFIO=n
>  CONFIG_RTE_MALLOC_DEBUG=n
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>
>  #
>  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
> diff --git a/config/common_linuxapp b/config/common_linuxapp
> index b3cf41b..64bef87 100644
> --- a/config/common_linuxapp
> +++ b/config/common_linuxapp
> @@ -35,6 +35,7 @@
>  CONFIG_RTE_EXEC_ENV="linuxapp"
>  CONFIG_RTE_EXEC_ENV_LINUXAPP=y
>
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
>  CONFIG_RTE_EAL_IGB_UIO=y
>  CONFIG_RTE_EAL_VFIO=y
>  CONFIG_RTE_KNI_KMOD=y
> diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
> index 19607eb..e06b1d4 100644
> --- a/config/defconfig_arm-armv7a-linuxapp-gcc
> +++ b/config/defconfig_arm-armv7a-linuxapp-gcc
> @@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
>  CONFIG_RTE_TOOLCHAIN="gcc"
>  CONFIG_RTE_TOOLCHAIN_GCC=y
>
> +# NUMA is not supported on ARM
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> +
>  # ARM doesn't have support for vmware TSC map
>  CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
>
> diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> index 2304ab6..f78449d 100644
> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> @@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
>
>  CONFIG_RTE_PKTMBUF_HEADROOM=256
>
> +# Doesn't support NUMA
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
> +

DPAA2 does not support NUMA so,
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n

>  #
>  # Compile Support Libraries for DPAA2
>  #
> diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
> index 640afd0..8651e27 100644
> --- a/lib/librte_eal/linuxapp/eal/Makefile
> +++ b/lib/librte_eal/linuxapp/eal/Makefile
> @@ -50,6 +50,9 @@ LDLIBS += -ldl
>  LDLIBS += -lpthread
>  LDLIBS += -lgcc_s
>  LDLIBS += -lrt
> +ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
> +LDLIBS += -lnuma
> +endif
>
>  # specific to linuxapp exec-env
>  SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index e17c9cb..647d89c 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -54,6 +54,10 @@
>  #include <sys/time.h>
>  #include <signal.h>
>  #include <setjmp.h>
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +#include <numa.h>
> +#include <numaif.h>
> +#endif
>
>  #include <rte_log.h>
>  #include <rte_memory.h>
> @@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
>  	return sigsetjmp(huge_jmpenv, 1);
>  }
>
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +/* Callback for numa library. */
> +void numa_error(char *where)
> +{
> +	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
> +}
> +#endif
> +
>  /*
>   * Mmap all hugepages of hugepage table: it first open a file in
>   * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
> @@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
>   * map continguous physical blocks in contiguous virtual blocks.
>   */
>  static unsigned
> -map_all_hugepages(struct hugepage_file *hugepg_tbl,
> -		struct hugepage_info *hpi, int orig)
> +map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
> +		  uint64_t *essential_memory __rte_unused, int orig)
>  {
>  	int fd;
>  	unsigned i;
>  	void *virtaddr;
>  	void *vma_addr = NULL;
>  	size_t vma_len = 0;
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +	int node_id = -1;
> +	int essential_prev = 0;
> +	int oldpolicy;
> +	struct bitmask *oldmask = numa_allocate_nodemask();
> +	bool have_numa = true;
> +	unsigned long maxnode = 0;
> +
> +	/* Check if kernel supports NUMA. */
> +	if (numa_available() != 0) {
> +		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
> +		have_numa = false;
> +	}
> +
> +	if (orig && have_numa) {
> +		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
> +		if (get_mempolicy(&oldpolicy, oldmask->maskp,
> +				  oldmask->size + 1, 0, 0) < 0) {
> +			RTE_LOG(ERR, EAL,
> +				"Failed to get current mempolicy: %s. "
> +				"Assuming MPOL_DEFAULT.\n", strerror(errno));
> +			oldpolicy = MPOL_DEFAULT;
> +		}
> +		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
> +			if (internal_config.socket_mem[i])
> +				maxnode = i + 1;
> +	}
> +#endif
>
>  	for (i = 0; i < hpi->num_pages[0]; i++) {
>  		uint64_t hugepage_sz = hpi->hugepage_sz;
>
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +		if (maxnode) {
> +			unsigned int j;
> +
> +			for (j = 0; j < maxnode; j++)
> +				if (essential_memory[j])
> +					break;
> +
> +			if (j == maxnode) {
> +				node_id = (node_id + 1) % maxnode;
> +				while (!internal_config.socket_mem[node_id]) {
> +					node_id++;
> +					node_id %= maxnode;
> +				}
> +				essential_prev = 0;
> +			} else {
> +				node_id = j;
> +				essential_prev = essential_memory[j];
> +
> +				if (essential_memory[j] < hugepage_sz)
> +					essential_memory[j] = 0;
> +				else
> +					essential_memory[j] -= hugepage_sz;
> +			}
> +
> +			RTE_LOG(DEBUG, EAL,
> +				"Setting policy MPOL_PREFERRED for socket %d\n",
> +				node_id);
> +			numa_set_preferred(node_id);
> +		}
> +#endif
> +
>  		if (orig) {
>  			hugepg_tbl[i].file_id = i;
>  			hugepg_tbl[i].size = hugepage_sz;
> @@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  		if (fd < 0) {
>  			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
>  					strerror(errno));
> -			return i;
> +			goto out;
>  		}
>
>  		/* map the segment, and populate page tables,
> @@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
>  					strerror(errno));
>  			close(fd);
> -			return i;
> +			goto out;
>  		}
>
>  		if (orig) {
> @@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  				munmap(virtaddr, hugepage_sz);
>  				close(fd);
>  				unlink(hugepg_tbl[i].filepath);
> -				return i;
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +				if (maxnode)
> +					essential_memory[node_id] =
> +						essential_prev;
> +#endif
> +				goto out;
>  			}
>  			*(int *)virtaddr = 0;
>  		}
> @@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
>  				__func__, strerror(errno));
>  			close(fd);
> -			return i;
> +			goto out;
>  		}
>
>  		close(fd);
> @@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  		vma_len -= hugepage_sz;
>  	}
>
> +out:
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +	if (maxnode) {
> +		RTE_LOG(DEBUG, EAL,
> +			"Restoring previous memory policy: %d\n", oldpolicy);
> +		if (oldpolicy == MPOL_DEFAULT) {
> +			numa_set_localalloc();
> +		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
> +					 oldmask->size + 1) < 0) {
> +			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
> +				strerror(errno));
> +			numa_set_localalloc();
> +		}
> +	}
> +	numa_free_cpumask(oldmask);
> +#endif
>  	return i;
>  }
>
> @@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>  			if (hugepg_tbl[i].orig_va == va) {
>  				hugepg_tbl[i].socket_id = socket_id;
>  				hp_count++;
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +				RTE_LOG(DEBUG, EAL,
> +					"Hugepage %s is on socket %d\n",
> +					hugepg_tbl[i].filepath, socket_id);
> +#endif
>  			}
>  		}
>  	}
> @@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
>
>  	huge_register_sigbus();
>
> +	/* make a copy of socket_mem, needed for balanced allocation. */
> +	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
> +		memory[i] = internal_config.socket_mem[i];
> +
> +
>  	/* map all hugepages and sort them */
>  	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
>  		unsigned pages_old, pages_new;
> @@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
>
>  		/* map all hugepages available */
>  		pages_old = hpi->num_pages[0];
> -		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
> +		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
> +					      memory, 1);
>  		if (pages_new < pages_old) {
>  			RTE_LOG(DEBUG, EAL,
>  				"%d not %d hugepages of size %u MB allocated\n",
> @@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
>  		      sizeof(struct hugepage_file), cmp_physaddr);
>
>  		/* remap all hugepages */
> -		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
> +		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
>  		    hpi->num_pages[0]) {
>  			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
>  					(unsigned)(hpi->hugepage_sz / 0x100000));
> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
> index bcaf1b3..4fe22d1 100644
> --- a/mk/rte.app.mk
> +++ b/mk/rte.app.mk
> @@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
>  # The static libraries do not know their dependencies.
>  # So linking with static library requires explicit dependencies.
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
> +ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
> +_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
> +endif
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v9 1/2] mem: balanced allocation of hugepages
  2017-06-29  5:32                                       ` Hemant Agrawal
@ 2017-06-29  5:48                                         ` Ilya Maximets
  2017-06-29  6:08                                           ` Ilya Maximets
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-29  5:48 UTC (permalink / raw)
  To: Hemant Agrawal, dev, David Marchand, Sergio Gonzalez Monroy,
	Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 29.06.2017 08:32, Hemant Agrawal wrote:
> On 6/27/2017 3:54 PM, Ilya Maximets wrote:
>> Currently EAL allocates hugepages one by one, not paying attention
>> to which NUMA node the allocation was made from.
>>
>> Such behaviour leads to allocation failures if the number of hugepages
>> available to the application is limited by cgroups or hugetlbfs and
>> memory is requested from more than just the first socket.
>>
>> Example:
>>     # 90 x 1GB hugepages available in a system
>>
>>     cgcreate -g hugetlb:/test
>>     # Limit to 32GB of hugepages
>>     cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>     # Request 4GB from each of 2 sockets
>>     cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>
>>     EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>     EAL: 32 not 90 hugepages of size 1024 MB allocated
>>     EAL: Not enough memory available on socket 1!
>>          Requested: 4096MB, available: 0MB
>>     PANIC in rte_eal_init():
>>     Cannot init memory
>>
>>     This happens because all allocated pages are
>>     on socket 0.
>>
>> Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
>> to one of requested nodes using following schema:
>>
>>     1) Allocate essential hugepages:
>>         1.1) Allocate only as many hugepages from NUMA node N as
>>              are needed to fit the memory requested for this node.
>>         1.2) Repeat 1.1 for all NUMA nodes.
>>     2) Try to map all remaining free hugepages in a round-robin
>>        fashion.
>>     3) Sort pages and choose the most suitable.
>>
>> In this case all essential memory will be allocated and all remaining
>> pages will be fairly distributed between all requested nodes.
>>
>> New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
>> enabled by default for linuxapp except armv7 and dpaa2.
>> Enabling of this option adds libnuma as a dependency for EAL.
>>
>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>
>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>> ---
>>  config/common_base                        |   1 +
>>  config/common_linuxapp                    |   1 +
>>  config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
>>  config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
>>  lib/librte_eal/linuxapp/eal/Makefile      |   3 +
>>  lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
>>  mk/rte.app.mk                             |   3 +
>>  7 files changed, 126 insertions(+), 8 deletions(-)
>>
>> diff --git a/config/common_base b/config/common_base
>> index f6aafd1..660588a 100644
>> --- a/config/common_base
>> +++ b/config/common_base
>> @@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
>>  CONFIG_RTE_EAL_IGB_UIO=n
>>  CONFIG_RTE_EAL_VFIO=n
>>  CONFIG_RTE_MALLOC_DEBUG=n
>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>>
>>  #
>>  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
>> diff --git a/config/common_linuxapp b/config/common_linuxapp
>> index b3cf41b..64bef87 100644
>> --- a/config/common_linuxapp
>> +++ b/config/common_linuxapp
>> @@ -35,6 +35,7 @@
>>  CONFIG_RTE_EXEC_ENV="linuxapp"
>>  CONFIG_RTE_EXEC_ENV_LINUXAPP=y
>>
>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
>>  CONFIG_RTE_EAL_IGB_UIO=y
>>  CONFIG_RTE_EAL_VFIO=y
>>  CONFIG_RTE_KNI_KMOD=y
>> diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
>> index 19607eb..e06b1d4 100644
>> --- a/config/defconfig_arm-armv7a-linuxapp-gcc
>> +++ b/config/defconfig_arm-armv7a-linuxapp-gcc
>> @@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
>>  CONFIG_RTE_TOOLCHAIN="gcc"
>>  CONFIG_RTE_TOOLCHAIN_GCC=y
>>
>> +# NUMA is not supported on ARM
>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>> +
>>  # ARM doesn't have support for vmware TSC map
>>  CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
>>
>> diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
>> index 2304ab6..f78449d 100644
>> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
>> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
>> @@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
>>
>>  CONFIG_RTE_PKTMBUF_HEADROOM=256
>>
>> +# Doesn't support NUMA
>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
>> +
> 
> DPAA2 does not support NUMA so,
> CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n

Oh, sorry. Just a typo.
Thanks for catching this.


Sergio, I'll send v10 with only this change and will keep your
acked-by because the change is trivial.

>>  #
>>  # Compile Support Libraries for DPAA2
>>  #
>> diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
>> index 640afd0..8651e27 100644
>> --- a/lib/librte_eal/linuxapp/eal/Makefile
>> +++ b/lib/librte_eal/linuxapp/eal/Makefile
>> @@ -50,6 +50,9 @@ LDLIBS += -ldl
>>  LDLIBS += -lpthread
>>  LDLIBS += -lgcc_s
>>  LDLIBS += -lrt
>> +ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
>> +LDLIBS += -lnuma
>> +endif
>>
>>  # specific to linuxapp exec-env
>>  SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
>> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
>> index e17c9cb..647d89c 100644
>> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
>> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
>> @@ -54,6 +54,10 @@
>>  #include <sys/time.h>
>>  #include <signal.h>
>>  #include <setjmp.h>
>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>> +#include <numa.h>
>> +#include <numaif.h>
>> +#endif
>>
>>  #include <rte_log.h>
>>  #include <rte_memory.h>
>> @@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
>>      return sigsetjmp(huge_jmpenv, 1);
>>  }
>>
>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>> +/* Callback for numa library. */
>> +void numa_error(char *where)
>> +{
>> +    RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
>> +}
>> +#endif
>> +
>>  /*
>>   * Mmap all hugepages of hugepage table: it first open a file in
>>   * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
>> @@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
>>   * map continguous physical blocks in contiguous virtual blocks.
>>   */
>>  static unsigned
>> -map_all_hugepages(struct hugepage_file *hugepg_tbl,
>> -        struct hugepage_info *hpi, int orig)
>> +map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
>> +          uint64_t *essential_memory __rte_unused, int orig)
>>  {
>>      int fd;
>>      unsigned i;
>>      void *virtaddr;
>>      void *vma_addr = NULL;
>>      size_t vma_len = 0;
>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>> +    int node_id = -1;
>> +    int essential_prev = 0;
>> +    int oldpolicy;
>> +    struct bitmask *oldmask = numa_allocate_nodemask();
>> +    bool have_numa = true;
>> +    unsigned long maxnode = 0;
>> +
>> +    /* Check if kernel supports NUMA. */
>> +    if (numa_available() != 0) {
>> +        RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
>> +        have_numa = false;
>> +    }
>> +
>> +    if (orig && have_numa) {
>> +        RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
>> +        if (get_mempolicy(&oldpolicy, oldmask->maskp,
>> +                  oldmask->size + 1, 0, 0) < 0) {
>> +            RTE_LOG(ERR, EAL,
>> +                "Failed to get current mempolicy: %s. "
>> +                "Assuming MPOL_DEFAULT.\n", strerror(errno));
>> +            oldpolicy = MPOL_DEFAULT;
>> +        }
>> +        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
>> +            if (internal_config.socket_mem[i])
>> +                maxnode = i + 1;
>> +    }
>> +#endif
>>
>>      for (i = 0; i < hpi->num_pages[0]; i++) {
>>          uint64_t hugepage_sz = hpi->hugepage_sz;
>>
>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>> +        if (maxnode) {
>> +            unsigned int j;
>> +
>> +            for (j = 0; j < maxnode; j++)
>> +                if (essential_memory[j])
>> +                    break;
>> +
>> +            if (j == maxnode) {
>> +                node_id = (node_id + 1) % maxnode;
>> +                while (!internal_config.socket_mem[node_id]) {
>> +                    node_id++;
>> +                    node_id %= maxnode;
>> +                }
>> +                essential_prev = 0;
>> +            } else {
>> +                node_id = j;
>> +                essential_prev = essential_memory[j];
>> +
>> +                if (essential_memory[j] < hugepage_sz)
>> +                    essential_memory[j] = 0;
>> +                else
>> +                    essential_memory[j] -= hugepage_sz;
>> +            }
>> +
>> +            RTE_LOG(DEBUG, EAL,
>> +                "Setting policy MPOL_PREFERRED for socket %d\n",
>> +                node_id);
>> +            numa_set_preferred(node_id);
>> +        }
>> +#endif
>> +
>>          if (orig) {
>>              hugepg_tbl[i].file_id = i;
>>              hugepg_tbl[i].size = hugepage_sz;
>> @@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>          if (fd < 0) {
>>              RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
>>                      strerror(errno));
>> -            return i;
>> +            goto out;
>>          }
>>
>>          /* map the segment, and populate page tables,
>> @@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>              RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
>>                      strerror(errno));
>>              close(fd);
>> -            return i;
>> +            goto out;
>>          }
>>
>>          if (orig) {
>> @@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>                  munmap(virtaddr, hugepage_sz);
>>                  close(fd);
>>                  unlink(hugepg_tbl[i].filepath);
>> -                return i;
>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>> +                if (maxnode)
>> +                    essential_memory[node_id] =
>> +                        essential_prev;
>> +#endif
>> +                goto out;
>>              }
>>              *(int *)virtaddr = 0;
>>          }
>> @@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>              RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
>>                  __func__, strerror(errno));
>>              close(fd);
>> -            return i;
>> +            goto out;
>>          }
>>
>>          close(fd);
>> @@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>          vma_len -= hugepage_sz;
>>      }
>>
>> +out:
>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>> +    if (maxnode) {
>> +        RTE_LOG(DEBUG, EAL,
>> +            "Restoring previous memory policy: %d\n", oldpolicy);
>> +        if (oldpolicy == MPOL_DEFAULT) {
>> +            numa_set_localalloc();
>> +        } else if (set_mempolicy(oldpolicy, oldmask->maskp,
>> +                     oldmask->size + 1) < 0) {
>> +            RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
>> +                strerror(errno));
>> +            numa_set_localalloc();
>> +        }
>> +    }
>> +    numa_free_cpumask(oldmask);
>> +#endif
>>      return i;
>>  }
>>
>> @@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>>              if (hugepg_tbl[i].orig_va == va) {
>>                  hugepg_tbl[i].socket_id = socket_id;
>>                  hp_count++;
>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>> +                RTE_LOG(DEBUG, EAL,
>> +                    "Hugepage %s is on socket %d\n",
>> +                    hugepg_tbl[i].filepath, socket_id);
>> +#endif
>>              }
>>          }
>>      }
>> @@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
>>
>>      huge_register_sigbus();
>>
>> +    /* make a copy of socket_mem, needed for balanced allocation. */
>> +    for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
>> +        memory[i] = internal_config.socket_mem[i];
>> +
>> +
>>      /* map all hugepages and sort them */
>>      for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
>>          unsigned pages_old, pages_new;
>> @@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
>>
>>          /* map all hugepages available */
>>          pages_old = hpi->num_pages[0];
>> -        pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
>> +        pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
>> +                          memory, 1);
>>          if (pages_new < pages_old) {
>>              RTE_LOG(DEBUG, EAL,
>>                  "%d not %d hugepages of size %u MB allocated\n",
>> @@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
>>                sizeof(struct hugepage_file), cmp_physaddr);
>>
>>          /* remap all hugepages */
>> -        if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
>> +        if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
>>              hpi->num_pages[0]) {
>>              RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
>>                      (unsigned)(hpi->hugepage_sz / 0x100000));
>> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
>> index bcaf1b3..4fe22d1 100644
>> --- a/mk/rte.app.mk
>> +++ b/mk/rte.app.mk
>> @@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
>>  # The static libraries do not know their dependencies.
>>  # So linking with static library requires explicit dependencies.
>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
>> +ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
>> +_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
>> +endif
>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
>>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v10 0/2] Balanced allocation of hugepages
       [not found]                                   ` <CGME20170629055928eucas1p17e823d821cfe95953bfa59dc9883ca4f@eucas1p1.samsung.com>
@ 2017-06-29  5:59                                     ` Ilya Maximets
       [not found]                                       ` <CGME20170629055933eucas1p1e5eba5f07850f63f9afbd48e6ca64c42@eucas1p1.samsung.com>
                                                         ` (3 more replies)
  0 siblings, 4 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-29  5:59 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Hemant Agrawal, Ilya Maximets

Version 10:
	* Fixed typo in DPAA2 config.

Version 9:
	* Removed DPDK_DEP_NUMA from test-build.sh. Not needed
	  anymore.
	* Fixed out-of-bounds write to essential_memory in the case
	  where socket-mem is not specified and SIGBUS occurred.

Version 8:
	* helper functions from libnuma used to set mempolicy and
	  work with cpu mask.
	* Function now restores previous mempolicy instead of MPOL_DEFAULT.
	* Fixed essential_memory on SIGBUS.
	* Fixed restoring of mempolicy in case of errors (goto out).
	* Enabled by default for all linuxapp except armv7 and dpaa2.
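
	  A simplified sketch of the mempolicy save/restore pattern mentioned
	  above (illustrative only; the function name is invented for the
	  example, the real logic lives in map_all_hugepages()):

	#include <numa.h>
	#include <numaif.h>

	static void map_with_preferred_node(int node_id)
	{
		int oldpolicy;
		struct bitmask *oldmask = numa_allocate_nodemask();

		/* Remember whatever policy was active before. */
		if (get_mempolicy(&oldpolicy, oldmask->maskp,
				  oldmask->size + 1, 0, 0) < 0)
			oldpolicy = MPOL_DEFAULT;

		numa_set_preferred(node_id);	/* hint for the following mmap()s */
		/* ... open and mmap() the hugepage files here ... */

		/* Restore the original policy once mapping is done. */
		if (oldpolicy == MPOL_DEFAULT)
			numa_set_localalloc();
		else
			set_mempolicy(oldpolicy, oldmask->maskp,
				      oldmask->size + 1);
		numa_free_nodemask(oldmask);
	}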

Version 7:
	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
	  returned. Enabled by default for x86, ppc and thunderx.

Version 5:
	* Fixed shared build. (Automated build test will fail
	  anyway because libnuma-devel is not installed on build servers)

Version 4:
	* Fixed operation on systems without NUMA by adding a check for
	  NUMA support in the kernel.

Version 3:
	* Implemented hybrid schema for allocation.
	* Fixed an unneeded mempolicy change while remapping. (orig = 0)
	* Added patch to enable VHOST_NUMA by default.

Version 2:
	* rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost numa awareness by default

 config/common_base                        |   1 +
 config/common_linuxapp                    |   2 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   4 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   4 +
 devtools/test-build.sh                    |   4 -
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 8 files changed, 129 insertions(+), 12 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v10 1/2] mem: balanced allocation of hugepages
       [not found]                                       ` <CGME20170629055933eucas1p1e5eba5f07850f63f9afbd48e6ca64c42@eucas1p1.samsung.com>
@ 2017-06-29  5:59                                         ` Ilya Maximets
  2017-06-29  7:03                                           ` Hemant Agrawal
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-29  5:59 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Hemant Agrawal, Ilya Maximets

Currently EAL allocates hugepages one by one, not paying attention
to which NUMA node the allocation was made from.

Such behaviour leads to allocation failures if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested from more than just the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:

	1) Allocate essential hugepages:
		1.1) Allocate only as many hugepages from NUMA node N as
		     are needed to fit the memory requested for this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
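
In pseudo-C the per-page node choice above boils down to the following
(a condensed, illustrative sketch of the logic in the diff below; the
helper name is made up here, and logging, the libnuma calls and the
error paths are left out):

	#include <stdint.h>

	/* essential_memory[] starts as a copy of socket_mem[] and is drained
	 * while the per-node requests are being satisfied.  Once it is empty,
	 * the remaining pages are spread round-robin over requested nodes. */
	static int pick_node(uint64_t *essential_memory,
			     const uint64_t *socket_mem,
			     unsigned long maxnode,
			     int prev_node, uint64_t page_sz)
	{
		unsigned long j;

		for (j = 0; j < maxnode; j++)
			if (essential_memory[j])
				break;

		if (j == maxnode) {
			/* Step 2: plain round-robin over the requested nodes. */
			int node = prev_node;

			do {
				node = (node + 1) % (int)maxnode;
			} while (!socket_mem[node]);
			return node;
		}

		/* Step 1: still covering the request for node j. */
		if (essential_memory[j] < page_sz)
			essential_memory[j] = 0;
		else
			essential_memory[j] -= page_sz;
		return (int)j;
	}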

New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
---
 config/common_base                        |   1 +
 config/common_linuxapp                    |   1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 7 files changed, 126 insertions(+), 8 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 #
 # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..64bef87 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
 CONFIG_RTE_EXEC_ENV="linuxapp"
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
 CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
 CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
 CONFIG_RTE_TOOLCHAIN="gcc"
 CONFIG_RTE_TOOLCHAIN_GCC=y
 
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
 
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab6..f78449d 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
 
 CONFIG_RTE_PKTMBUF_HEADROOM=256
 
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 #
 # Compile Support Libraries for DPAA2
 #
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
 LDLIBS += -lpthread
 LDLIBS += -lgcc_s
 LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..647d89c 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory __rte_unused, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	int node_id = -1;
+	int essential_prev = 0;
+	int oldpolicy;
+	struct bitmask *oldmask = numa_allocate_nodemask();
+	bool have_numa = true;
+	unsigned long maxnode = 0;
+
+	/* Check if kernel supports NUMA. */
+	if (numa_available() != 0) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		have_numa = false;
+	}
+
+	if (orig && have_numa) {
+		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+		if (get_mempolicy(&oldpolicy, oldmask->maskp,
+				  oldmask->size + 1, 0, 0) < 0) {
+			RTE_LOG(ERR, EAL,
+				"Failed to get current mempolicy: %s. "
+				"Assuming MPOL_DEFAULT.\n", strerror(errno));
+			oldpolicy = MPOL_DEFAULT;
+		}
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < maxnode; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == maxnode) {
+				node_id = (node_id + 1) % maxnode;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= maxnode;
+				}
+				essential_prev = 0;
+			} else {
+				node_id = j;
+				essential_prev = essential_memory[j];
+
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			numa_set_preferred(node_id);
+		}
+#endif
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		if (fd < 0) {
 			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
 					strerror(errno));
-			return i;
+			goto out;
 		}
 
 		/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
 					strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 				munmap(virtaddr, hugepage_sz);
 				close(fd);
 				unlink(hugepg_tbl[i].filepath);
-				return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				if (maxnode)
+					essential_memory[node_id] =
+						essential_prev;
+#endif
+				goto out;
 			}
 			*(int *)virtaddr = 0;
 		}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
 				__func__, strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode) {
+		RTE_LOG(DEBUG, EAL,
+			"Restoring previous memory policy: %d\n", oldpolicy);
+		if (oldpolicy == MPOL_DEFAULT) {
+			numa_set_localalloc();
+		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
+					 oldmask->size + 1) < 0) {
+			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+				strerror(errno));
+			numa_set_localalloc();
+		}
+	}
+	numa_free_cpumask(oldmask);
+#endif
 	return i;
 }
 
@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* [PATCH v10 2/2] config: enable vhost numa awareness by default
       [not found]                                       ` <CGME20170629055940eucas1p1c9adcb26bec3ce5de97fe56753fd941a@eucas1p1.samsung.com>
@ 2017-06-29  5:59                                         ` Ilya Maximets
  2017-06-30 15:50                                           ` Thomas Monjalon
  0 siblings, 1 reply; 99+ messages in thread
From: Ilya Maximets @ 2017-06-29  5:59 UTC (permalink / raw)
  To: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Hemant Agrawal, Ilya Maximets

It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

DPDK_DEP_NUMA is not needed anymore.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 config/common_linuxapp                    | 1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
 devtools/test-build.sh                    | 4 ----
 4 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87..74c7d64 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
 CONFIG_RTE_LIBRTE_KNI=y
 CONFIG_RTE_LIBRTE_PMD_KNI=y
 CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_PMD_VHOST=y
 CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
 CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d4..00bc2ab 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 
 # NUMA is not supported on ARM
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index f78449d..b061fb0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
 
 # Doesn't support NUMA
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 #
 # Compile Support Libraries for DPAA2
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7..0dbc04a 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -41,7 +41,6 @@ default_path=$PATH
 # - DPDK_DEP_ISAL_CRYPTO (y/[n])
 # - DPDK_DEP_LDFLAGS
 # - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
 # - DPDK_DEP_PCAP (y/[n])
 # - DPDK_DEP_SSL (y/[n])
 # - DPDK_DEP_SZE (y/[n])
@@ -124,7 +123,6 @@ reset_env ()
 	unset DPDK_DEP_ISAL_CRYPTO
 	unset DPDK_DEP_LDFLAGS
 	unset DPDK_DEP_MOFED
-	unset DPDK_DEP_NUMA
 	unset DPDK_DEP_PCAP
 	unset DPDK_DEP_SSL
 	unset DPDK_DEP_SZE
@@ -163,8 +161,6 @@ config () # <directory> <target> <options>
 		sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )
 
 		# Automatic configuration
-		test "$DPDK_DEP_NUMA" != y || \
-		sed -ri               's,(NUMA=)n,\1y,' $1/.config
 		sed -ri    's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
 		sed -ri             's,(BYPASS=)n,\1y,' $1/.config
 		test "$DPDK_DEP_ARCHIVE" != y || \
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH v9 1/2] mem: balanced allocation of hugepages
  2017-06-29  5:48                                         ` Ilya Maximets
@ 2017-06-29  6:08                                           ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-29  6:08 UTC (permalink / raw)
  To: Hemant Agrawal, dev, David Marchand, Sergio Gonzalez Monroy,
	Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 29.06.2017 08:48, Ilya Maximets wrote:
> On 29.06.2017 08:32, Hemant Agrawal wrote:
>> On 6/27/2017 3:54 PM, Ilya Maximets wrote:
>>> Currently EAL allocates hugepages one by one, not paying attention
>>> to which NUMA node the allocation was made from.
>>>
>>> Such behaviour leads to allocation failures if the number of hugepages
>>> available to the application is limited by cgroups or hugetlbfs and
>>> memory is requested from more than just the first socket.
>>>
>>> Example:
>>>     # 90 x 1GB hugepages available in a system
>>>
>>>     cgcreate -g hugetlb:/test
>>>     # Limit to 32GB of hugepages
>>>     cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
>>>     # Request 4GB from each of 2 sockets
>>>     cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>>>
>>>     EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
>>>     EAL: 32 not 90 hugepages of size 1024 MB allocated
>>>     EAL: Not enough memory available on socket 1!
>>>          Requested: 4096MB, available: 0MB
>>>     PANIC in rte_eal_init():
>>>     Cannot init memory
>>>
>>>     This happens because all allocated pages are
>>>     on socket 0.
>>>
>>> Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
>>> to one of requested nodes using following schema:
>>>
>>>     1) Allocate essential hugepages:
>>>         1.1) Allocate only as many hugepages from NUMA node N as
>>>              are needed to fit the memory requested for this node.
>>>         1.2) Repeat 1.1 for all NUMA nodes.
>>>     2) Try to map all remaining free hugepages in a round-robin
>>>        fashion.
>>>     3) Sort pages and choose the most suitable.
>>>
>>> In this case all essential memory will be allocated and all remaining
>>> pages will be fairly distributed between all requested nodes.
>>>
>>> New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
>>> enabled by default for linuxapp except armv7 and dpaa2.
>>> Enabling of this option adds libnuma as a dependency for EAL.
>>>
>>> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>>>
>>> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
>>> ---
>>>  config/common_base                        |   1 +
>>>  config/common_linuxapp                    |   1 +
>>>  config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
>>>  config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
>>>  lib/librte_eal/linuxapp/eal/Makefile      |   3 +
>>>  lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
>>>  mk/rte.app.mk                             |   3 +
>>>  7 files changed, 126 insertions(+), 8 deletions(-)
>>>
>>> diff --git a/config/common_base b/config/common_base
>>> index f6aafd1..660588a 100644
>>> --- a/config/common_base
>>> +++ b/config/common_base
>>> @@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
>>>  CONFIG_RTE_EAL_IGB_UIO=n
>>>  CONFIG_RTE_EAL_VFIO=n
>>>  CONFIG_RTE_MALLOC_DEBUG=n
>>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>>>
>>>  #
>>>  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
>>> diff --git a/config/common_linuxapp b/config/common_linuxapp
>>> index b3cf41b..64bef87 100644
>>> --- a/config/common_linuxapp
>>> +++ b/config/common_linuxapp
>>> @@ -35,6 +35,7 @@
>>>  CONFIG_RTE_EXEC_ENV="linuxapp"
>>>  CONFIG_RTE_EXEC_ENV_LINUXAPP=y
>>>
>>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
>>>  CONFIG_RTE_EAL_IGB_UIO=y
>>>  CONFIG_RTE_EAL_VFIO=y
>>>  CONFIG_RTE_KNI_KMOD=y
>>> diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
>>> index 19607eb..e06b1d4 100644
>>> --- a/config/defconfig_arm-armv7a-linuxapp-gcc
>>> +++ b/config/defconfig_arm-armv7a-linuxapp-gcc
>>> @@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
>>>  CONFIG_RTE_TOOLCHAIN="gcc"
>>>  CONFIG_RTE_TOOLCHAIN_GCC=y
>>>
>>> +# NUMA is not supported on ARM
>>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>>> +
>>>  # ARM doesn't have support for vmware TSC map
>>>  CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
>>>
>>> diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>> index 2304ab6..f78449d 100644
>>> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
>>> @@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
>>>
>>>  CONFIG_RTE_PKTMBUF_HEADROOM=256
>>>
>>> +# Doesn't support NUMA
>>> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
>>> +
>>
>> DPAA2 does not support NUMA so,
>> CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> 
> Oh, sorry. Just a typo.
> Thanks for catching this.

Fixed. Hemant, please check the new version (v10).

> Sergio, I'll send v10 with only this change and will keep your
> acked-by because the change is trivial.
> 
>>>  #
>>>  # Compile Support Libraries for DPAA2
>>>  #
>>> diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
>>> index 640afd0..8651e27 100644
>>> --- a/lib/librte_eal/linuxapp/eal/Makefile
>>> +++ b/lib/librte_eal/linuxapp/eal/Makefile
>>> @@ -50,6 +50,9 @@ LDLIBS += -ldl
>>>  LDLIBS += -lpthread
>>>  LDLIBS += -lgcc_s
>>>  LDLIBS += -lrt
>>> +ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
>>> +LDLIBS += -lnuma
>>> +endif
>>>
>>>  # specific to linuxapp exec-env
>>>  SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
>>> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
>>> index e17c9cb..647d89c 100644
>>> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
>>> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
>>> @@ -54,6 +54,10 @@
>>>  #include <sys/time.h>
>>>  #include <signal.h>
>>>  #include <setjmp.h>
>>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>>> +#include <numa.h>
>>> +#include <numaif.h>
>>> +#endif
>>>
>>>  #include <rte_log.h>
>>>  #include <rte_memory.h>
>>> @@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
>>>      return sigsetjmp(huge_jmpenv, 1);
>>>  }
>>>
>>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>>> +/* Callback for numa library. */
>>> +void numa_error(char *where)
>>> +{
>>> +    RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
>>> +}
>>> +#endif
>>> +
>>>  /*
>>>   * Mmap all hugepages of hugepage table: it first open a file in
>>>   * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
>>> @@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
>>>   * map continguous physical blocks in contiguous virtual blocks.
>>>   */
>>>  static unsigned
>>> -map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>> -        struct hugepage_info *hpi, int orig)
>>> +map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
>>> +          uint64_t *essential_memory __rte_unused, int orig)
>>>  {
>>>      int fd;
>>>      unsigned i;
>>>      void *virtaddr;
>>>      void *vma_addr = NULL;
>>>      size_t vma_len = 0;
>>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>>> +    int node_id = -1;
>>> +    int essential_prev = 0;
>>> +    int oldpolicy;
>>> +    struct bitmask *oldmask = numa_allocate_nodemask();
>>> +    bool have_numa = true;
>>> +    unsigned long maxnode = 0;
>>> +
>>> +    /* Check if kernel supports NUMA. */
>>> +    if (numa_available() != 0) {
>>> +        RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
>>> +        have_numa = false;
>>> +    }
>>> +
>>> +    if (orig && have_numa) {
>>> +        RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
>>> +        if (get_mempolicy(&oldpolicy, oldmask->maskp,
>>> +                  oldmask->size + 1, 0, 0) < 0) {
>>> +            RTE_LOG(ERR, EAL,
>>> +                "Failed to get current mempolicy: %s. "
>>> +                "Assuming MPOL_DEFAULT.\n", strerror(errno));
>>> +            oldpolicy = MPOL_DEFAULT;
>>> +        }
>>> +        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
>>> +            if (internal_config.socket_mem[i])
>>> +                maxnode = i + 1;
>>> +    }
>>> +#endif
>>>
>>>      for (i = 0; i < hpi->num_pages[0]; i++) {
>>>          uint64_t hugepage_sz = hpi->hugepage_sz;
>>>
>>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>>> +        if (maxnode) {
>>> +            unsigned int j;
>>> +
>>> +            for (j = 0; j < maxnode; j++)
>>> +                if (essential_memory[j])
>>> +                    break;
>>> +
>>> +            if (j == maxnode) {
>>> +                node_id = (node_id + 1) % maxnode;
>>> +                while (!internal_config.socket_mem[node_id]) {
>>> +                    node_id++;
>>> +                    node_id %= maxnode;
>>> +                }
>>> +                essential_prev = 0;
>>> +            } else {
>>> +                node_id = j;
>>> +                essential_prev = essential_memory[j];
>>> +
>>> +                if (essential_memory[j] < hugepage_sz)
>>> +                    essential_memory[j] = 0;
>>> +                else
>>> +                    essential_memory[j] -= hugepage_sz;
>>> +            }
>>> +
>>> +            RTE_LOG(DEBUG, EAL,
>>> +                "Setting policy MPOL_PREFERRED for socket %d\n",
>>> +                node_id);
>>> +            numa_set_preferred(node_id);
>>> +        }
>>> +#endif
>>> +
>>>          if (orig) {
>>>              hugepg_tbl[i].file_id = i;
>>>              hugepg_tbl[i].size = hugepage_sz;
>>> @@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>>          if (fd < 0) {
>>>              RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
>>>                      strerror(errno));
>>> -            return i;
>>> +            goto out;
>>>          }
>>>
>>>          /* map the segment, and populate page tables,
>>> @@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>>              RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
>>>                      strerror(errno));
>>>              close(fd);
>>> -            return i;
>>> +            goto out;
>>>          }
>>>
>>>          if (orig) {
>>> @@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>>                  munmap(virtaddr, hugepage_sz);
>>>                  close(fd);
>>>                  unlink(hugepg_tbl[i].filepath);
>>> -                return i;
>>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>>> +                if (maxnode)
>>> +                    essential_memory[node_id] =
>>> +                        essential_prev;
>>> +#endif
>>> +                goto out;
>>>              }
>>>              *(int *)virtaddr = 0;
>>>          }
>>> @@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>>              RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
>>>                  __func__, strerror(errno));
>>>              close(fd);
>>> -            return i;
>>> +            goto out;
>>>          }
>>>
>>>          close(fd);
>>> @@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>>>          vma_len -= hugepage_sz;
>>>      }
>>>
>>> +out:
>>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>>> +    if (maxnode) {
>>> +        RTE_LOG(DEBUG, EAL,
>>> +            "Restoring previous memory policy: %d\n", oldpolicy);
>>> +        if (oldpolicy == MPOL_DEFAULT) {
>>> +            numa_set_localalloc();
>>> +        } else if (set_mempolicy(oldpolicy, oldmask->maskp,
>>> +                     oldmask->size + 1) < 0) {
>>> +            RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
>>> +                strerror(errno));
>>> +            numa_set_localalloc();
>>> +        }
>>> +    }
>>> +    numa_free_cpumask(oldmask);
>>> +#endif
>>>      return i;
>>>  }
>>>
>>> @@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>>>              if (hugepg_tbl[i].orig_va == va) {
>>>                  hugepg_tbl[i].socket_id = socket_id;
>>>                  hp_count++;
>>> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
>>> +                RTE_LOG(DEBUG, EAL,
>>> +                    "Hugepage %s is on socket %d\n",
>>> +                    hugepg_tbl[i].filepath, socket_id);
>>> +#endif
>>>              }
>>>          }
>>>      }
>>> @@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
>>>
>>>      huge_register_sigbus();
>>>
>>> +    /* make a copy of socket_mem, needed for balanced allocation. */
>>> +    for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
>>> +        memory[i] = internal_config.socket_mem[i];
>>> +
>>> +
>>>      /* map all hugepages and sort them */
>>>      for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
>>>          unsigned pages_old, pages_new;
>>> @@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
>>>
>>>          /* map all hugepages available */
>>>          pages_old = hpi->num_pages[0];
>>> -        pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
>>> +        pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
>>> +                          memory, 1);
>>>          if (pages_new < pages_old) {
>>>              RTE_LOG(DEBUG, EAL,
>>>                  "%d not %d hugepages of size %u MB allocated\n",
>>> @@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
>>>                sizeof(struct hugepage_file), cmp_physaddr);
>>>
>>>          /* remap all hugepages */
>>> -        if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
>>> +        if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
>>>              hpi->num_pages[0]) {
>>>              RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
>>>                      (unsigned)(hpi->hugepage_sz / 0x100000));
>>> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
>>> index bcaf1b3..4fe22d1 100644
>>> --- a/mk/rte.app.mk
>>> +++ b/mk/rte.app.mk
>>> @@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
>>>  # The static libraries do not know their dependencies.
>>>  # So linking with static library requires explicit dependencies.
>>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
>>> +ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
>>> +_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
>>> +endif
>>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
>>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
>>>  _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
>>>
>>
>>
>>
>>
>>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v10 0/2] Balanced allocation of hugepages
  2017-06-29  5:59                                     ` [PATCH v10 0/2] Balanced allocation of hugepages Ilya Maximets
       [not found]                                       ` <CGME20170629055933eucas1p1e5eba5f07850f63f9afbd48e6ca64c42@eucas1p1.samsung.com>
       [not found]                                       ` <CGME20170629055940eucas1p1c9adcb26bec3ce5de97fe56753fd941a@eucas1p1.samsung.com>
@ 2017-06-29  6:29                                       ` Jerin Jacob
  2017-06-30  8:36                                         ` Ilya Maximets
  2017-06-30 16:12                                       ` [PATCH v11 " Thomas Monjalon
  3 siblings, 1 reply; 99+ messages in thread
From: Jerin Jacob @ 2017-06-29  6:29 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Thomas Monjalon,
	Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Hemant Agrawal

-----Original Message-----
> Date: Thu, 29 Jun 2017 08:59:18 +0300
> From: Ilya Maximets <i.maximets@samsung.com>
> To: dev@dpdk.org, David Marchand <david.marchand@6wind.com>, Sergio
>  Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Thomas Monjalon
>  <thomas@monjalon.net>
> CC: Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu
>  <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>, Bruce
>  Richardson <bruce.richardson@intel.com>, Jerin Jacob
>  <jerin.jacob@caviumnetworks.com>, Hemant Agrawal <hemant.agrawal@nxp.com>,
>  Ilya Maximets <i.maximets@samsung.com>
> Subject: [PATCH v10 0/2] Balanced allocation of hugepages
> X-Mailer: git-send-email 2.7.4
> 
> Version 10:
> 	* Fixed typo in DPAA2 config.
> 
> Version 9:
> 	* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
> 	  anymore.
> 	* Fixed out of bound write to essential_memory in case
> 	  where socket-mem not specified and SIGBUS occurred.
> 
> Version 8:
> 	* helper functions from libnuma used to set mempolicy and
> 	  work with cpu mask.
> 	* Function now restores previous mempolicy instead of MPOL_DEFAULT.
> 	* Fixed essential_memory on SIGBUS.
> 	* Fixed restoring of mempolicy in case of errors (goto out).
> 	* Enabled by default for all linuxapp except armv7 and dpaa2.
> 
> Version 7:
> 	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
> 
> Version 6:
> 	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
> 	  returned. Enabled by default for x86, ppc and thunderx.
> 
> Version 5:
> 	* Fixed shared build. (Automated build test will fail
> 	  anyway because libnuma-devel not installed on build servers)
> 
> Version 4:
> 	* Fixed work on systems without NUMA by adding check for NUMA
> 	  support in kernel.
> 
> Version 3:
> 	* Implemented hybrid schema for allocation.
> 	* Fixed not needed mempolicy change while remapping. (orig = 0)
> 	* Added patch to enable VHOST_NUMA by default.
> 
> Version 2:
> 	* rebased (fuzz in Makefile)
> 
> Ilya Maximets (2):
>   mem: balanced allocation of hugepages
>   config: enable vhost numa awareness by default

Series-Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Tested on an arm64 NUMA machine.
Tested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v10 1/2] mem: balanced allocation of hugepages
  2017-06-29  5:59                                         ` [PATCH v10 1/2] mem: balanced " Ilya Maximets
@ 2017-06-29  7:03                                           ` Hemant Agrawal
  0 siblings, 0 replies; 99+ messages in thread
From: Hemant Agrawal @ 2017-06-29  7:03 UTC (permalink / raw)
  To: Ilya Maximets, dev, David Marchand, Sergio Gonzalez Monroy,
	Thomas Monjalon
  Cc: Heetae Ahn, Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob

On 6/29/2017 11:29 AM, Ilya Maximets wrote:
> Currently EAL allocates hugepages one by one not paying attention
> from which NUMA node allocation was done.
>
> Such behaviour leads to allocation failure if number of available
> hugepages for application limited by cgroups or hugetlbfs and
> memory requested not only from the first socket.
>
> Example:
> 	# 90 x 1GB hugepages available in a system
>
> 	cgcreate -g hugetlb:/test
> 	# Limit to 32GB of hugepages
> 	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
> 	# Request 4GB from each of 2 sockets
> 	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
>
> 	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
> 	EAL: 32 not 90 hugepages of size 1024 MB allocated
> 	EAL: Not enough memory available on socket 1!
> 	     Requested: 4096MB, available: 0MB
> 	PANIC in rte_eal_init():
> 	Cannot init memory
>
> 	This happens because all allocated pages are
> 	on socket 0.
>
> Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
> to one of the requested nodes using the following schema:
>
> 	1) Allocate essential hugepages:
> 		1.1) Allocate as many hugepages from NUMA node N as
> 		     needed to fit the memory requested for this node.
> 		1.2) Repeat 1.1 for all NUMA nodes.
> 	2) Try to map all remaining free hugepages in a round-robin
> 	   fashion.
> 	3) Sort pages and choose the most suitable.
>
> In this case all essential memory will be allocated and all remaining
> pages will be fairly distributed between all requested nodes.
>
> New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
> enabled by default for linuxapp except armv7 and dpaa2.
> Enabling of this option adds libnuma as a dependency for EAL.
>
> Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
>
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
> ---
>  config/common_base                        |   1 +
>  config/common_linuxapp                    |   1 +
>  config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
>  config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
>  lib/librte_eal/linuxapp/eal/Makefile      |   3 +
>  lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
>  mk/rte.app.mk                             |   3 +
>  7 files changed, 126 insertions(+), 8 deletions(-)
>
> diff --git a/config/common_base b/config/common_base
> index f6aafd1..660588a 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
>  CONFIG_RTE_EAL_IGB_UIO=n
>  CONFIG_RTE_EAL_VFIO=n
>  CONFIG_RTE_MALLOC_DEBUG=n
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
>
>  #
>  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
> diff --git a/config/common_linuxapp b/config/common_linuxapp
> index b3cf41b..64bef87 100644
> --- a/config/common_linuxapp
> +++ b/config/common_linuxapp
> @@ -35,6 +35,7 @@
>  CONFIG_RTE_EXEC_ENV="linuxapp"
>  CONFIG_RTE_EXEC_ENV_LINUXAPP=y
>
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
>  CONFIG_RTE_EAL_IGB_UIO=y
>  CONFIG_RTE_EAL_VFIO=y
>  CONFIG_RTE_KNI_KMOD=y
> diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
> index 19607eb..e06b1d4 100644
> --- a/config/defconfig_arm-armv7a-linuxapp-gcc
> +++ b/config/defconfig_arm-armv7a-linuxapp-gcc
> @@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
>  CONFIG_RTE_TOOLCHAIN="gcc"
>  CONFIG_RTE_TOOLCHAIN_GCC=y
>
> +# NUMA is not supported on ARM
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> +
>  # ARM doesn't have support for vmware TSC map
>  CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
>
> diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> index 2304ab6..f78449d 100644
> --- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
> +++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
> @@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
>
>  CONFIG_RTE_PKTMBUF_HEADROOM=256
>
> +# Doesn't support NUMA
> +CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> +
>  #
>  # Compile Support Libraries for DPAA2
>  #
> diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
> index 640afd0..8651e27 100644
> --- a/lib/librte_eal/linuxapp/eal/Makefile
> +++ b/lib/librte_eal/linuxapp/eal/Makefile
> @@ -50,6 +50,9 @@ LDLIBS += -ldl
>  LDLIBS += -lpthread
>  LDLIBS += -lgcc_s
>  LDLIBS += -lrt
> +ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
> +LDLIBS += -lnuma
> +endif
>
>  # specific to linuxapp exec-env
>  SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
> diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
> index e17c9cb..647d89c 100644
> --- a/lib/librte_eal/linuxapp/eal/eal_memory.c
> +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
> @@ -54,6 +54,10 @@
>  #include <sys/time.h>
>  #include <signal.h>
>  #include <setjmp.h>
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +#include <numa.h>
> +#include <numaif.h>
> +#endif
>
>  #include <rte_log.h>
>  #include <rte_memory.h>
> @@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
>  	return sigsetjmp(huge_jmpenv, 1);
>  }
>
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +/* Callback for numa library. */
> +void numa_error(char *where)
> +{
> +	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
> +}
> +#endif
> +
>  /*
>   * Mmap all hugepages of hugepage table: it first open a file in
>   * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
> @@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
>   * map continguous physical blocks in contiguous virtual blocks.
>   */
>  static unsigned
> -map_all_hugepages(struct hugepage_file *hugepg_tbl,
> -		struct hugepage_info *hpi, int orig)
> +map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
> +		  uint64_t *essential_memory __rte_unused, int orig)
>  {
>  	int fd;
>  	unsigned i;
>  	void *virtaddr;
>  	void *vma_addr = NULL;
>  	size_t vma_len = 0;
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +	int node_id = -1;
> +	int essential_prev = 0;
> +	int oldpolicy;
> +	struct bitmask *oldmask = numa_allocate_nodemask();
> +	bool have_numa = true;
> +	unsigned long maxnode = 0;
> +
> +	/* Check if kernel supports NUMA. */
> +	if (numa_available() != 0) {
> +		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
> +		have_numa = false;
> +	}
> +
> +	if (orig && have_numa) {
> +		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
> +		if (get_mempolicy(&oldpolicy, oldmask->maskp,
> +				  oldmask->size + 1, 0, 0) < 0) {
> +			RTE_LOG(ERR, EAL,
> +				"Failed to get current mempolicy: %s. "
> +				"Assuming MPOL_DEFAULT.\n", strerror(errno));
> +			oldpolicy = MPOL_DEFAULT;
> +		}
> +		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
> +			if (internal_config.socket_mem[i])
> +				maxnode = i + 1;
> +	}
> +#endif
>
>  	for (i = 0; i < hpi->num_pages[0]; i++) {
>  		uint64_t hugepage_sz = hpi->hugepage_sz;
>
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +		if (maxnode) {
> +			unsigned int j;
> +
> +			for (j = 0; j < maxnode; j++)
> +				if (essential_memory[j])
> +					break;
> +
> +			if (j == maxnode) {
> +				node_id = (node_id + 1) % maxnode;
> +				while (!internal_config.socket_mem[node_id]) {
> +					node_id++;
> +					node_id %= maxnode;
> +				}
> +				essential_prev = 0;
> +			} else {
> +				node_id = j;
> +				essential_prev = essential_memory[j];
> +
> +				if (essential_memory[j] < hugepage_sz)
> +					essential_memory[j] = 0;
> +				else
> +					essential_memory[j] -= hugepage_sz;
> +			}
> +
> +			RTE_LOG(DEBUG, EAL,
> +				"Setting policy MPOL_PREFERRED for socket %d\n",
> +				node_id);
> +			numa_set_preferred(node_id);
> +		}
> +#endif
> +
>  		if (orig) {
>  			hugepg_tbl[i].file_id = i;
>  			hugepg_tbl[i].size = hugepage_sz;
> @@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  		if (fd < 0) {
>  			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
>  					strerror(errno));
> -			return i;
> +			goto out;
>  		}
>
>  		/* map the segment, and populate page tables,
> @@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
>  					strerror(errno));
>  			close(fd);
> -			return i;
> +			goto out;
>  		}
>
>  		if (orig) {
> @@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  				munmap(virtaddr, hugepage_sz);
>  				close(fd);
>  				unlink(hugepg_tbl[i].filepath);
> -				return i;
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +				if (maxnode)
> +					essential_memory[node_id] =
> +						essential_prev;
> +#endif
> +				goto out;
>  			}
>  			*(int *)virtaddr = 0;
>  		}
> @@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
>  				__func__, strerror(errno));
>  			close(fd);
> -			return i;
> +			goto out;
>  		}
>
>  		close(fd);
> @@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
>  		vma_len -= hugepage_sz;
>  	}
>
> +out:
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +	if (maxnode) {
> +		RTE_LOG(DEBUG, EAL,
> +			"Restoring previous memory policy: %d\n", oldpolicy);
> +		if (oldpolicy == MPOL_DEFAULT) {
> +			numa_set_localalloc();
> +		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
> +					 oldmask->size + 1) < 0) {
> +			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
> +				strerror(errno));
> +			numa_set_localalloc();
> +		}
> +	}
> +	numa_free_cpumask(oldmask);
> +#endif
>  	return i;
>  }
>
> @@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
>  			if (hugepg_tbl[i].orig_va == va) {
>  				hugepg_tbl[i].socket_id = socket_id;
>  				hp_count++;
> +#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
> +				RTE_LOG(DEBUG, EAL,
> +					"Hugepage %s is on socket %d\n",
> +					hugepg_tbl[i].filepath, socket_id);
> +#endif
>  			}
>  		}
>  	}
> @@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
>
>  	huge_register_sigbus();
>
> +	/* make a copy of socket_mem, needed for balanced allocation. */
> +	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
> +		memory[i] = internal_config.socket_mem[i];
> +
> +
>  	/* map all hugepages and sort them */
>  	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
>  		unsigned pages_old, pages_new;
> @@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
>
>  		/* map all hugepages available */
>  		pages_old = hpi->num_pages[0];
> -		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
> +		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
> +					      memory, 1);
>  		if (pages_new < pages_old) {
>  			RTE_LOG(DEBUG, EAL,
>  				"%d not %d hugepages of size %u MB allocated\n",
> @@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
>  		      sizeof(struct hugepage_file), cmp_physaddr);
>
>  		/* remap all hugepages */
> -		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
> +		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
>  		    hpi->num_pages[0]) {
>  			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
>  					(unsigned)(hpi->hugepage_sz / 0x100000));
> diff --git a/mk/rte.app.mk b/mk/rte.app.mk
> index bcaf1b3..4fe22d1 100644
> --- a/mk/rte.app.mk
> +++ b/mk/rte.app.mk
> @@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
>  # The static libraries do not know their dependencies.
>  # So linking with static library requires explicit dependencies.
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
> +ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
> +_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
> +endif
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
>  _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v10 0/2] Balanced allocation of hugepages
  2017-06-29  6:29                                       ` [PATCH v10 0/2] Balanced allocation of hugepages Jerin Jacob
@ 2017-06-30  8:36                                         ` Ilya Maximets
  0 siblings, 0 replies; 99+ messages in thread
From: Ilya Maximets @ 2017-06-30  8:36 UTC (permalink / raw)
  To: Sergio Gonzalez Monroy, Thomas Monjalon
  Cc: Jerin Jacob, dev, David Marchand, Heetae Ahn, Yuanhan Liu,
	Jianfeng Tan, Neil Horman, Yulong Pei, Bruce Richardson,
	Hemant Agrawal

So, are we ready for merging this now?
Thomas?

Best regards, Ilya Maximets.

On 29.06.2017 09:29, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Thu, 29 Jun 2017 08:59:18 +0300
>> From: Ilya Maximets <i.maximets@samsung.com>
>> To: dev@dpdk.org, David Marchand <david.marchand@6wind.com>, Sergio
>>  Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>, Thomas Monjalon
>>  <thomas@monjalon.net>
>> CC: Heetae Ahn <heetae82.ahn@samsung.com>, Yuanhan Liu
>>  <yliu@fridaylinux.org>, Jianfeng Tan <jianfeng.tan@intel.com>, Neil Horman
>>  <nhorman@tuxdriver.com>, Yulong Pei <yulong.pei@intel.com>, Bruce
>>  Richardson <bruce.richardson@intel.com>, Jerin Jacob
>>  <jerin.jacob@caviumnetworks.com>, Hemant Agrawal <hemant.agrawal@nxp.com>,
>>  Ilya Maximets <i.maximets@samsung.com>
>> Subject: [PATCH v10 0/2] Balanced allocation of hugepages
>> X-Mailer: git-send-email 2.7.4
>>
>> Version 10:
>> 	* Fixed typo in DPAA2 config.
>>
>> Version 9:
>> 	* Removed DPDK_DEP_NUMA from test-build.sh . Not needed
>> 	  anymore.
>> 	* Fixed out of bound write to essential_memory in case
>> 	  where socket-mem not specified and SIGBUS occurred.
>>
>> Version 8:
>> 	* helper functions from libnuma used to set mempolicy and
>> 	  work with cpu mask.
>> 	* Function now restores previous mempolicy instead of MPOL_DEFAULT.
>> 	* Fixed essential_memory on SIGBUS.
>> 	* Fixed restoring of mempolicy in case of errors (goto out).
>> 	* Enabled by default for all linuxapp except armv7 and dpaa2.
>>
>> Version 7:
>> 	* RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
>>
>> Version 6:
>> 	* Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>> 	  returned. Enabled by default for x86, ppc and thunderx.
>>
>> Version 5:
>> 	* Fixed shared build. (Automated build test will fail
>> 	  anyway because libnuma-devel not installed on build servers)
>>
>> Version 4:
>> 	* Fixed work on systems without NUMA by adding check for NUMA
>> 	  support in kernel.
>>
>> Version 3:
>> 	* Implemented hybrid schema for allocation.
>> 	* Fixed not needed mempolicy change while remapping. (orig = 0)
>> 	* Added patch to enable VHOST_NUMA by default.
>>
>> Version 2:
>> 	* rebased (fuzz in Makefile)
>>
>> Ilya Maximets (2):
>>   mem: balanced allocation of hugepages
>>   config: enable vhost numa awareness by default
> 
> Series-Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Tested on an arm64 NUMA machine.
> Tested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>

^ permalink raw reply	[flat|nested] 99+ messages in thread

* Re: [PATCH v10 2/2] config: enable vhost numa awareness by default
  2017-06-29  5:59                                         ` [PATCH v10 2/2] config: enable vhost numa awareness by default Ilya Maximets
@ 2017-06-30 15:50                                           ` Thomas Monjalon
  0 siblings, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-30 15:50 UTC (permalink / raw)
  To: Ilya Maximets
  Cc: dev, David Marchand, Sergio Gonzalez Monroy, Heetae Ahn,
	Yuanhan Liu, Jianfeng Tan, Neil Horman, Yulong Pei,
	Bruce Richardson, Jerin Jacob, Hemant Agrawal

29/06/2017 07:59, Ilya Maximets:
> It is safe to enable LIBRTE_VHOST_NUMA by default for all
> configurations where libnuma is already a default dependency.
> 
> DPDK_DEP_NUMA not needed anymore.
> 
> Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  config/common_linuxapp                    | 1 +
>  config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
>  config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
>  devtools/test-build.sh                    | 4 ----
>  4 files changed, 3 insertions(+), 4 deletions(-)

On second thought, we still need DPDK_DEP_NUMA in
devtools/test-build.sh.
We just need to invert the logic so that the default is enabled.

I am going to send a v11.

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v11 0/2] Balanced allocation of hugepages
  2017-06-29  5:59                                     ` [PATCH v10 0/2] Balanced allocation of hugepages Ilya Maximets
                                                         ` (2 preceding siblings ...)
  2017-06-29  6:29                                       ` [PATCH v10 0/2] Balanced allocation of hugepages Jerin Jacob
@ 2017-06-30 16:12                                       ` Thomas Monjalon
  2017-06-30 16:12                                         ` [PATCH v11 1/2] mem: balanced " Thomas Monjalon
                                                           ` (2 more replies)
  3 siblings, 3 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-30 16:12 UTC (permalink / raw)
  To: Ilya Maximets; +Cc: dev

Version 11:
        * Fixed test-build.sh for missing libnuma dependency

Version 10:
        * Fixed typo in DPAA2 config.

Version 9:
        * Removed DPDK_DEP_NUMA from test-build.sh . Not needed
          anymore.
        * Fixed out of bound write to essential_memory in case
          where socket-mem not specified and SIGBUS occurred.

Version 8:
        * helper functions from libnuma used to set mempolicy and
          work with cpu mask.
        * Function now restores previous mempolicy instead of MPOL_DEFAULT.
        * Fixed essential_memory on SIGBUS.
        * Fixed restoring of mempolicy in case of errors (goto out).
        * Enabled by default for all linuxapp except armv7 and dpaa2.

Version 7:
        * RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES

Version 6:
        * Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
          returned. Enabled by default for x86, ppc and thunderx.

Version 5:
        * Fixed shared build. (Automated build test will fail
          anyway because libnuma-devel not installed on build servers)

Version 4:
        * Fixed work on systems without NUMA by adding check for NUMA
          support in kernel.

Version 3:
        * Implemented hybrid schema for allocation.
        * Fixed not needed mempolicy change while remapping. (orig = 0)
        * Added patch to enable VHOST_NUMA by default.

Version 2:
        * rebased (fuzz in Makefile)

Ilya Maximets (2):
  mem: balanced allocation of hugepages
  config: enable vhost NUMA awareness by default

 config/common_base                        |   1 +
 config/common_linuxapp                    |   2 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   4 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   4 +
 devtools/test-build.sh                    |   6 +-
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 8 files changed, 132 insertions(+), 11 deletions(-)

-- 
2.13.1

^ permalink raw reply	[flat|nested] 99+ messages in thread

* [PATCH v11 1/2] mem: balanced allocation of hugepages
  2017-06-30 16:12                                       ` [PATCH v11 " Thomas Monjalon
@ 2017-06-30 16:12                                         ` Thomas Monjalon
  2017-06-30 16:12                                         ` [PATCH v11 2/2] config: enable vhost NUMA awareness by default Thomas Monjalon
  2017-07-01 10:59                                         ` [PATCH v11 0/2] Balanced allocation of hugepages Thomas Monjalon
  2 siblings, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-30 16:12 UTC (permalink / raw)
  To: Ilya Maximets; +Cc: dev

From: Ilya Maximets <i.maximets@samsung.com>

Currently EAL allocates hugepages one by one not paying attention
from which NUMA node allocation was done.

Such behaviour leads to allocation failure if number of available
hugepages for application limited by cgroups or hugetlbfs and
memory requested not only from the first socket.

Example:
	# 90 x 1GB hugepages available in a system

	cgcreate -g hugetlb:/test
	# Limit to 32GB of hugepages
	cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
	# Request 4GB from each of 2 sockets
	cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...

	EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
	EAL: 32 not 90 hugepages of size 1024 MB allocated
	EAL: Not enough memory available on socket 1!
	     Requested: 4096MB, available: 0MB
	PANIC in rte_eal_init():
	Cannot init memory

	This happens because all allocated pages are
	on socket 0.

Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of the requested nodes using the following schema:

	1) Allocate essential hugepages:
		1.1) Allocate as many hugepages from NUMA node N as
		     needed to fit the memory requested for this node.
		1.2) Repeat 1.1 for all NUMA nodes.
	2) Try to map all remaining free hugepages in a round-robin
	   fashion.
	3) Sort pages and choose the most suitable.

In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.

New config option RTE_EAL_NUMA_AWARE_HUGEPAGES introduced and
enabled by default for linuxapp except armv7 and dpaa2.
Enabling of this option adds libnuma as a dependency for EAL.

Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Tested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
 config/common_base                        |   1 +
 config/common_linuxapp                    |   1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  |   3 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc |   3 +
 devtools/test-build.sh                    |   6 +-
 lib/librte_eal/linuxapp/eal/Makefile      |   3 +
 lib/librte_eal/linuxapp/eal/eal_memory.c  | 120 ++++++++++++++++++++++++++++--
 mk/rte.app.mk                             |   3 +
 8 files changed, 129 insertions(+), 11 deletions(-)

diff --git a/config/common_base b/config/common_base
index f6aafd17d..660588a3d 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
 CONFIG_RTE_EAL_IGB_UIO=n
 CONFIG_RTE_EAL_VFIO=n
 CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
 
 #
 # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b01..64bef87af 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,7 @@
 CONFIG_RTE_EXEC_ENV="linuxapp"
 CONFIG_RTE_EXEC_ENV_LINUXAPP=y
 
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
 CONFIG_RTE_EAL_IGB_UIO=y
 CONFIG_RTE_EAL_VFIO=y
 CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb67..e06b1d441 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
 CONFIG_RTE_TOOLCHAIN="gcc"
 CONFIG_RTE_TOOLCHAIN_GCC=y
 
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
 
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 2304ab607..d17201b1e 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -45,6 +45,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
 
 CONFIG_RTE_PKTMBUF_HEADROOM=256
 
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
 #
 # Compile Support Libraries for DPAA2
 #
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 61bdce7cc..079c8b8f8 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -41,7 +41,7 @@ default_path=$PATH
 # - DPDK_DEP_ISAL_CRYPTO (y/[n])
 # - DPDK_DEP_LDFLAGS
 # - DPDK_DEP_MOFED (y/[n])
-# - DPDK_DEP_NUMA (y/[n])
+# - DPDK_DEP_NUMA ([y]/n)
 # - DPDK_DEP_PCAP (y/[n])
 # - DPDK_DEP_SSL (y/[n])
 # - DPDK_DEP_SZE (y/[n])
@@ -163,8 +163,8 @@ config () # <directory> <target> <options>
 		sed -ri 's,(TEST_PMD_RECORD_.*=)n,\1y,' $1/.config )
 
 		# Automatic configuration
-		test "$DPDK_DEP_NUMA" != y || \
-		sed -ri               's,(NUMA=)n,\1y,' $1/.config
+		test "$DPDK_DEP_NUMA" != n || \
+		sed -ri             's,(NUMA.*=)y,\1n,' $1/.config
 		sed -ri    's,(LIBRTE_IEEE1588=)n,\1y,' $1/.config
 		sed -ri             's,(BYPASS=)n,\1y,' $1/.config
 		test "$DPDK_DEP_ARCHIVE" != y || \
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd088..8651e2783 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
 LDLIBS += -lpthread
 LDLIBS += -lgcc_s
 LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
 
 # specific to linuxapp exec-env
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb5d..647d89c58 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,10 @@
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
 
 #include <rte_log.h>
 #include <rte_memory.h>
@@ -348,6 +352,14 @@ static int huge_wrap_sigsetjmp(void)
 	return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
 /*
  * Mmap all hugepages of hugepage table: it first open a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +368,78 @@ static int huge_wrap_sigsetjmp(void)
  * map continguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-		struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+		  uint64_t *essential_memory __rte_unused, int orig)
 {
 	int fd;
 	unsigned i;
 	void *virtaddr;
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	int node_id = -1;
+	int essential_prev = 0;
+	int oldpolicy;
+	struct bitmask *oldmask = numa_allocate_nodemask();
+	bool have_numa = true;
+	unsigned long maxnode = 0;
+
+	/* Check if kernel supports NUMA. */
+	if (numa_available() != 0) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		have_numa = false;
+	}
+
+	if (orig && have_numa) {
+		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+		if (get_mempolicy(&oldpolicy, oldmask->maskp,
+				  oldmask->size + 1, 0, 0) < 0) {
+			RTE_LOG(ERR, EAL,
+				"Failed to get current mempolicy: %s. "
+				"Assuming MPOL_DEFAULT.\n", strerror(errno));
+			oldpolicy = MPOL_DEFAULT;
+		}
+		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+			if (internal_config.socket_mem[i])
+				maxnode = i + 1;
+	}
+#endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+		if (maxnode) {
+			unsigned int j;
+
+			for (j = 0; j < maxnode; j++)
+				if (essential_memory[j])
+					break;
+
+			if (j == maxnode) {
+				node_id = (node_id + 1) % maxnode;
+				while (!internal_config.socket_mem[node_id]) {
+					node_id++;
+					node_id %= maxnode;
+				}
+				essential_prev = 0;
+			} else {
+				node_id = j;
+				essential_prev = essential_memory[j];
+
+				if (essential_memory[j] < hugepage_sz)
+					essential_memory[j] = 0;
+				else
+					essential_memory[j] -= hugepage_sz;
+			}
+
+			RTE_LOG(DEBUG, EAL,
+				"Setting policy MPOL_PREFERRED for socket %d\n",
+				node_id);
+			numa_set_preferred(node_id);
+		}
+#endif
+
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
@@ -422,7 +494,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		if (fd < 0) {
 			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
 					strerror(errno));
-			return i;
+			goto out;
 		}
 
 		/* map the segment, and populate page tables,
@@ -433,7 +505,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
 					strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		if (orig) {
@@ -458,7 +530,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 				munmap(virtaddr, hugepage_sz);
 				close(fd);
 				unlink(hugepg_tbl[i].filepath);
-				return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				if (maxnode)
+					essential_memory[node_id] =
+						essential_prev;
+#endif
+				goto out;
 			}
 			*(int *)virtaddr = 0;
 		}
@@ -469,7 +546,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
 				__func__, strerror(errno));
 			close(fd);
-			return i;
+			goto out;
 		}
 
 		close(fd);
@@ -478,6 +555,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		vma_len -= hugepage_sz;
 	}
 
+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (maxnode) {
+		RTE_LOG(DEBUG, EAL,
+			"Restoring previous memory policy: %d\n", oldpolicy);
+		if (oldpolicy == MPOL_DEFAULT) {
+			numa_set_localalloc();
+		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
+					 oldmask->size + 1) < 0) {
+			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+				strerror(errno));
+			numa_set_localalloc();
+		}
+	}
+	numa_free_cpumask(oldmask);
+#endif
 	return i;
 }
 
@@ -562,6 +655,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			if (hugepg_tbl[i].orig_va == va) {
 				hugepg_tbl[i].socket_id = socket_id;
 				hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+				RTE_LOG(DEBUG, EAL,
+					"Hugepage %s is on socket %d\n",
+					hugepg_tbl[i].filepath, socket_id);
+#endif
 			}
 		}
 	}
@@ -1000,6 +1098,11 @@ rte_eal_hugepage_init(void)
 
 	huge_register_sigbus();
 
+	/* make a copy of socket_mem, needed for balanced allocation. */
+	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+		memory[i] = internal_config.socket_mem[i];
+
+
 	/* map all hugepages and sort them */
 	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
 		unsigned pages_old, pages_new;
@@ -1017,7 +1120,8 @@ rte_eal_hugepage_init(void)
 
 		/* map all hugepages available */
 		pages_old = hpi->num_pages[0];
-		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+					      memory, 1);
 		if (pages_new < pages_old) {
 			RTE_LOG(DEBUG, EAL,
 				"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1164,7 @@ rte_eal_hugepage_init(void)
 		      sizeof(struct hugepage_file), cmp_physaddr);
 
 		/* remap all hugepages */
-		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
 		    hpi->num_pages[0]) {
 			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b382..4fe22d1fe 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
 # The static libraries do not know their dependencies.
 # So linking with static library requires explicit dependencies.
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lnuma
+endif
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lm
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrt
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lm
-- 
2.13.1

^ permalink raw reply related	[flat|nested] 99+ messages in thread
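
A note on the mechanism used by the patch above: libnuma's
numa_set_preferred() is called right before each hugepage file is mapped,
so the kernel tries to satisfy the fault from the chosen socket and falls
back to other sockets only when that one is exhausted. The following
minimal standalone sketch illustrates the same pattern outside of EAL
(a hypothetical example, not DPDK code; the hugetlbfs mount point
/dev/hugepages-1G and the two node numbers are assumptions):

	/*
	 * Sketch: map hugetlbfs-backed pages with a preferred NUMA node.
	 * Build with: gcc -o prefer prefer.c -lnuma
	 */
	#include <fcntl.h>
	#include <numa.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define HUGEPAGE_SZ (1ULL << 30)	/* assuming 1 GB pages */

	static void *map_hugepage_on_node(const char *path, int node)
	{
		void *va;
		int fd;

		/* Prefer 'node'; MPOL_PREFERRED falls back to other nodes
		 * if this one has no free hugepages. */
		numa_set_preferred(node);

		fd = open(path, O_CREAT | O_RDWR, 0600);
		if (fd < 0)
			return NULL;

		va = mmap(NULL, HUGEPAGE_SZ, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE, fd, 0);
		close(fd);
		return va == MAP_FAILED ? NULL : va;
	}

	int main(void)
	{
		void *a, *b;

		if (numa_available() < 0) {
			fprintf(stderr, "NUMA not supported by the kernel\n");
			return EXIT_FAILURE;
		}

		/* Round-robin two pages over sockets 0 and 1, like the
		 * patch does for pages left after the essential ones. */
		a = map_hugepage_on_node("/dev/hugepages-1G/page0", 0);
		b = map_hugepage_on_node("/dev/hugepages-1G/page1", 1);

		/* Restore local allocation, mirroring the restore step. */
		numa_set_localalloc();

		return (a && b) ? EXIT_SUCCESS : EXIT_FAILURE;
	}

The real patch additionally saves the current policy with get_mempolicy()
before the loop and restores it afterwards, switching to local allocation
only when the saved policy was MPOL_DEFAULT or the restore fails.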

* [PATCH v11 2/2] config: enable vhost NUMA awareness by default
  2017-06-30 16:12                                       ` [PATCH v11 " Thomas Monjalon
  2017-06-30 16:12                                         ` [PATCH v11 1/2] mem: balanced " Thomas Monjalon
@ 2017-06-30 16:12                                         ` Thomas Monjalon
  2017-07-01 10:59                                         ` [PATCH v11 0/2] Balanced allocation of hugepages Thomas Monjalon
  2 siblings, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-06-30 16:12 UTC (permalink / raw)
  To: Ilya Maximets; +Cc: dev

From: Ilya Maximets <i.maximets@samsung.com>

It is safe to enable LIBRTE_VHOST_NUMA by default for all
configurations where libnuma is already a default dependency.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Tested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
 config/common_linuxapp                    | 1 +
 config/defconfig_arm-armv7a-linuxapp-gcc  | 1 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc | 1 +
 3 files changed, 3 insertions(+)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 64bef87af..74c7d64ec 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -42,6 +42,7 @@ CONFIG_RTE_KNI_KMOD=y
 CONFIG_RTE_LIBRTE_KNI=y
 CONFIG_RTE_LIBRTE_PMD_KNI=y
 CONFIG_RTE_LIBRTE_VHOST=y
+CONFIG_RTE_LIBRTE_VHOST_NUMA=y
 CONFIG_RTE_LIBRTE_PMD_VHOST=y
 CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
 CONFIG_RTE_LIBRTE_PMD_TAP=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index e06b1d441..00bc2ab90 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -49,6 +49,7 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
 
 # NUMA is not supported on ARM
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 # ARM doesn't have support for vmware TSC map
 CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index d17201b1e..4452c2311 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -47,6 +47,7 @@ CONFIG_RTE_PKTMBUF_HEADROOM=256
 
 # Doesn't support NUMA
 CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
 
 #
 # Compile Support Libraries for DPAA2
-- 
2.13.1

^ permalink raw reply related	[flat|nested] 99+ messages in thread

* Re: [PATCH v11 0/2] Balanced allocation of hugepages
  2017-06-30 16:12                                       ` [PATCH v11 " Thomas Monjalon
  2017-06-30 16:12                                         ` [PATCH v11 1/2] mem: balanced " Thomas Monjalon
  2017-06-30 16:12                                         ` [PATCH v11 2/2] config: enable vhost NUMA awareness by default Thomas Monjalon
@ 2017-07-01 10:59                                         ` Thomas Monjalon
  2 siblings, 0 replies; 99+ messages in thread
From: Thomas Monjalon @ 2017-07-01 10:59 UTC (permalink / raw)
  To: Ilya Maximets; +Cc: dev

30/06/2017 18:12, Thomas Monjalon:
> Version 11:
>         * Fixed test-build.sh for missing libnuma dependency
> 
> Version 10:
>         * Fixed typo in DPAA2 config.
> 
> Version 9:
>         * Removed DPDK_DEP_NUMA from test-build.sh . Not needed
>           anymore.
>         * Fixed out of bound write to essential_memory in case
>           where socket-mem not specified and SIGBUS occurred.
> 
> Version 8:
>         * helper functions from libnuma used to set mempolicy and
>           work with cpu mask.
>         * Function now restores previous mempolicy instead of MPOL_DEFAULT.
>         * Fixed essential_memory on SIGBUS.
>         * Fixed restoring of mempolicy in case of errors (goto out).
>         * Enabled by default for all linuxapp except armv7 and dpaa2.
> 
> Version 7:
>         * RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES --> RTE_EAL_NUMA_AWARE_HUGEPAGES
> 
> Version 6:
>         * Configuration option RTE_LIBRTE_EAL_NUMA_AWARE_HUGEPAGES
>           returned. Enabled by default for x86, ppc and thunderx.
> 
> Version 5:
>         * Fixed shared build. (Automated build test will fail
>           anyway because libnuma-devel not installed on build servers)
> 
> Version 4:
>         * Fixed work on systems without NUMA by adding check for NUMA
>           support in kernel.
> 
> Version 3:
>         * Implemented hybrid schema for allocation.
>         * Fixed not needed mempolicy change while remapping. (orig = 0)
>         * Added patch to enable VHOST_NUMA by default.
> 
> Version 2:
>         * rebased (fuzz in Makefile)
> 
> Ilya Maximets (2):
>   mem: balanced allocation of hugepages
>   config: enable vhost NUMA awareness by default

Applied this version, thanks for getting an agreement after long discussions :)

^ permalink raw reply	[flat|nested] 99+ messages in thread
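
As a practical check of the behaviour this series changes, the per-socket
spread of the allocated pages can be read from sysfs after EAL
initialization. A small illustrative snippet (the 1 GB page-size directory
and the two node numbers are assumptions for this example):

	for n in /sys/devices/system/node/node0 /sys/devices/system/node/node1; do
		total=$(cat $n/hugepages/hugepages-1048576kB/nr_hugepages)
		free=$(cat $n/hugepages/hugepages-1048576kB/free_hugepages)
		echo "$n: $((total - free)) of $total 1GB pages in use"
	done

With the balanced allocation applied, the in-use counts should roughly
follow the per-socket amounts passed via --socket-mem instead of piling
up on socket 0.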

end of thread, other threads:[~2017-07-01 10:59 UTC | newest]

Thread overview: 99+ messages (download: mbox.gz / follow: Atom feed)
     [not found] <CGME20170216130139eucas1p2512567d6f5db9eaac5ee840b56bf920a@eucas1p2.samsung.com>
2017-02-16 13:01 ` [PATCH] mem: balanced allocation of hugepages Ilya Maximets
2017-02-16 13:26   ` Tan, Jianfeng
2017-02-16 13:55     ` Ilya Maximets
2017-02-16 13:57       ` Ilya Maximets
2017-02-16 13:31   ` Bruce Richardson
2017-03-06  9:34   ` Ilya Maximets
2017-03-08 13:46     ` Sergio Gonzalez Monroy
2017-03-09 12:57       ` Ilya Maximets
2017-03-27 13:01         ` Sergio Gonzalez Monroy
2017-03-27 14:43           ` Ilya Maximets
2017-04-07 15:14             ` Ilya Maximets
2017-04-07 15:44               ` Thomas Monjalon
2017-04-10  7:11                 ` Ilya Maximets
2017-04-10  7:51                   ` Sergio Gonzalez Monroy
2017-04-10  8:05                     ` Ilya Maximets
     [not found]   ` <CGME20170410080425eucas1p27fd424ae58151f13b1a7a3723aa4ad1e@eucas1p2.samsung.com>
2017-04-10  8:04     ` [PATCH v2] " Ilya Maximets
2017-04-10 10:03       ` Thomas Monjalon
     [not found]       ` <CGME20170606062227eucas1p2c49a95fb0fe11a4cadd5b4ceeb9712b1@eucas1p2.samsung.com>
2017-06-06  6:22         ` [PATCH v3 0/2] Balanced " Ilya Maximets
     [not found]           ` <CGME20170606062232eucas1p11d2c304a28353d32b93ddfbd134d4da9@eucas1p1.samsung.com>
2017-06-06  6:22             ` [PATCH v3 1/2] mem: balanced " Ilya Maximets
     [not found]           ` <CGME20170606062237eucas1p1de58fdde1bff816e480e50308804ba7a@eucas1p1.samsung.com>
2017-06-06  6:22             ` [PATCH v3 2/2] config: enable vhost numa awareness by default Ilya Maximets
     [not found]           ` <CGME20170606081359eucas1p2f7eafa1abc346c5bb910c783df1d1520@eucas1p2.samsung.com>
2017-06-06  8:13             ` [PATCH v4 0/2] Balanced allocation of hugepages Ilya Maximets
     [not found]               ` <CGME20170606081403eucas1p20c561b9177a51cfe58dd53b76cbfaaf7@eucas1p2.samsung.com>
2017-06-06  8:13                 ` [PATCH v4 1/2] mem: balanced " Ilya Maximets
     [not found]               ` <CGME20170606081409eucas1p2eed4a7dc49f1028c723f8c0a7a61fadf@eucas1p2.samsung.com>
2017-06-06  8:13                 ` [PATCH v4 2/2] config: enable vhost numa awareness by default Ilya Maximets
     [not found]               ` <CGME20170606133348eucas1p1cc5c3c05f88b2101c2ea47b26e0cac24@eucas1p1.samsung.com>
2017-06-06 13:33                 ` [PATCH v5 0/2] Balanced allocation of hugepages Ilya Maximets
     [not found]                   ` <CGME20170606133352eucas1p13d1e860e996057a50a084f9365189e4d@eucas1p1.samsung.com>
2017-06-06 13:33                     ` [PATCH v5 1/2] mem: balanced " Ilya Maximets
     [not found]                   ` <CGME20170606133354eucas1p284ae347e9ff07d6e8ab2bc09344ad1e5@eucas1p2.samsung.com>
2017-06-06 13:33                     ` [PATCH v5 2/2] config: enable vhost numa awareness by default Ilya Maximets
2017-06-08 11:21                   ` [PATCH v5 0/2] Balanced allocation of hugepages Ilya Maximets
2017-06-08 12:14                     ` Bruce Richardson
2017-06-08 15:44                       ` Sergio Gonzalez Monroy
2017-06-14  6:11                         ` Ilya Maximets
2017-06-19 11:10                           ` Hemant Agrawal
2017-06-20 13:07                             ` Thomas Monjalon
2017-06-20 13:58                               ` Ilya Maximets
2017-06-20 14:35                                 ` Thomas Monjalon
2017-06-20 14:58                                   ` Sergio Gonzalez Monroy
2017-06-20 15:41                                     ` Jerin Jacob
2017-06-20 15:51                                       ` Sergio Gonzalez Monroy
2017-06-21  8:14                                       ` Hemant Agrawal
2017-06-21  8:25                                         ` Sergio Gonzalez Monroy
2017-06-21  8:36                                           ` Ilya Maximets
2017-06-21  8:41                                           ` Jerin Jacob
2017-06-21  8:49                                             ` Thomas Monjalon
2017-06-21  9:27                                               ` Jerin Jacob
2017-06-21  9:58                                                 ` Thomas Monjalon
2017-06-21 10:29                                                   ` Jerin Jacob
2017-06-21 10:36                                                     ` Ilya Maximets
2017-06-21 11:22                                                       ` Jerin Jacob
2017-06-21 11:29                                                         ` Thomas Monjalon
2017-06-27  9:13                                                         ` Hemant Agrawal
2017-06-27  9:26                                                           ` Thomas Monjalon
2017-06-27  9:48                                                             ` Hemant Agrawal
     [not found]                   ` <CGME20170621080434eucas1p18d3d4e4133c1cf885c849d022806408d@eucas1p1.samsung.com>
2017-06-21  8:04                     ` [PATCH v6 " Ilya Maximets
     [not found]                       ` <CGME20170621080441eucas1p2dc01b29e7c8e4c1546ace6cd76ae51ff@eucas1p2.samsung.com>
2017-06-21  8:04                         ` [PATCH v6 1/2] mem: balanced " Ilya Maximets
2017-06-21  8:51                           ` Thomas Monjalon
2017-06-21  8:58                             ` Bruce Richardson
2017-06-21  9:25                               ` Ilya Maximets
2017-06-21  9:34                                 ` Bruce Richardson
2017-06-21  9:28                               ` Thomas Monjalon
     [not found]                       ` <CGME20170621080448eucas1p28951fac6e4910cc599fe88d7edac9734@eucas1p2.samsung.com>
2017-06-21  8:04                         ` [PATCH v6 2/2] config: enable vhost numa awareness by default Ilya Maximets
     [not found]                       ` <CGME20170621100837eucas1p1c570092cac733a66d939ca7ff04ac9e6@eucas1p1.samsung.com>
2017-06-21 10:08                         ` [PATCH v7 0/2] Balanced allocation of hugepages Ilya Maximets
     [not found]                           ` <CGME20170621100841eucas1p1114078b1d8a38920c3633e9bddbabc02@eucas1p1.samsung.com>
2017-06-21 10:08                             ` [PATCH v7 1/2] mem: balanced " Ilya Maximets
     [not found]                           ` <CGME20170621100845eucas1p2a457b1694d20de8e2d8126df679c43ae@eucas1p2.samsung.com>
2017-06-21 10:08                             ` [PATCH v7 2/2] config: enable vhost numa awareness by default Ilya Maximets
2017-06-27  9:20                               ` Hemant Agrawal
2017-06-26 10:44                           ` [PATCH v7 0/2] Balanced allocation of hugepages Ilya Maximets
2017-06-26 14:07                             ` Jerin Jacob
2017-06-26 15:33                             ` Sergio Gonzalez Monroy
2017-06-27  8:42                               ` Ilya Maximets
     [not found]                           ` <CGME20170627084632eucas1p28133ee4b425b3938e2564fca03e1140b@eucas1p2.samsung.com>
2017-06-27  8:46                             ` [PATCH v8 " Ilya Maximets
     [not found]                               ` <CGME20170627084637eucas1p2c591db905fa9f143fa5dbb3c08fae82f@eucas1p2.samsung.com>
2017-06-27  8:46                                 ` [PATCH v8 1/2] mem: balanced " Ilya Maximets
     [not found]                               ` <CGME20170627084641eucas1p182cac065efef74445ffa234a6dcbb23d@eucas1p1.samsung.com>
2017-06-27  8:46                                 ` [PATCH v8 2/2] config: enable vhost numa awareness by default Ilya Maximets
2017-06-27  9:18                                   ` Hemant Agrawal
2017-06-27  9:21                                     ` Thomas Monjalon
2017-06-27  9:41                                       ` Hemant Agrawal
2017-06-27  9:59                                         ` Thomas Monjalon
2017-06-27  9:59                                         ` Jerin Jacob
2017-06-27 12:17                                           ` Hemant Agrawal
2017-06-27 12:45                                             ` Jerin Jacob
2017-06-27 13:00                                               ` Hemant Agrawal
2017-06-27  9:19                                   ` Thomas Monjalon
2017-06-27 10:26                                     ` Ilya Maximets
     [not found]                               ` <CGME20170627102447eucas1p15a57bbaaf46944c0935d4ef71b55cd83@eucas1p1.samsung.com>
2017-06-27 10:24                                 ` [PATCH v9 0/2] Balanced allocation of hugepages Ilya Maximets
     [not found]                                   ` <CGME20170627102451eucas1p2254d8679f70e261b9db9d2123aa80091@eucas1p2.samsung.com>
2017-06-27 10:24                                     ` [PATCH v9 1/2] mem: balanced " Ilya Maximets
2017-06-28 10:30                                       ` Sergio Gonzalez Monroy
2017-06-29  5:32                                       ` Hemant Agrawal
2017-06-29  5:48                                         ` Ilya Maximets
2017-06-29  6:08                                           ` Ilya Maximets
     [not found]                                   ` <CGME20170627102454eucas1p14b2a1024d77158ad0bf40d62e6ad4365@eucas1p1.samsung.com>
2017-06-27 10:24                                     ` [PATCH v9 2/2] config: enable vhost numa awareness by default Ilya Maximets
2017-06-29  5:31                                       ` Hemant Agrawal
     [not found]                                   ` <CGME20170629055928eucas1p17e823d821cfe95953bfa59dc9883ca4f@eucas1p1.samsung.com>
2017-06-29  5:59                                     ` [PATCH v10 0/2] Balanced allocation of hugepages Ilya Maximets
     [not found]                                       ` <CGME20170629055933eucas1p1e5eba5f07850f63f9afbd48e6ca64c42@eucas1p1.samsung.com>
2017-06-29  5:59                                         ` [PATCH v10 1/2] mem: balanced " Ilya Maximets
2017-06-29  7:03                                           ` Hemant Agrawal
     [not found]                                       ` <CGME20170629055940eucas1p1c9adcb26bec3ce5de97fe56753fd941a@eucas1p1.samsung.com>
2017-06-29  5:59                                         ` [PATCH v10 2/2] config: enable vhost numa awareness by default Ilya Maximets
2017-06-30 15:50                                           ` Thomas Monjalon
2017-06-29  6:29                                       ` [PATCH v10 0/2] Balanced allocation of hugepages Jerin Jacob
2017-06-30  8:36                                         ` Ilya Maximets
2017-06-30 16:12                                       ` [PATCH v11 " Thomas Monjalon
2017-06-30 16:12                                         ` [PATCH v11 1/2] mem: balanced " Thomas Monjalon
2017-06-30 16:12                                         ` [PATCH v11 2/2] config: enable vhost NUMA awareness by default Thomas Monjalon
2017-07-01 10:59                                         ` [PATCH v11 0/2] Balanced allocation of hugepages Thomas Monjalon
