* [PATCH 0/2] mempool: add stack (fifo) mempool handler
@ 2016-05-05 18:29 David Hunt
  2016-05-05 18:29 ` [PATCH 1/2] " David Hunt
                   ` (3 more replies)
  0 siblings, 4 replies; 48+ messages in thread
From: David Hunt @ 2016-05-05 18:29 UTC (permalink / raw)
  To: olivier.matz; +Cc: dev, David Hunt

This patch set adds a fifo stack handler to the external mempool 
manager. 

This patch set depends on the 3 part external mempool handler 
patch set (v4 of the series):
http://dpdk.org/dev/patchwork/patch/12077/
which depends on the 36 part patch set for mempool rework
http://dpdk.org/dev/patchwork/patch/12038/

David Hunt (2):
  mempool: add stack (fifo) mempool handler
  test: add autotest for external mempool stack handler

 app/test/test_mempool.c                |  26 ++++++
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 154 +++++++++++++++++++++++++++++++++
 3 files changed, 181 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

-- 
2.5.5

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH 1/2] mempool: add stack (fifo) mempool handler
  2016-05-05 18:29 [PATCH 0/2] mempool: add stack (fifo) mempool handler David Hunt
@ 2016-05-05 18:29 ` David Hunt
  2016-05-05 21:28   ` Stephen Hemminger
  2016-05-05 18:29 ` [PATCH 2/2] test: add autotest for external mempool stack handler David Hunt
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 48+ messages in thread
From: David Hunt @ 2016-05-05 18:29 UTC (permalink / raw)
  To: olivier.matz; +Cc: dev, David Hunt

This is a mempool handler that is useful for pipelining apps, where
the mempool cache doesn't really work - for example, where we have one
core doing Rx (and alloc) and another core doing Tx (and return). In
such a case, the mempool ring simply cycles through all the mbufs,
resulting in an LLC miss on every mbuf allocated when the number of
mbufs is large. A stack recycles buffers more effectively in this
case.
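
As a usage sketch, an application opts into this handler per pool with the
rte_mempool_create_empty() / rte_mempool_set_handler() /
rte_mempool_populate_default() calls exercised by the autotest in patch 2/2.
The pool name and sizes below are illustrative, and rte_mempool_free() is
assumed from the dependent mempool rework series:

#include <rte_mempool.h>

#define NB_MBUFS  8192   /* example pool size */
#define MBUF_SIZE 2048   /* example element size */

static struct rte_mempool *
create_stack_pool(void)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("pipeline_pool", NB_MBUFS, MBUF_SIZE,
			RTE_MEMPOOL_CACHE_MAX_SIZE, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	/* the handler must be attached before the pool is populated */
	if (rte_mempool_set_handler(mp, "stack") < 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	return mp;
}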

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 154 +++++++++++++++++++++++++++++++++
 2 files changed, 155 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index f19366e..5aa9ef8 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@ LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_handler.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_default.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..1874781
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,154 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <string.h>
+
+struct rte_mempool_common_stack
+{
+	/* Spinlock to protect access */
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#endif
+};
+
+static void *
+common_stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_common_stack *s;
+	char stack_name[RTE_RING_NAMESIZE];
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void*);
+
+	/* Allocate our local memory structure */
+	snprintf(stack_name, sizeof(stack_name), "%s-common-stack", mp->name);
+	s = rte_zmalloc_socket(stack_name,
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return NULL;
+	}
+
+	/* And the spinlock we use to protect access */
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool = (void *) s;
+	rte_mempool_set_handler(mp, "stack");
+
+	return (void *) s;
+}
+
+static int common_stack_put(void *p, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
+	void **cache_objs;
+	unsigned index;
+
+	/* Acquire lock */
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int common_stack_get(void *p, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
+	void **cache_objs;
+	unsigned index, len;
+
+	/* Acquire lock */
+	rte_spinlock_lock(&s->sl);
+
+	if(unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n; ++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned common_stack_get_count(void *p)
+{
+	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
+	return s->len;
+}
+
+static void
+common_stack_free(void *p)
+{
+	rte_free((struct rte_mempool_common_stack *)p);
+}
+
+static struct rte_mempool_handler handler_stack = {
+	.name = "stack",
+	.alloc = common_stack_alloc,
+	.free = common_stack_free,
+	.put = common_stack_put,
+	.get = common_stack_get,
+	.get_count = common_stack_get_count
+};
+
+MEMPOOL_REGISTER_HANDLER(handler_stack);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 2/2] test: add autotest for external mempool stack handler
  2016-05-05 18:29 [PATCH 0/2] mempool: add stack (fifo) mempool handler David Hunt
  2016-05-05 18:29 ` [PATCH 1/2] " David Hunt
@ 2016-05-05 18:29 ` David Hunt
  2016-05-06  8:34 ` [PATCH 0/2] mempool: add stack (fifo) mempool handler Tan, Jianfeng
  2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
  3 siblings, 0 replies; 48+ messages in thread
From: David Hunt @ 2016-05-05 18:29 UTC (permalink / raw)
  To: olivier.matz; +Cc: dev, David Hunt

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 09951cc..8190f20 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -570,6 +570,7 @@ test_mempool(void)
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
 	struct rte_mempool *mp_ext = NULL;
+	struct rte_mempool *mp_stack = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -619,6 +620,27 @@ test_mempool(void)
 	}
 	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
 
+	/* create a mempool with an external handler */
+	mp_stack = rte_mempool_create_empty("test_stack",
+		MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_stack == NULL) {
+		printf("cannot allocate mp_stack mempool\n");
+		goto err;
+	}
+	if (rte_mempool_set_handler(mp_stack, "stack") < 0) {
+		printf("cannot set stack handler\n");
+		goto err;
+	}
+	if (rte_mempool_populate_default(mp_stack) < 0) {
+		printf("cannot populate mp_stack mempool\n");
+		goto err;
+	}
+	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
+
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
 		printf("Cannot lookup mempool from its name\n");
@@ -652,6 +674,10 @@ test_mempool(void)
 	if (test_mempool_xmem_misc() < 0)
 		goto err;
 
+	/* test the stack handler */
+	if (test_mempool_basic(mp_stack) < 0)
+		goto err;
+
 	rte_mempool_list_dump(stdout);
 
 	return 0;
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [PATCH 1/2] mempool: add stack (fifo) mempool handler
  2016-05-05 18:29 ` [PATCH 1/2] " David Hunt
@ 2016-05-05 21:28   ` Stephen Hemminger
  2016-05-19 15:21     ` Hunt, David
  0 siblings, 1 reply; 48+ messages in thread
From: Stephen Hemminger @ 2016-05-05 21:28 UTC (permalink / raw)
  To: David Hunt; +Cc: olivier.matz, dev

Overall, this is ok, but why can't it be the default?
Lots of little things should be cleaned up


> +struct rte_mempool_common_stack
> +{
> +	/* Spinlock to protect access */
> +	rte_spinlock_t sl;
> +
> +	uint32_t size;
> +	uint32_t len;
> +	void *objs[];
> +
> +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> +#endif

Useless, remove it.

> +};
> +
> +static void *
> +common_stack_alloc(struct rte_mempool *mp)
> +{
> +	struct rte_mempool_common_stack *s;
> +	char stack_name[RTE_RING_NAMESIZE];
> +	unsigned n = mp->size;
> +	int size = sizeof(*s) + (n+16)*sizeof(void*);
> +
> +	/* Allocate our local memory structure */
> +	snprintf(stack_name, sizeof(stack_name), "%s-common-stack", mp->name);
> +	s = rte_zmalloc_socket(stack_name,
The name for zmalloc is ignored in the current code, so why bother making it unique?

> +			size,
> +			RTE_CACHE_LINE_SIZE,
> +			mp->socket_id);
> +	if (s == NULL) {
> +		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
> +		return NULL;
> +	}
> +
> +	/* And the spinlock we use to protect access */
> +	rte_spinlock_init(&s->sl);
> +
> +	s->size = n;
> +	mp->pool = (void *) s;
Since pool is void *, no need for a cast here

> +	rte_mempool_set_handler(mp, "stack");
> +
> +	return (void *) s;
> +}
> +
> +static int common_stack_put(void *p, void * const *obj_table,
> +		unsigned n)
> +{
> +	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;
> +	void **cache_objs;
> +	unsigned index;
> +
> +	/* Acquire lock */
Useless obvious comment, about as good as the classic /* Add one to i */
>
> +static int common_stack_get(void *p, void **obj_table,
> +		unsigned n)
> +{
> +	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;

Unnecessary cast; in C, a void * can be assigned to any object pointer type.

> +	void **cache_objs;
> +	unsigned index, len;
> +
> +	/* Acquire lock */
Yet another useless comment.

> +	rte_spinlock_lock(&s->sl);
> +
> +	if(unlikely(n > s->len)) {
> +		rte_spinlock_unlock(&s->sl);
> +		return -ENOENT;
> +	}
> +
> +	cache_objs = s->objs;
> +
> +	for (index = 0, len = s->len - 1; index < n; ++index, len--, obj_table++)
> +		*obj_table = cache_objs[len];
> +
> +	s->len -= n;
> +	rte_spinlock_unlock(&s->sl);
> +	return n;
> +}
> +
> +static unsigned common_stack_get_count(void *p)
> +{
> +	struct rte_mempool_common_stack * s = (struct rte_mempool_common_stack *)p;

Another useless cast.

> +	return s->len;
> +}
> +
> +static void
> +common_stack_free(void *p)
> +{
> +	rte_free((struct rte_mempool_common_stack *)p);
Yet another useless cast

> +}
> +
> +static struct rte_mempool_handler handler_stack = {
For security, any data structure with function pointers should be const.

> +	.name = "stack",
> +	.alloc = common_stack_alloc,
> +	.free = common_stack_free,
> +	.put = common_stack_put,
> +	.get = common_stack_get,
> +	.get_count = common_stack_get_count
> +};
> +
> +MEMPOOL_REGISTER_HANDLER(handler_stack);

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH 0/2] mempool: add stack (fifo) mempool handler
  2016-05-05 18:29 [PATCH 0/2] mempool: add stack (fifo) mempool handler David Hunt
  2016-05-05 18:29 ` [PATCH 1/2] " David Hunt
  2016-05-05 18:29 ` [PATCH 2/2] test: add autotest for external mempool stack handler David Hunt
@ 2016-05-06  8:34 ` Tan, Jianfeng
  2016-05-06 23:02   ` Hunt, David
  2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
  3 siblings, 1 reply; 48+ messages in thread
From: Tan, Jianfeng @ 2016-05-06  8:34 UTC (permalink / raw)
  To: David Hunt, olivier.matz; +Cc: dev

Hi David,


On 5/6/2016 2:29 AM, David Hunt wrote:
> This patch set adds a fifo stack handler to the external mempool
> manager.

Just a minor confusion for me. Usually, we refer to a stack as LIFO and a
queue as FIFO. So is there any particular reason why we call it "stack
(fifo)" here?

Thanks,
Jianfeng

>
> This patch set depends on the 3 part external mempool handler
> patch set (v4 of the series):
> http://dpdk.org/dev/patchwork/patch/12077/
> which depends on the 36 part patch set for mempool rework
> http://dpdk.org/dev/patchwork/patch/12038/
>
> David Hunt (2):
>    mempool: add stack (fifo) mempool handler
>    test: add autotest for external mempool stack handler
>
>   app/test/test_mempool.c                |  26 ++++++
>   lib/librte_mempool/Makefile            |   1 +
>   lib/librte_mempool/rte_mempool_stack.c | 154 +++++++++++++++++++++++++++++++++
>   3 files changed, 181 insertions(+)
>   create mode 100644 lib/librte_mempool/rte_mempool_stack.c
>

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH 0/2] mempool: add stack (fifo) mempool handler
  2016-05-06  8:34 ` [PATCH 0/2] mempool: add stack (fifo) mempool handler Tan, Jianfeng
@ 2016-05-06 23:02   ` Hunt, David
  0 siblings, 0 replies; 48+ messages in thread
From: Hunt, David @ 2016-05-06 23:02 UTC (permalink / raw)
  To: Tan, Jianfeng, olivier.matz; +Cc: dev



On 5/6/2016 1:34 AM, Tan, Jianfeng wrote:
> Hi David,
>
>
> On 5/6/2016 2:29 AM, David Hunt wrote:
>> This patch set adds a fifo stack handler to the external mempool
>> manager.
>
> Just a minor confusion for me. Usually, we refer to a stack as LIFO and
> a queue as FIFO. So is there any particular reason why we call it "stack
> (fifo)" here?
>
> Thanks,
> Jianfeng

Jianfeng,
You are correct. That is a typo. It should read LIFO.
Regards,
Dave.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* v2 mempool: add stack (lifo) mempool handler
  2016-05-05 18:29 [PATCH 0/2] mempool: add stack (fifo) mempool handler David Hunt
                   ` (2 preceding siblings ...)
  2016-05-06  8:34 ` [PATCH 0/2] mempool: add stack (fifo) mempool handler Tan, Jianfeng
@ 2016-05-19 14:48 ` David Hunt
  2016-05-19 14:48   ` [PATCH v2 1/3] " David Hunt
                     ` (4 more replies)
  3 siblings, 5 replies; 48+ messages in thread
From: David Hunt @ 2016-05-19 14:48 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz

This patch set adds a fifo stack handler to the external mempool
manager.

This patch set depends on the 3 part external mempool handler
patch set (v5 of the series):
http://dpdk.org/ml/archives/dev/2016-May/039364.html

v2 changes:
  * updated based on mailing list feedback (Thanks Stephen)
  * checkpatch fixes.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v2 1/3] mempool: add stack (lifo) mempool handler
  2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
@ 2016-05-19 14:48   ` David Hunt
  2016-05-23 12:55     ` Olivier Matz
  2016-05-19 14:48   ` [PATCH v2 2/3] mempool: make declaration of handler structs const David Hunt
                     ` (3 subsequent siblings)
  4 siblings, 1 reply; 48+ messages in thread
From: David Hunt @ 2016-05-19 14:48 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, David Hunt

This is a mempool handler that is useful for pipelining apps, where
the mempool cache doesn't really work - for example, where we have one
core doing Rx (and alloc) and another core doing Tx (and return). In
such a case, the mempool ring simply cycles through all the mbufs,
resulting in an LLC miss on every mbuf allocated when the number of
mbufs is large. A stack recycles buffers more effectively in this
case.

v2: cleanup based on mailing list comments. Mainly removal of
unnecessary casts and comments.

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
 2 files changed, 146 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index f19366e..5aa9ef8 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@ LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_handler.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_default.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..6e25028
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,145 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_common_stack {
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+};
+
+static void *
+common_stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_common_stack *s;
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+	/* Allocate our local memory structure */
+	s = rte_zmalloc_socket("common-stack",
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return NULL;
+	}
+
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool = s;
+	rte_mempool_set_handler(mp, "stack");
+
+	return s;
+}
+
+static int common_stack_put(void *p, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack *s = p;
+	void **cache_objs;
+	unsigned index;
+
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if ((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int common_stack_get(void *p, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_common_stack *s = p;
+	void **cache_objs;
+	unsigned index, len;
+
+	rte_spinlock_lock(&s->sl);
+
+	if (unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n;
+			++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned common_stack_get_count(void *p)
+{
+	struct rte_mempool_common_stack *s = p;
+
+	return s->len;
+}
+
+static void
+common_stack_free(void *p)
+{
+	rte_free(p);
+}
+
+static struct rte_mempool_handler handler_stack = {
+	.name = "stack",
+	.alloc = common_stack_alloc,
+	.free = common_stack_free,
+	.put = common_stack_put,
+	.get = common_stack_get,
+	.get_count = common_stack_get_count
+};
+
+MEMPOOL_REGISTER_HANDLER(handler_stack);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH v2 2/3] mempool: make declaration of handler structs const
  2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
  2016-05-19 14:48   ` [PATCH v2 1/3] " David Hunt
@ 2016-05-19 14:48   ` David Hunt
  2016-05-23 12:55     ` Olivier Matz
  2016-05-19 14:48   ` [PATCH v2 3/3] test: add autotest for external mempool stack handler David Hunt
                     ` (2 subsequent siblings)
  4 siblings, 1 reply; 48+ messages in thread
From: David Hunt @ 2016-05-19 14:48 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, David Hunt

For security, any data structure with function pointers should be const

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/rte_mempool.h         | 2 +-
 lib/librte_mempool/rte_mempool_default.c | 8 ++++----
 lib/librte_mempool/rte_mempool_handler.c | 2 +-
 lib/librte_mempool/rte_mempool_stack.c   | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index ed2c110..baa5f48 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -491,7 +491,7 @@ rte_mempool_set_handler(struct rte_mempool *mp, const char *name);
  *   - >=0: Sucess; return the index of the handler in the table.
  *   - <0: Error (errno)
  */
-int rte_mempool_handler_register(struct rte_mempool_handler *h);
+int rte_mempool_handler_register(const struct rte_mempool_handler *h);
 
 /**
  * Macro to statically register an external pool handler.
diff --git a/lib/librte_mempool/rte_mempool_default.c b/lib/librte_mempool/rte_mempool_default.c
index a6ac65a..764f772 100644
--- a/lib/librte_mempool/rte_mempool_default.c
+++ b/lib/librte_mempool/rte_mempool_default.c
@@ -105,7 +105,7 @@ common_ring_free(void *p)
 	rte_ring_free((struct rte_ring *)p);
 }
 
-static struct rte_mempool_handler handler_mp_mc = {
+static const struct rte_mempool_handler handler_mp_mc = {
 	.name = "ring_mp_mc",
 	.alloc = common_ring_alloc,
 	.free = common_ring_free,
@@ -114,7 +114,7 @@ static struct rte_mempool_handler handler_mp_mc = {
 	.get_count = common_ring_get_count,
 };
 
-static struct rte_mempool_handler handler_sp_sc = {
+static const struct rte_mempool_handler handler_sp_sc = {
 	.name = "ring_sp_sc",
 	.alloc = common_ring_alloc,
 	.free = common_ring_free,
@@ -123,7 +123,7 @@ static struct rte_mempool_handler handler_sp_sc = {
 	.get_count = common_ring_get_count,
 };
 
-static struct rte_mempool_handler handler_mp_sc = {
+static const struct rte_mempool_handler handler_mp_sc = {
 	.name = "ring_mp_sc",
 	.alloc = common_ring_alloc,
 	.free = common_ring_free,
@@ -132,7 +132,7 @@ static struct rte_mempool_handler handler_mp_sc = {
 	.get_count = common_ring_get_count,
 };
 
-static struct rte_mempool_handler handler_sp_mc = {
+static const struct rte_mempool_handler handler_sp_mc = {
 	.name = "ring_sp_mc",
 	.alloc = common_ring_alloc,
 	.free = common_ring_free,
diff --git a/lib/librte_mempool/rte_mempool_handler.c b/lib/librte_mempool/rte_mempool_handler.c
index 78611f8..eab1e69 100644
--- a/lib/librte_mempool/rte_mempool_handler.c
+++ b/lib/librte_mempool/rte_mempool_handler.c
@@ -45,7 +45,7 @@ struct rte_mempool_handler_table rte_mempool_handler_table = {
 
 /* add a new handler in rte_mempool_handler_table, return its index */
 int
-rte_mempool_handler_register(struct rte_mempool_handler *h)
+rte_mempool_handler_register(const struct rte_mempool_handler *h)
 {
 	struct rte_mempool_handler *handler;
 	int16_t handler_idx;
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
index 6e25028..ad49436 100644
--- a/lib/librte_mempool/rte_mempool_stack.c
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -133,7 +133,7 @@ common_stack_free(void *p)
 	rte_free(p);
 }
 
-static struct rte_mempool_handler handler_stack = {
+static const struct rte_mempool_handler handler_stack = {
 	.name = "stack",
 	.alloc = common_stack_alloc,
 	.free = common_stack_free,
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH v2 3/3] test: add autotest for external mempool stack handler
  2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
  2016-05-19 14:48   ` [PATCH v2 1/3] " David Hunt
  2016-05-19 14:48   ` [PATCH v2 2/3] mempool: make declaration of handler structs const David Hunt
@ 2016-05-19 14:48   ` David Hunt
  2016-05-19 15:16   ` v2 mempool: add stack (lifo) mempool handler Hunt, David
  2016-06-20 13:08   ` mempool: add stack " David Hunt
  4 siblings, 0 replies; 48+ messages in thread
From: David Hunt @ 2016-05-19 14:48 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, David Hunt

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index f55d126..b98804a 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -570,6 +570,7 @@ test_mempool(void)
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
 	struct rte_mempool *mp_ext = NULL;
+	struct rte_mempool *mp_stack = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -619,6 +620,27 @@ test_mempool(void)
 	}
 	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
 
+	/* create a mempool with an external handler */
+	mp_stack = rte_mempool_create_empty("test_stack",
+		MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_stack == NULL) {
+		printf("cannot allocate mp_stack mempool\n");
+		goto err;
+	}
+	if (rte_mempool_set_handler(mp_stack, "stack") < 0) {
+		printf("cannot set stack handler\n");
+		goto err;
+	}
+	if (rte_mempool_populate_default(mp_stack) < 0) {
+		printf("cannot populate mp_stack mempool\n");
+		goto err;
+	}
+	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
+
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
 		printf("Cannot lookup mempool from its name\n");
@@ -652,6 +674,10 @@ test_mempool(void)
 	if (test_mempool_xmem_misc() < 0)
 		goto err;
 
+	/* test the stack handler */
+	if (test_mempool_basic(mp_stack) < 0)
+		goto err;
+
 	rte_mempool_list_dump(stdout);
 
 	return 0;
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: v2 mempool: add stack (lifo) mempool handler
  2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
                     ` (2 preceding siblings ...)
  2016-05-19 14:48   ` [PATCH v2 3/3] test: add autotest for external mempool stack handler David Hunt
@ 2016-05-19 15:16   ` Hunt, David
  2016-06-20 13:08   ` mempool: add stack " David Hunt
  4 siblings, 0 replies; 48+ messages in thread
From: Hunt, David @ 2016-05-19 15:16 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz


On 5/19/2016 3:48 PM, David Hunt wrote:
> This patch set adds a fifo stack handler to the external mempool
> manager.

Apologies, cut and paste error, should be lifo stack handler. I thought 
I'd caught all of these...


> This patch set depends on the 3 part external mempool handler
> patch set (v5 of the series):
> http://dpdk.org/ml/archives/dev/2016-May/039364.html
>
> v2 changes:
>    * updated based on mailing list feedback (Thanks Stephen)
>    * checkpatch fixes.
>
>

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH 1/2] mempool: add stack (fifo) mempool handler
  2016-05-05 21:28   ` Stephen Hemminger
@ 2016-05-19 15:21     ` Hunt, David
  0 siblings, 0 replies; 48+ messages in thread
From: Hunt, David @ 2016-05-19 15:21 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: olivier.matz, dev



On 5/5/2016 10:28 PM, Stephen Hemminger wrote:
> Overall, this is ok, but why can't it be the default?

Backward compatibility would probably be the main reason, to have the 
least impact when recompiling.

> Lots of little things should be cleaned up

I've submitted a v2, and addressed all your issues. Thanks for the review.

Regards,
Dave.


--snip---

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 1/3] mempool: add stack (lifo) mempool handler
  2016-05-19 14:48   ` [PATCH v2 1/3] " David Hunt
@ 2016-05-23 12:55     ` Olivier Matz
  2016-06-15 10:10       ` Hunt, David
  2016-06-17 14:18       ` Hunt, David
  0 siblings, 2 replies; 48+ messages in thread
From: Olivier Matz @ 2016-05-23 12:55 UTC (permalink / raw)
  To: David Hunt, dev

Hi David,

Please find some comments below.

On 05/19/2016 04:48 PM, David Hunt wrote:
> This is a mempool handler that is useful for pipelining apps, where
> the mempool cache doesn't really work - for example, where we have one
> core doing Rx (and alloc) and another core doing Tx (and return). In
> such a case, the mempool ring simply cycles through all the mbufs,
> resulting in an LLC miss on every mbuf allocated when the number of
> mbufs is large. A stack recycles buffers more effectively in this
> case.
> 
> v2: cleanup based on mailing list comments. Mainly removal of
> unnecessary casts and comments.
> 
> Signed-off-by: David Hunt <david.hunt@intel.com>
> ---
>  lib/librte_mempool/Makefile            |   1 +
>  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
>  2 files changed, 146 insertions(+)
>  create mode 100644 lib/librte_mempool/rte_mempool_stack.c
> 
> diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
> index f19366e..5aa9ef8 100644
> --- a/lib/librte_mempool/Makefile
> +++ b/lib/librte_mempool/Makefile
> @@ -44,6 +44,7 @@ LIBABIVER := 2
>  SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
>  SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_handler.c
>  SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_default.c
> +SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
>  # install includes
>  SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
>  
> diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
> new file mode 100644
> index 0000000..6e25028
> --- /dev/null
> +++ b/lib/librte_mempool/rte_mempool_stack.c
> @@ -0,0 +1,145 @@
> +/*-
> + *   BSD LICENSE
> + *
> + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
> + *   All rights reserved.

Should be 2016?

> ...
> +
> +static void *
> +common_stack_alloc(struct rte_mempool *mp)
> +{
> +	struct rte_mempool_common_stack *s;
> +	unsigned n = mp->size;
> +	int size = sizeof(*s) + (n+16)*sizeof(void *);
> +
> +	/* Allocate our local memory structure */
> +	s = rte_zmalloc_socket("common-stack",

"mempool-stack" ?

> +			size,
> +			RTE_CACHE_LINE_SIZE,
> +			mp->socket_id);
> +	if (s == NULL) {
> +		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
> +		return NULL;
> +	}
> +
> +	rte_spinlock_init(&s->sl);
> +
> +	s->size = n;
> +	mp->pool = s;
> +	rte_mempool_set_handler(mp, "stack");

rte_mempool_set_handler() is a user function, it should not be called here

> +
> +	return s;
> +}
> +
> +static int common_stack_put(void *p, void * const *obj_table,
> +		unsigned n)
> +{
> +	struct rte_mempool_common_stack *s = p;
> +	void **cache_objs;
> +	unsigned index;
> +
> +	rte_spinlock_lock(&s->sl);
> +	cache_objs = &s->objs[s->len];
> +
> +	/* Is there sufficient space in the stack ? */
> +	if ((s->len + n) > s->size) {
> +		rte_spinlock_unlock(&s->sl);
> +		return -ENOENT;
> +	}

The usual return value for a failing put() is ENOBUFS (see in rte_ring).


After reading it, I realize that it's nearly exactly the same code as
in "app/test: test external mempool handler".
http://patchwork.dpdk.org/dev/patchwork/patch/12896/

We should drop one of them. If this stack handler is really useful for
a performance use-case, it could go in librte_mempool. At the first
read, the code looks like a demo example: it uses a simple spinlock for
concurrent accesses to the common pool. Maybe the mempool cache hides
this cost, in this case we could also consider removing the use of the
rte_ring.

Do you have some performance numbers? Do you know if it scales
with the number of cores?

If we can identify the conditions where this mempool handler
overperforms the default handler, it would be valuable to have them
in the documentation.


Regards,
Olivier

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 2/3] mempool: make declaration of handler structs const
  2016-05-19 14:48   ` [PATCH v2 2/3] mempool: make declaration of handler structs const David Hunt
@ 2016-05-23 12:55     ` Olivier Matz
  2016-05-24 14:01       ` Hunt, David
  0 siblings, 1 reply; 48+ messages in thread
From: Olivier Matz @ 2016-05-23 12:55 UTC (permalink / raw)
  To: David Hunt, dev

Hi David,

On 05/19/2016 04:48 PM, David Hunt wrote:
> For security, any data structure with function pointers should be const
> 
> Signed-off-by: David Hunt <david.hunt@intel.com>

I think this should be merged in the patchset adding the mempool
handler feature.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 2/3] mempool: make declaration of handler structs const
  2016-05-23 12:55     ` Olivier Matz
@ 2016-05-24 14:01       ` Hunt, David
  0 siblings, 0 replies; 48+ messages in thread
From: Hunt, David @ 2016-05-24 14:01 UTC (permalink / raw)
  To: Olivier Matz, dev



On 5/23/2016 1:55 PM, Olivier Matz wrote:
> Hi David,
>
> On 05/19/2016 04:48 PM, David Hunt wrote:
>> For security, any data structure with function pointers should be const
> I think this should be merged in the patchset adding the mempool
> handler feature.

Agreed, I'm working on that now, merging it in with some changes 
suggested by Jan on the other patch.

Thanks,
David.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 1/3] mempool: add stack (lifo) mempool handler
  2016-05-23 12:55     ` Olivier Matz
@ 2016-06-15 10:10       ` Hunt, David
  2016-06-17 14:18       ` Hunt, David
  1 sibling, 0 replies; 48+ messages in thread
From: Hunt, David @ 2016-06-15 10:10 UTC (permalink / raw)
  To: Olivier Matz, dev

Hi Olivier,

On 23/5/2016 1:55 PM, Olivier Matz wrote:
> Hi David,
>
> Please find some comments below.
>
> On 05/19/2016 04:48 PM, David Hunt wrote:
>> This is a mempool handler that is useful for pipelining apps, where
>> the mempool cache doesn't really work - for example, where we have one
>> core doing Rx (and alloc) and another core doing Tx (and return). In
>> such a case, the mempool ring simply cycles through all the mbufs,
>> resulting in an LLC miss on every mbuf allocated when the number of
>> mbufs is large. A stack recycles buffers more effectively in this
>> case.
>>
>> v2: cleanup based on mailing list comments. Mainly removal of
>> unnecessary casts and comments.
>>
>> Signed-off-by: David Hunt <david.hunt@intel.com>
>> ---
>>   lib/librte_mempool/Makefile            |   1 +
>>   lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
>>   2 files changed, 146 insertions(+)
>>   create mode 100644 lib/librte_mempool/rte_mempool_stack.c
>>
>> diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
>> index f19366e..5aa9ef8 100644
>> --- a/lib/librte_mempool/Makefile
>> +++ b/lib/librte_mempool/Makefile
>> @@ -44,6 +44,7 @@ LIBABIVER := 2
>>   SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
>>   SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_handler.c
>>   SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_default.c
>> +SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
>>   # install includes
>>   SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
>>   
>> diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
>> new file mode 100644
>> index 0000000..6e25028
>> --- /dev/null
>> +++ b/lib/librte_mempool/rte_mempool_stack.c
>> @@ -0,0 +1,145 @@
>> +/*-
>> + *   BSD LICENSE
>> + *
>> + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
>> + *   All rights reserved.
> Should be 2016?

Yup, will change.

>> ...
>> +
>> +static void *
>> +common_stack_alloc(struct rte_mempool *mp)
>> +{
>> +	struct rte_mempool_common_stack *s;
>> +	unsigned n = mp->size;
>> +	int size = sizeof(*s) + (n+16)*sizeof(void *);
>> +
>> +	/* Allocate our local memory structure */
>> +	s = rte_zmalloc_socket("common-stack",
> "mempool-stack" ?

Yes. Also, I think the names of the functions should be changed from
common_stack_x to simply stack_x. The "common_" prefix does not add anything.

>> +			size,
>> +			RTE_CACHE_LINE_SIZE,
>> +			mp->socket_id);
>> +	if (s == NULL) {
>> +		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
>> +		return NULL;
>> +	}
>> +
>> +	rte_spinlock_init(&s->sl);
>> +
>> +	s->size = n;
>> +	mp->pool = s;
>> +	rte_mempool_set_handler(mp, "stack");
> rte_mempool_set_handler() is a user function, it should not be called here

Sure, removed.

>> +
>> +	return s;
>> +}
>> +
>> +static int common_stack_put(void *p, void * const *obj_table,
>> +		unsigned n)
>> +{
>> +	struct rte_mempool_common_stack *s = p;
>> +	void **cache_objs;
>> +	unsigned index;
>> +
>> +	rte_spinlock_lock(&s->sl);
>> +	cache_objs = &s->objs[s->len];
>> +
>> +	/* Is there sufficient space in the stack ? */
>> +	if ((s->len + n) > s->size) {
>> +		rte_spinlock_unlock(&s->sl);
>> +		return -ENOENT;
>> +	}
> The usual return value for a failing put() is ENOBUFS (see in rte_ring).

Done.

> After reading it, I realize that it's nearly exactly the same code as
> in "app/test: test external mempool handler".
> http://patchwork.dpdk.org/dev/patchwork/patch/12896/
>
> We should drop one of them. If this stack handler is really useful for
> a performance use-case, it could go in librte_mempool. At the first
> read, the code looks like a demo example: it uses a simple spinlock for
> concurrent accesses to the common pool. Maybe the mempool cache hides
> this cost, in this case we could also consider removing the use of the
> rte_ring.

Unlike the code in the test app, the stack handler does not use a ring.
It is for the case where applications do a lot of core-to-core transfers
of mbufs. The test app was simply demonstrating a minimal malloc-based
mempool handler; this patch adds a new lifo handler for general use.

Using the mempool_perf_autotest, I see a 30% increase in throughput when
the local cache is enabled/used. However, there is up to a 50% degradation
when the local cache is NOT used, so it's not suitable for all situations.
Still, with a 30% gain for the cache use-case, I think it's worth having in
there as an option for people to try if their use-case suits.
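
For reference, the difference between those two cases is just the cache_size
argument of rte_mempool_create_empty(); a minimal sketch, with NB_MBUFS,
MBUF_SIZE and the pool names as illustrative placeholders:

struct rte_mempool *mp_cached, *mp_nocache;

/* per-lcore cache enabled: the spinlocked common stack is only hit on
 * cache refill/flush, which is where the ~30% gain is seen */
mp_cached = rte_mempool_create_empty("stack_cached", NB_MBUFS, MBUF_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0, SOCKET_ID_ANY, 0);

/* cache_size of 0: every get/put takes the spinlock on the common stack,
 * which is where the degradation shows up */
mp_nocache = rte_mempool_create_empty("stack_nocache", NB_MBUFS, MBUF_SIZE,
		0, 0, SOCKET_ID_ANY, 0);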


> Do you have some performance numbers? Do you know if it scales
> with the number of cores?

30% gain when local cache is used. And these numbers scale up with the
number of cores on my test machine. It may be better for other use cases.

> If we can identify the conditions where this mempool handler
> overperforms the default handler, it would be valuable to have them
> in the documentation.
>

I could certainly add this to the docs, and mention the recommendation to
use local cache.

Regards,
Dave.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 1/3] mempool: add stack (lifo) mempool handler
  2016-05-23 12:55     ` Olivier Matz
  2016-06-15 10:10       ` Hunt, David
@ 2016-06-17 14:18       ` Hunt, David
  2016-06-20  8:17         ` Olivier Matz
  1 sibling, 1 reply; 48+ messages in thread
From: Hunt, David @ 2016-06-17 14:18 UTC (permalink / raw)
  To: Olivier Matz, dev

Hi Olivier,

On 23/5/2016 1:55 PM, Olivier Matz wrote:
> Hi David,
>
> Please find some comments below.
>
> On 05/19/2016 04:48 PM, David Hunt wrote:
>> [...]
>> +++ b/lib/librte_mempool/rte_mempool_stack.c
>> @@ -0,0 +1,145 @@
>> +/*-
>> + *   BSD LICENSE
>> + *
>> + *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
>> + *   All rights reserved.
> Should be 2016?

Yes, fixed.

>> ...
>> +
>> +static void *
>> +common_stack_alloc(struct rte_mempool *mp)
>> +{
>> +	struct rte_mempool_common_stack *s;
>> +	unsigned n = mp->size;
>> +	int size = sizeof(*s) + (n+16)*sizeof(void *);
>> +
>> +	/* Allocate our local memory structure */
>> +	s = rte_zmalloc_socket("common-stack",
> "mempool-stack" ?

Done

>> +			size,
>> +			RTE_CACHE_LINE_SIZE,
>> +			mp->socket_id);
>> +	if (s == NULL) {
>> +		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
>> +		return NULL;
>> +	}
>> +
>> +	rte_spinlock_init(&s->sl);
>> +
>> +	s->size = n;
>> +	mp->pool = s;
>> +	rte_mempool_set_handler(mp, "stack");
> rte_mempool_set_handler() is a user function, it should not be called here

Removed.

>> +
>> +	return s;
>> +}
>> +
>> +static int common_stack_put(void *p, void * const *obj_table,
>> +		unsigned n)
>> +{
>> +	struct rte_mempool_common_stack *s = p;
>> +	void **cache_objs;
>> +	unsigned index;
>> +
>> +	rte_spinlock_lock(&s->sl);
>> +	cache_objs = &s->objs[s->len];
>> +
>> +	/* Is there sufficient space in the stack ? */
>> +	if ((s->len + n) > s->size) {
>> +		rte_spinlock_unlock(&s->sl);
>> +		return -ENOENT;
>> +	}
> The usual return value for a failing put() is ENOBUFS (see in rte_ring).

Done.

>
> After reading it, I realize that it's nearly exactly the same code as
> in "app/test: test external mempool handler".
> http://patchwork.dpdk.org/dev/patchwork/patch/12896/
>
> We should drop one of them. If this stack handler is really useful for
> a performance use-case, it could go in librte_mempool. At the first
> read, the code looks like a demo example: it uses a simple spinlock for
> concurrent accesses to the common pool. Maybe the mempool cache hides
> this cost, in this case we could also consider removing the use of the
> rte_ring.

While I agree that the code is similar, the handler in the test is a
ring-based handler, whereas this patch adds an array-based handler.

I think that the case for leaving it in as a test for the standard 
handler as part of the
previous mempool handler is valid, but maybe there is a case for 
removing it if
we add the stack handler. Maybe a future patch?

> Do you have some performance numbers? Do you know if it scales
> with the number of cores?

For the mempool_perf_autotest, I'm seeing a 30% increase in performance 
for the
local cache use-case for 1 - 36 cores (results vary within those tests 
between
10-45% gain, but with an average of 30% gain over all the tests.).

However, for the tests with no local cache configured, throughput of the
enqueue/dequeue drops by about 30%, with the 36-core case yielding the
largest drop of 40%. So this handler would not be recommended in no-cache
applications.

> If we can identify the conditions where this mempool handler
> overperforms the default handler, it would be valuable to have them
> in the documentation.
>


Regards,
Dave.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 1/3] mempool: add stack (lifo) mempool handler
  2016-06-17 14:18       ` Hunt, David
@ 2016-06-20  8:17         ` Olivier Matz
  2016-06-20 12:59           ` Hunt, David
  0 siblings, 1 reply; 48+ messages in thread
From: Olivier Matz @ 2016-06-20  8:17 UTC (permalink / raw)
  To: Hunt, David, dev

Hi David,

On 06/17/2016 04:18 PM, Hunt, David wrote:
>> After reading it, I realize that it's nearly exactly the same code as
>> in "app/test: test external mempool handler".
>> http://patchwork.dpdk.org/dev/patchwork/patch/12896/
>>
>> We should drop one of them. If this stack handler is really useful for
>> a performance use-case, it could go in librte_mempool. At the first
>> read, the code looks like a demo example: it uses a simple spinlock for
>> concurrent accesses to the common pool. Maybe the mempool cache hides
>> this cost, in this case we could also consider removing the use of the
>> rte_ring.
> 
> While I agree that the code is similar, the handler in the test is a
> ring-based handler, whereas this patch adds an array-based handler.

Not sure I'm getting what you are saying. Do you mean stack instead
of array?

Actually, both are stacks when talking about bulks of objects. If we
consider each object one by one, it's true that the order will differ.
But as discussed in [1], the cache code already reverses the order of
objects when doing a mempool_get(). I'd say the reversing in the cache
code is not really needed (only the order of object bulks should remain
the same). An rte_memcpy() looks to be faster, but it would require some
real-life tests to validate or invalidate this theory.

So to conclude, I still think the two pieces of code in app/test and
lib/mempool are quite similar, and only one of them should be kept.

[1] http://www.dpdk.org/ml/archives/dev/2016-May/039873.html
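
As a standalone illustration of that point (a toy stack in plain C, not
DPDK code, with the same bulk put/get loop shape as the handler): per-object
order within a bulk comes back reversed, whereas an rte_memcpy()-style
transfer would preserve it.

#include <stdio.h>

#define N 4

int main(void)
{
	int stack[16], out[N];
	const int in[N] = { 1, 2, 3, 4 };
	unsigned int i, len = 0;

	/* bulk put: same loop shape as the handler's put/enqueue */
	for (i = 0; i < N; i++)
		stack[len + i] = in[i];
	len += N;

	/* bulk get: same loop shape as the handler's get/dequeue; it walks
	 * the stack top-down, so per-object order within the bulk reverses */
	for (i = 0; i < N; i++)
		out[i] = stack[--len];

	for (i = 0; i < N; i++)
		printf("%d ", out[i]);	/* prints: 4 3 2 1 */
	printf("\n");
	return 0;
}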

> I think that the case for leaving it in as a test for the standard
> handler as part of the
> previous mempool handler is valid, but maybe there is a case for
> removing it if
> we add the stack handler. Maybe a future patch?
> 
>> Do you have some performance numbers? Do you know if it scales
>> with the number of cores?
> 
> For the mempool_perf_autotest, I'm seeing a 30% increase in performance
> for the
> local cache use-case for 1 - 36 cores (results vary within those tests
> between
> 10-45% gain, but with an average of 30% gain over all the tests.).
> 
> However, for the tests with no local cache configured, throughput of the
> enqueue/dequeue
> drops by about 30%, with the 36-core case yielding the largest drop of 40%. So
> this handler would
> not be recommended in no-cache applications.

Interesting, thanks. If you also have real-life (I mean network)
performance tests, I'd be interested too.

Ideally, we should have documentation explaining in which cases one
handler or another should be used. However, if we don't know this today,
I'm not opposed to adding this new handler in 16.07, letting people do
their tests and comment, and then describing it properly for 16.11.

What do you think?


Regards,
Olivier

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 1/3] mempool: add stack (lifo) mempool handler
  2016-06-20  8:17         ` Olivier Matz
@ 2016-06-20 12:59           ` Hunt, David
  2016-06-29 14:31             ` Olivier MATZ
  0 siblings, 1 reply; 48+ messages in thread
From: Hunt, David @ 2016-06-20 12:59 UTC (permalink / raw)
  To: Olivier Matz, dev

Hi Olivier,

On 20/6/2016 9:17 AM, Olivier Matz wrote:
> Hi David,
>
> On 06/17/2016 04:18 PM, Hunt, David wrote:
>>> After reading it, I realize that it's nearly exactly the same code as
>>> in "app/test: test external mempool handler".
>>> http://patchwork.dpdk.org/dev/patchwork/patch/12896/
>>>
>>> We should drop one of them. If this stack handler is really useful for
>>> a performance use-case, it could go in librte_mempool. At the first
>>> read, the code looks like a demo example: it uses a simple spinlock for
>>> concurrent accesses to the common pool. Maybe the mempool cache hides
>>> this cost, in this case we could also consider removing the use of the
>>> rte_ring.
>> While I agree that the code is similar, the handler in the test is a
>> ring-based handler, whereas this patch adds an array-based handler.
> Not sure I'm getting what you are saying. Do you mean stack instead
> of array?

Yes, apologies, stack.

> Actually, both are stacks when talking about bulks of objects. If we
> consider each object one by one, it's true that the order will differ.
> But as discussed in [1], the cache code already reverses the order of
> objects when doing a mempool_get(). I'd say the reversing in the cache
> code is not really needed (only the order of object bulks should remain
> the same). An rte_memcpy() looks to be faster, but it would require some
> real-life tests to validate or invalidate this theory.
>
> So to conclude, I still think the two pieces of code in app/test and
> lib/mempool are quite similar, and only one of them should be kept.
>
> [1] http://www.dpdk.org/ml/archives/dev/2016-May/039873.html

OK, so we will probably remove the test app portion in the future if it is
not needed, and if we apply the stack handler proposed in this patch set.

>> I think that the case for leaving it in as a test for the standard
>> handler as part of the
>> previous mempool handler is valid, but maybe there is a case for
>> removing it if
>> we add the stack handler. Maybe a future patch?
>>
>>> Do you have some performance numbers? Do you know if it scales
>>> with the number of cores?
>> For the mempool_perf_autotest, I'm seeing a 30% increase in performance
>> for the
>> local cache use-case for 1 - 36 cores (results vary within those tests
>> between
>> 10-45% gain, but with an average of 30% gain over all the tests.).
>>
>> However, for the tests with no local cache configured, throughput of the
>> enqueue/dequeue
>> drops by about 30%, with the 36-core case yielding the largest drop of 40%. So
>> this handler would
>> not be recommended in no-cache applications.
> Interesting, thanks. If you also have real-life (I mean network)
> performance tests, I'd be interested too.

I'm afraid I don't currently have any real-life performance tests.

> Ideally, we should have documentation explaining in which cases one
> handler or another should be used. However, if we don't know this today,
> I'm not opposed to adding this new handler in 16.07, letting people do
> their tests and comment, and then describing it properly for 16.11.
>
> What do you think?

I agree. Add it in 16.07, and let people develop use cases for it, as well
as possibly coming up with new handlers for 16.11. There's talk of
hardware-based handlers; I would also hope to see some of those contributed
soon.

Regards,
David.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* mempool: add stack mempool handler
  2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
                     ` (3 preceding siblings ...)
  2016-05-19 15:16   ` v2 mempool: add stack (lifo) mempool handler Hunt, David
@ 2016-06-20 13:08   ` David Hunt
  2016-06-20 13:08     ` [PATCH v3 1/2] mempool: add stack (lifo) " David Hunt
                       ` (2 more replies)
  4 siblings, 3 replies; 48+ messages in thread
From: David Hunt @ 2016-06-20 13:08 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain

This patch set adds a lifo stack handler to the external mempool
manager.

This patch set depends on the 3 part external mempool handler
patch set (v15 of the series):
http://dpdk.org/ml/archives/dev/2016-June/042004.html

v3 changes:
  * Updated based on the latest version (v15) of the Mempool Handler feature

v2 changes:
  * updated based on mailing list feedback (Thanks Stephen)
  * checkpatch fixes.

David Hunt (2):
  mempool: add stack (lifo) mempool handler
  test: add autotest for external mempool stack handler

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 13:08   ` mempool: add stack " David Hunt
@ 2016-06-20 13:08     ` David Hunt
  2016-06-20 13:25       ` Jerin Jacob
  2016-06-20 13:08     ` [PATCH v3 2/2] test: add autotest for external mempool stack handler David Hunt
  2016-06-30  7:41     ` [PATCH v4 0/2] mempool: add stack mempool handler David Hunt
  2 siblings, 1 reply; 48+ messages in thread
From: David Hunt @ 2016-06-20 13:08 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

This is a mempool handler that is useful for pipelining apps, where
the mempool cache doesn't really work - for example, where we have one
core doing Rx (and alloc) and another core doing Tx (and return). In
such a case, the mempool ring simply cycles through all the mbufs,
resulting in an LLC miss on every mbuf allocated when the number of
mbufs is large. A stack recycles buffers more effectively in this
case.
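
With the mempool ops API this version is rebased on, the earlier
rte_mempool_set_handler() call becomes rte_mempool_set_ops_byname().
A minimal usage sketch, assuming that call and rte_exit() are available;
the pool name, NB_MBUFS and MBUF_SIZE are illustrative:

struct rte_mempool *mp;

mp = rte_mempool_create_empty("pipeline_pool", NB_MBUFS, MBUF_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0, SOCKET_ID_ANY, 0);
if (mp == NULL)
	rte_exit(EXIT_FAILURE, "cannot create empty pool\n");

/* select the stack ops before the pool is populated */
if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0)
	rte_exit(EXIT_FAILURE, "cannot set stack ops\n");

if (rte_mempool_populate_default(mp) < 0)
	rte_exit(EXIT_FAILURE, "cannot populate pool\n");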

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
 2 files changed, 146 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index a4c089e..057a6ab 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@ LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..a92da69
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,145 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_stack {
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+};
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s;
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+	/* Allocate our local memory structure */
+	s = rte_zmalloc_socket("mempool-stack",
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return -ENOMEM;
+	}
+
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool_data = s;
+
+	return 0;
+}
+
+static int stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index;
+
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if ((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOBUFS;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int stack_dequeue(struct rte_mempool *mp, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index, len;
+
+	rte_spinlock_lock(&s->sl);
+
+	if (unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n;
+			++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+
+	return s->len;
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+	rte_free((void *)(mp->pool_data));
+}
+
+static struct rte_mempool_ops ops_stack = {
+	.name = "stack",
+	.alloc = stack_alloc,
+	.free = stack_free,
+	.enqueue = stack_enqueue,
+	.dequeue = stack_dequeue,
+	.get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH v3 2/2] test: add autotest for external mempool stack handler
  2016-06-20 13:08   ` mempool: add stack " David Hunt
  2016-06-20 13:08     ` [PATCH v3 1/2] mempool: add stack (lifo) " David Hunt
@ 2016-06-20 13:08     ` David Hunt
  2016-06-30  7:41     ` [PATCH v4 0/2] mempool: add stack mempool handler David Hunt
  2 siblings, 0 replies; 48+ messages in thread
From: David Hunt @ 2016-06-20 13:08 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 31582d8..6cb2b14 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -573,6 +573,7 @@ test_mempool(void)
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
 	struct rte_mempool *mp_ext = NULL;
+	struct rte_mempool *mp_stack = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -622,6 +623,27 @@ test_mempool(void)
 	}
 	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
 
+	/* create a mempool with an external handler */
+	mp_stack = rte_mempool_create_empty("test_stack",
+		MEMPOOL_SIZE,
+		MEMPOOL_ELT_SIZE,
+		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+		SOCKET_ID_ANY, 0);
+
+	if (mp_stack == NULL) {
+		printf("cannot allocate mp_stack mempool\n");
+		goto err;
+	}
+	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
+		printf("cannot set stack handler\n");
+		goto err;
+	}
+	if (rte_mempool_populate_default(mp_stack) < 0) {
+		printf("cannot populate mp_stack mempool\n");
+		goto err;
+	}
+	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
+
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
 		printf("Cannot lookup mempool from its name\n");
@@ -655,6 +677,10 @@ test_mempool(void)
 	if (test_mempool_xmem_misc() < 0)
 		goto err;
 
+	/* test the stack handler */
+	if (test_mempool_basic(mp_stack) < 0)
+		goto err;
+
 	rte_mempool_list_dump(stdout);
 
 	return 0;
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 13:08     ` [PATCH v3 1/2] mempool: add stack (lifo) " David Hunt
@ 2016-06-20 13:25       ` Jerin Jacob
  2016-06-20 13:54         ` Thomas Monjalon
  0 siblings, 1 reply; 48+ messages in thread
From: Jerin Jacob @ 2016-06-20 13:25 UTC (permalink / raw)
  To: David Hunt; +Cc: dev, olivier.matz, viktorin, shreyansh.jain

On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> This is a mempool handler that is useful for pipelining apps, where
> the mempool cache doesn't really work - example, where we have one
> core doing rx (and alloc), and another core doing Tx (and return). In
> such a case, the mempool ring simply cycles through all the mbufs,
> resulting in a LLC miss on every mbuf allocated when the number of
> mbufs is large. A stack recycles buffers more effectively in this
> case.
> 
> Signed-off-by: David Hunt <david.hunt@intel.com>
> ---
>  lib/librte_mempool/Makefile            |   1 +
>  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++

How about moving the new mempool handlers to drivers/mempool (or similar)?
In the future, adding HW-specific handlers in lib/librte_mempool/ may be a bad idea.

Jerin

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 13:25       ` Jerin Jacob
@ 2016-06-20 13:54         ` Thomas Monjalon
  2016-06-20 13:58           ` Ananyev, Konstantin
  2016-06-21  3:42           ` Jerin Jacob
  0 siblings, 2 replies; 48+ messages in thread
From: Thomas Monjalon @ 2016-06-20 13:54 UTC (permalink / raw)
  To: Jerin Jacob; +Cc: dev, David Hunt, olivier.matz, viktorin, shreyansh.jain

2016-06-20 18:55, Jerin Jacob:
> On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> > This is a mempool handler that is useful for pipelining apps, where
> > the mempool cache doesn't really work - example, where we have one
> > core doing rx (and alloc), and another core doing Tx (and return). In
> > such a case, the mempool ring simply cycles through all the mbufs,
> > resulting in a LLC miss on every mbuf allocated when the number of
> > mbufs is large. A stack recycles buffers more effectively in this
> > case.
> > 
> > Signed-off-by: David Hunt <david.hunt@intel.com>
> > ---
> >  lib/librte_mempool/Makefile            |   1 +
> >  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
> 
> How about moving new mempool handlers to drivers/mempool? (or similar).
> In future, adding HW specific handlers in lib/librte_mempool/ may be bad idea.

You're probably right.
However we need to check and understand what a HW mempool handler will be.
I imagine the first of them will have to move handlers in drivers/
Jerin, are you volunteer?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 13:54         ` Thomas Monjalon
@ 2016-06-20 13:58           ` Ananyev, Konstantin
  2016-06-20 14:22             ` Jerin Jacob
  2016-06-21  3:42           ` Jerin Jacob
  1 sibling, 1 reply; 48+ messages in thread
From: Ananyev, Konstantin @ 2016-06-20 13:58 UTC (permalink / raw)
  To: Thomas Monjalon, Jerin Jacob
  Cc: dev, Hunt, David, olivier.matz, viktorin, shreyansh.jain



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Thomas Monjalon
> Sent: Monday, June 20, 2016 2:54 PM
> To: Jerin Jacob
> Cc: dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> 
> 2016-06-20 18:55, Jerin Jacob:
> > On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> > > This is a mempool handler that is useful for pipelining apps, where
> > > the mempool cache doesn't really work - example, where we have one
> > > core doing rx (and alloc), and another core doing Tx (and return). In
> > > such a case, the mempool ring simply cycles through all the mbufs,
> > > resulting in a LLC miss on every mbuf allocated when the number of
> > > mbufs is large. A stack recycles buffers more effectively in this
> > > case.
> > >
> > > Signed-off-by: David Hunt <david.hunt@intel.com>
> > > ---
> > >  lib/librte_mempool/Makefile            |   1 +
> > >  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
> >
> > How about moving new mempool handlers to drivers/mempool? (or similar).
> > In future, adding HW specific handlers in lib/librte_mempool/ may be bad idea.
> 
> You're probably right.
> However we need to check and understand what a HW mempool handler will be.
> I imagine the first of them will have to move handlers in drivers/

Does it mean we'll have to move mbuf into drivers too?
Again, other libs use mempool too.
Why not just lib/librte_mempool/arch/<arch_specific_dir_here>
?
Konstantin


> Jerin, are you volunteer?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 13:58           ` Ananyev, Konstantin
@ 2016-06-20 14:22             ` Jerin Jacob
  2016-06-20 17:56               ` Ananyev, Konstantin
  0 siblings, 1 reply; 48+ messages in thread
From: Jerin Jacob @ 2016-06-20 14:22 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: Thomas Monjalon, dev, Hunt, David, olivier.matz, viktorin,
	shreyansh.jain

On Mon, Jun 20, 2016 at 01:58:04PM +0000, Ananyev, Konstantin wrote:
> 
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Thomas Monjalon
> > Sent: Monday, June 20, 2016 2:54 PM
> > To: Jerin Jacob
> > Cc: dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> > Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> > 
> > 2016-06-20 18:55, Jerin Jacob:
> > > On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> > > > This is a mempool handler that is useful for pipelining apps, where
> > > > the mempool cache doesn't really work - example, where we have one
> > > > core doing rx (and alloc), and another core doing Tx (and return). In
> > > > such a case, the mempool ring simply cycles through all the mbufs,
> > > > resulting in a LLC miss on every mbuf allocated when the number of
> > > > mbufs is large. A stack recycles buffers more effectively in this
> > > > case.
> > > >
> > > > Signed-off-by: David Hunt <david.hunt@intel.com>
> > > > ---
> > > >  lib/librte_mempool/Makefile            |   1 +
> > > >  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
> > >
> > > How about moving new mempool handlers to drivers/mempool? (or similar).
> > > In future, adding HW specific handlers in lib/librte_mempool/ may be bad idea.
> > 
> > You're probably right.
> > However we need to check and understand what a HW mempool handler will be.
> > I imagine the first of them will have to move handlers in drivers/
> 
> Does it mean it we'll have to move mbuf into drivers too?
> Again other libs do use mempool too.
> Why not just lib/librte_mempool/arch/<arch_specific_dir_here>
> ?

I was proposing to move only the new
handler (lib/librte_mempool/rte_mempool_stack.c), not any library or any
other common code.

Just like the DPDK crypto device: even if it is a software implementation, it's
better to move it to driver/crypto instead of lib/librte_cryptodev.

"lib/librte_mempool/arch/" is not the correct place, as it is platform specific,
not architecture specific, and a HW mempool device may be a PCIe or platform
device.

> Konstantin
> 
> 
> > Jerin, are you volunteer?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 14:22             ` Jerin Jacob
@ 2016-06-20 17:56               ` Ananyev, Konstantin
  2016-06-21  3:35                 ` Jerin Jacob
  0 siblings, 1 reply; 48+ messages in thread
From: Ananyev, Konstantin @ 2016-06-20 17:56 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, dev, Hunt, David, olivier.matz, viktorin,
	shreyansh.jain



> -----Original Message-----
> From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> Sent: Monday, June 20, 2016 3:22 PM
> To: Ananyev, Konstantin
> Cc: Thomas Monjalon; dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> 
> On Mon, Jun 20, 2016 at 01:58:04PM +0000, Ananyev, Konstantin wrote:
> >
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Thomas Monjalon
> > > Sent: Monday, June 20, 2016 2:54 PM
> > > To: Jerin Jacob
> > > Cc: dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> > > Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> > >
> > > 2016-06-20 18:55, Jerin Jacob:
> > > > On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> > > > > This is a mempool handler that is useful for pipelining apps, where
> > > > > the mempool cache doesn't really work - example, where we have one
> > > > > core doing rx (and alloc), and another core doing Tx (and return). In
> > > > > such a case, the mempool ring simply cycles through all the mbufs,
> > > > > resulting in a LLC miss on every mbuf allocated when the number of
> > > > > mbufs is large. A stack recycles buffers more effectively in this
> > > > > case.
> > > > >
> > > > > Signed-off-by: David Hunt <david.hunt@intel.com>
> > > > > ---
> > > > >  lib/librte_mempool/Makefile            |   1 +
> > > > >  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
> > > >
> > > > How about moving new mempool handlers to drivers/mempool? (or similar).
> > > > In future, adding HW specific handlers in lib/librte_mempool/ may be bad idea.
> > >
> > > You're probably right.
> > > However we need to check and understand what a HW mempool handler will be.
> > > I imagine the first of them will have to move handlers in drivers/
> >
> > Does it mean it we'll have to move mbuf into drivers too?
> > Again other libs do use mempool too.
> > Why not just lib/librte_mempool/arch/<arch_specific_dir_here>
> > ?
> 
> I was proposing only to move only the new
> handler(lib/librte_mempool/rte_mempool_stack.c). Not any library or any
> other common code.
> 
> Just like DPDK crypto device, Even if it is software implementation its
> better to move in driver/crypto instead of lib/librte_cryptodev
> 
> "lib/librte_mempool/arch/" is not correct place as it is platform specific
> not architecture specific and HW mempool device may be PCIe or platform
> device.

Ok, but why does rte_mempool_stack.c have to be moved?
I can hardly imagine it is 'platform specific'.
From my understanding it is generic code.
Konstantin


> 
> > Konstantin
> >
> >
> > > Jerin, are you volunteer?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 17:56               ` Ananyev, Konstantin
@ 2016-06-21  3:35                 ` Jerin Jacob
  2016-06-21  9:28                   ` Ananyev, Konstantin
  0 siblings, 1 reply; 48+ messages in thread
From: Jerin Jacob @ 2016-06-21  3:35 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: Thomas Monjalon, dev, Hunt, David, olivier.matz, viktorin,
	shreyansh.jain

On Mon, Jun 20, 2016 at 05:56:40PM +0000, Ananyev, Konstantin wrote:
> 
> 
> > -----Original Message-----
> > From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> > Sent: Monday, June 20, 2016 3:22 PM
> > To: Ananyev, Konstantin
> > Cc: Thomas Monjalon; dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> > Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> > 
> > On Mon, Jun 20, 2016 at 01:58:04PM +0000, Ananyev, Konstantin wrote:
> > >
> > >
> > > > -----Original Message-----
> > > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Thomas Monjalon
> > > > Sent: Monday, June 20, 2016 2:54 PM
> > > > To: Jerin Jacob
> > > > Cc: dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> > > > Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> > > >
> > > > 2016-06-20 18:55, Jerin Jacob:
> > > > > On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> > > > > > This is a mempool handler that is useful for pipelining apps, where
> > > > > > the mempool cache doesn't really work - example, where we have one
> > > > > > core doing rx (and alloc), and another core doing Tx (and return). In
> > > > > > such a case, the mempool ring simply cycles through all the mbufs,
> > > > > > resulting in a LLC miss on every mbuf allocated when the number of
> > > > > > mbufs is large. A stack recycles buffers more effectively in this
> > > > > > case.
> > > > > >
> > > > > > Signed-off-by: David Hunt <david.hunt@intel.com>
> > > > > > ---
> > > > > >  lib/librte_mempool/Makefile            |   1 +
> > > > > >  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
> > > > >
> > > > > How about moving new mempool handlers to drivers/mempool? (or similar).
> > > > > In future, adding HW specific handlers in lib/librte_mempool/ may be bad idea.
> > > >
> > > > You're probably right.
> > > > However we need to check and understand what a HW mempool handler will be.
> > > > I imagine the first of them will have to move handlers in drivers/
> > >
> > > Does it mean it we'll have to move mbuf into drivers too?
> > > Again other libs do use mempool too.
> > > Why not just lib/librte_mempool/arch/<arch_specific_dir_here>
> > > ?
> > 
> > I was proposing only to move only the new
> > handler(lib/librte_mempool/rte_mempool_stack.c). Not any library or any
> > other common code.
> > 
> > Just like DPDK crypto device, Even if it is software implementation its
> > better to move in driver/crypto instead of lib/librte_cryptodev
> > 
> > "lib/librte_mempool/arch/" is not correct place as it is platform specific
> > not architecture specific and HW mempool device may be PCIe or platform
> > device.
> 
> Ok, but why rte_mempool_stack.c has to be moved?

I just thought of having all the mempool handlers in one place.
We can't put all the HW mempool handlers in lib/librte_mempool/.

Jerin

> I can hardly imagine it is a 'platform sepcific'.
> From my understanding it is a generic code.
> Konstantin
> 
> 
> > 
> > > Konstantin
> > >
> > >
> > > > Jerin, are you volunteer?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-20 13:54         ` Thomas Monjalon
  2016-06-20 13:58           ` Ananyev, Konstantin
@ 2016-06-21  3:42           ` Jerin Jacob
  1 sibling, 0 replies; 48+ messages in thread
From: Jerin Jacob @ 2016-06-21  3:42 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, David Hunt, olivier.matz, viktorin, shreyansh.jain

On Mon, Jun 20, 2016 at 03:54:20PM +0200, Thomas Monjalon wrote:
> 2016-06-20 18:55, Jerin Jacob:
> > On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> > > This is a mempool handler that is useful for pipelining apps, where
> > > the mempool cache doesn't really work - example, where we have one
> > > core doing rx (and alloc), and another core doing Tx (and return). In
> > > such a case, the mempool ring simply cycles through all the mbufs,
> > > resulting in a LLC miss on every mbuf allocated when the number of
> > > mbufs is large. A stack recycles buffers more effectively in this
> > > case.
> > > 
> > > Signed-off-by: David Hunt <david.hunt@intel.com>
> > > ---
> > >  lib/librte_mempool/Makefile            |   1 +
> > >  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
> > 
> > How about moving new mempool handlers to drivers/mempool? (or similar).
> > In future, adding HW specific handlers in lib/librte_mempool/ may be bad idea.
> 
> You're probably right.
> However we need to check and understand what a HW mempool handler will be.
> I imagine the first of them will have to move handlers in drivers/
> Jerin, are you volunteer?

Thomas,

We are planning to upstream a HW based mempool handler.
Not sure about the timelines. We will take up this as part of a HW
based mempool upstreaming if no one takes it before that.

Jerin

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-21  3:35                 ` Jerin Jacob
@ 2016-06-21  9:28                   ` Ananyev, Konstantin
  2016-06-21  9:44                     ` Olivier Matz
  0 siblings, 1 reply; 48+ messages in thread
From: Ananyev, Konstantin @ 2016-06-21  9:28 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, dev, Hunt, David, olivier.matz, viktorin,
	shreyansh.jain



> -----Original Message-----
> From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> Sent: Tuesday, June 21, 2016 4:35 AM
> To: Ananyev, Konstantin
> Cc: Thomas Monjalon; dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> 
> On Mon, Jun 20, 2016 at 05:56:40PM +0000, Ananyev, Konstantin wrote:
> >
> >
> > > -----Original Message-----
> > > From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> > > Sent: Monday, June 20, 2016 3:22 PM
> > > To: Ananyev, Konstantin
> > > Cc: Thomas Monjalon; dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> > > Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> > >
> > > On Mon, Jun 20, 2016 at 01:58:04PM +0000, Ananyev, Konstantin wrote:
> > > >
> > > >
> > > > > -----Original Message-----
> > > > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Thomas Monjalon
> > > > > Sent: Monday, June 20, 2016 2:54 PM
> > > > > To: Jerin Jacob
> > > > > Cc: dev@dpdk.org; Hunt, David; olivier.matz@6wind.com; viktorin@rehivetech.com; shreyansh.jain@nxp.com
> > > > > Subject: Re: [dpdk-dev] [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
> > > > >
> > > > > 2016-06-20 18:55, Jerin Jacob:
> > > > > > On Mon, Jun 20, 2016 at 02:08:10PM +0100, David Hunt wrote:
> > > > > > > This is a mempool handler that is useful for pipelining apps, where
> > > > > > > the mempool cache doesn't really work - example, where we have one
> > > > > > > core doing rx (and alloc), and another core doing Tx (and return). In
> > > > > > > such a case, the mempool ring simply cycles through all the mbufs,
> > > > > > > resulting in a LLC miss on every mbuf allocated when the number of
> > > > > > > mbufs is large. A stack recycles buffers more effectively in this
> > > > > > > case.
> > > > > > >
> > > > > > > Signed-off-by: David Hunt <david.hunt@intel.com>
> > > > > > > ---
> > > > > > >  lib/librte_mempool/Makefile            |   1 +
> > > > > > >  lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
> > > > > >
> > > > > > How about moving new mempool handlers to drivers/mempool? (or similar).
> > > > > > In future, adding HW specific handlers in lib/librte_mempool/ may be bad idea.
> > > > >
> > > > > You're probably right.
> > > > > However we need to check and understand what a HW mempool handler will be.
> > > > > I imagine the first of them will have to move handlers in drivers/
> > > >
> > > > Does it mean it we'll have to move mbuf into drivers too?
> > > > Again other libs do use mempool too.
> > > > Why not just lib/librte_mempool/arch/<arch_specific_dir_here>
> > > > ?
> > >
> > > I was proposing only to move only the new
> > > handler(lib/librte_mempool/rte_mempool_stack.c). Not any library or any
> > > other common code.
> > >
> > > Just like DPDK crypto device, Even if it is software implementation its
> > > better to move in driver/crypto instead of lib/librte_cryptodev
> > >
> > > "lib/librte_mempool/arch/" is not correct place as it is platform specific
> > > not architecture specific and HW mempool device may be PCIe or platform
> > > device.
> >
> > Ok, but why rte_mempool_stack.c has to be moved?
> 
> Just thought of having all the mempool handlers at one place.
> We can't move all HW mempool handlers at lib/librte_mempool/

Yep, but from your previous mail I thought we might have specific ones
for specific devices, no? 
If so, why put them all in one place? Why not just in
drivers/xxx_dev/xxx_mempool.[h,c],
and keep the generic ones in lib/librte_mempool?
Konstantin

> 
> Jerin
> 
> > I can hardly imagine it is a 'platform sepcific'.
> > From my understanding it is a generic code.
> > Konstantin
> >
> >
> > >
> > > > Konstantin
> > > >
> > > >
> > > > > Jerin, are you volunteer?

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v3 1/2] mempool: add stack (lifo) mempool handler
  2016-06-21  9:28                   ` Ananyev, Konstantin
@ 2016-06-21  9:44                     ` Olivier Matz
  0 siblings, 0 replies; 48+ messages in thread
From: Olivier Matz @ 2016-06-21  9:44 UTC (permalink / raw)
  To: Ananyev, Konstantin, Jerin Jacob
  Cc: Thomas Monjalon, dev, Hunt, David, viktorin, shreyansh.jain

Hi,

On 06/21/2016 11:28 AM, Ananyev, Konstantin wrote:
>>>> I was proposing only to move only the new
>>>> handler(lib/librte_mempool/rte_mempool_stack.c). Not any library or any
>>>> other common code.
>>>>
>>>> Just like DPDK crypto device, Even if it is software implementation its
>>>> better to move in driver/crypto instead of lib/librte_cryptodev
>>>>
>>>> "lib/librte_mempool/arch/" is not correct place as it is platform specific
>>>> not architecture specific and HW mempool device may be PCIe or platform
>>>> device.
>>>
>>> Ok, but why rte_mempool_stack.c has to be moved?
>>
>> Just thought of having all the mempool handlers at one place.
>> We can't move all HW mempool handlers at lib/librte_mempool/
> 
> Yep, but from your previous mail I thought we might have specific ones
> for specific devices, no? 
> If so, why to put them in one place, why just not in:
> Drivers/xxx_dev/xxx_mempool.[h,c]
> ?
> And keep generic ones in lib/librte_mempool
> ?

I think all drivers (generic or not) should be in the same place for
consistency.

I'm not sure having them in lib/librte_mempool is really a problem today,
but once hardware-dependent handlers are pushed, we may move all of them
to drivers/mempool, because I think we should avoid having hw-specific
code in lib/.

I don't think it will cause ABI/API breakage, since the user always
talks to the generic mempool API.
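
A minimal sketch of that point (assuming mp is a pool created with the
"stack" ops as in this series): the handler name is the only
handler-specific part, and the data path is the generic API:

	void *objs[32];

	/* bulk get/put go through the generic mempool API; the "stack"
	 * ops are reached only when the per-lcore cache is bypassed,
	 * empty or overflowing */
	if (rte_mempool_get_bulk(mp, objs, 32) == 0) {
		/* ... use the objects ... */
		rte_mempool_put_bulk(mp, objs, 32);
	}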

Regards
Olivier

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v2 1/3] mempool: add stack (lifo) mempool handler
  2016-06-20 12:59           ` Hunt, David
@ 2016-06-29 14:31             ` Olivier MATZ
  0 siblings, 0 replies; 48+ messages in thread
From: Olivier MATZ @ 2016-06-29 14:31 UTC (permalink / raw)
  To: Hunt, David, dev

Hi Dave,

On 06/20/2016 02:59 PM, Hunt, David wrote:
> Hi Olivier,
>
> On 20/6/2016 9:17 AM, Olivier Matz wrote:
>> Hi David,
>>
>> On 06/17/2016 04:18 PM, Hunt, David wrote:
>>>> After reading it, I realize that it's nearly exactly the same code than
>>>> in "app/test: test external mempool handler".
>>>> http://patchwork.dpdk.org/dev/patchwork/patch/12896/
>>>>
>>>> We should drop one of them. If this stack handler is really useful for
>>>> a performance use-case, it could go in librte_mempool. At the first
>>>> read, the code looks like a demo example : it uses a simple spinlock
>>>> for
>>>> concurrent accesses to the common pool. Maybe the mempool cache hides
>>>> this cost, in this case we could also consider removing the use of the
>>>> rte_ring.
>>> While I agree that the code is similar, the handler in the test is a
>>> ring based handler,
>>> where as this patch adds an array based handler.
>> Not sure I'm getting what you are saying. Do you mean stack instead
>> of array?
>
> Yes, apologies, stack.
>
>> Actually, both are stacks when talking about bulks of objects. If we
>> consider each objects one by one, that's true the order will differ.
>> But as discussed in [1], the cache code already reverses the order of
>> objects when doing a mempool_get(). I'd say the reversing in cache code
>> is not really needed (only the order of object bulks should remain the
>> same). A rte_memcpy() looks to be faster, but it would require to do
>> some real-life tests to validate or unvalidate this theory.
>>
>> So to conclude, I still think both code in app/test and lib/mempool are
>> quite similar, and only one of them should be kept.
>>
>> [1] http://www.dpdk.org/ml/archives/dev/2016-May/039873.html
>
> OK, so we will probably remove the test app portion in the future if it
> is not needed,
> and if we apply the stack handler proposed in this patch set.

Back on this thread. Maybe I misunderstood what you were saying
here (because I see this comment is not addressed in v3).

I don't think we should add similar code in two different places
and then remove it later in another patchset. I feel it's better to
have only one instance of the stack handler, either in test, or
in librte_mempool.

If you plan to do a v4, I think this is something that could go in 
16.07-rc2.

Regards,
Olivier

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v4 0/2] mempool: add stack mempool handler
  2016-06-20 13:08   ` mempool: add stack " David Hunt
  2016-06-20 13:08     ` [PATCH v3 1/2] mempool: add stack (lifo) " David Hunt
  2016-06-20 13:08     ` [PATCH v3 2/2] test: add autotest for external mempool stack handler David Hunt
@ 2016-06-30  7:41     ` David Hunt
  2016-06-30  7:41       ` [PATCH v4 1/2] mempool: add stack (lifo) " David Hunt
                         ` (2 more replies)
  2 siblings, 3 replies; 48+ messages in thread
From: David Hunt @ 2016-06-30  7:41 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain

This patch set adds a lifo stack handler to the external mempool
manager.

This patch utilises the mempool handler feature which allows the addition
of new mempool handlers to DPDK.

Performance Notes:
For the mempool_perf_autotest, there is an average 30% increase in
performance for the local cache use-case for 1 - 36 cores (results
within those tests vary between a 10% and 45% gain, averaging 30%
over all the tests).

However, for the tests with no local cache configured, enqueue/dequeue
throughput drops by about 30%, with the 36-core case yielding the largest
drop of 40%. So this handler would not be recommended for no-cache
applications.
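
For reference, the "local cache" in these figures is the normal per-lcore
mempool cache, selected via the cache_size argument at creation time. A
rough sketch, with the name and sizes as placeholders (the usual
set_ops_byname("stack") and populate calls follow as before):

	struct rte_mempool *mp;

	/* a non-zero cache_size enables the per-lcore cache measured above;
	 * passing 0 instead gives the "no local cache" case */
	mp = rte_mempool_create_empty("stack_pool", 8192, 2048,
			RTE_MEMPOOL_CACHE_MAX_SIZE, 0, SOCKET_ID_ANY, 0);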

v4 changes:
  * Update the test to replace the custom handler test with the
    stack handler test rather than just adding a stack handler test.
    The custom handler code is very similar to the stack handler, so
    there's no need to have both.

v3 changes:
  * Updated based on the latest version (v15) of the Mempool Handler feature

v2 changes:
  * updated based on mailing list feedback (Thanks Stephen)
  * checkpatch fixes.

David Hunt (2)
  mempool: add stack (lifo) mempool handler
  test: migrate custom handler test to stack handler

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v4 1/2] mempool: add stack (lifo) mempool handler
  2016-06-30  7:41     ` [PATCH v4 0/2] mempool: add stack mempool handler David Hunt
@ 2016-06-30  7:41       ` David Hunt
  2016-06-30  7:41       ` [PATCH v4 2/2] test: migrate custom handler test to stack handler David Hunt
  2016-06-30 18:05       ` [PATCH v5 0/2] mempool: add stack mempool handler David Hunt
  2 siblings, 0 replies; 48+ messages in thread
From: David Hunt @ 2016-06-30  7:41 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

This is a mempool handler that is useful for pipelining apps, where
the mempool cache doesn't really work - for example, where we have one
core doing Rx (and alloc) and another core doing Tx (and return). In
such a case, the mempool ring simply cycles through all the mbufs,
resulting in an LLC miss on every mbuf allocated when the number of
mbufs is large. A stack recycles buffers more effectively in this
case.

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
 2 files changed, 146 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index a4c089e..057a6ab 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@ LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..a92da69
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,145 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_stack {
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+};
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s;
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+	/* Allocate our local memory structure */
+	s = rte_zmalloc_socket("mempool-stack",
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return -ENOMEM;
+	}
+
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool_data = s;
+
+	return 0;
+}
+
+static int stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index;
+
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if ((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOBUFS;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int stack_dequeue(struct rte_mempool *mp, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index, len;
+
+	rte_spinlock_lock(&s->sl);
+
+	if (unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n;
+			++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+
+	return s->len;
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+	rte_free((void *)(mp->pool_data));
+}
+
+static struct rte_mempool_ops ops_stack = {
+	.name = "stack",
+	.alloc = stack_alloc,
+	.free = stack_free,
+	.enqueue = stack_enqueue,
+	.dequeue = stack_dequeue,
+	.get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH v4 2/2] test: migrate custom handler test to stack handler
  2016-06-30  7:41     ` [PATCH v4 0/2] mempool: add stack mempool handler David Hunt
  2016-06-30  7:41       ` [PATCH v4 1/2] mempool: add stack (lifo) " David Hunt
@ 2016-06-30  7:41       ` David Hunt
  2016-06-30  9:45         ` Thomas Monjalon
  2016-06-30 18:05       ` [PATCH v5 0/2] mempool: add stack mempool handler David Hunt
  2 siblings, 1 reply; 48+ messages in thread
From: David Hunt @ 2016-06-30  7:41 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

After introducing the stack handler in the previous commit,
we now have very similar code to the custom handler in test_mempool.c,
which creates a custom mempool based on simple mallocs.
The stack handler is a cleaner example of adding a new mempool handler,
so this commit replaces the custom handler test with a stack
handler test, and removes the custom handler code.

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 114 ++++++------------------------------------------
 1 file changed, 13 insertions(+), 101 deletions(-)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 31582d8..19b1eb9 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -83,99 +83,6 @@
 static rte_atomic32_t synchro;
 
 /*
- * Simple example of custom mempool structure. Holds pointers to all the
- * elements which are simply malloc'd in this example.
- */
-struct custom_mempool {
-	rte_spinlock_t lock;
-	unsigned count;
-	unsigned size;
-	void *elts[];
-};
-
-/*
- * Loop through all the element pointers and allocate a chunk of memory, then
- * insert that memory into the ring.
- */
-static int
-custom_mempool_alloc(struct rte_mempool *mp)
-{
-	struct custom_mempool *cm;
-
-	cm = rte_zmalloc("custom_mempool",
-		sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0);
-	if (cm == NULL)
-		return -ENOMEM;
-
-	rte_spinlock_init(&cm->lock);
-	cm->count = 0;
-	cm->size = mp->size;
-	mp->pool_data = cm;
-	return 0;
-}
-
-static void
-custom_mempool_free(struct rte_mempool *mp)
-{
-	rte_free((void *)(mp->pool_data));
-}
-
-static int
-custom_mempool_enqueue(struct rte_mempool *mp, void * const *obj_table,
-		unsigned n)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-	int ret = 0;
-
-	rte_spinlock_lock(&cm->lock);
-	if (cm->count + n > cm->size) {
-		ret = -ENOBUFS;
-	} else {
-		memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n);
-		cm->count += n;
-	}
-	rte_spinlock_unlock(&cm->lock);
-	return ret;
-}
-
-
-static int
-custom_mempool_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-	int ret = 0;
-
-	rte_spinlock_lock(&cm->lock);
-	if (n > cm->count) {
-		ret = -ENOENT;
-	} else {
-		cm->count -= n;
-		memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n);
-	}
-	rte_spinlock_unlock(&cm->lock);
-	return ret;
-}
-
-static unsigned
-custom_mempool_get_count(const struct rte_mempool *mp)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-
-	return cm->count;
-}
-
-static struct rte_mempool_ops mempool_ops_custom = {
-	.name = "custom_handler",
-	.alloc = custom_mempool_alloc,
-	.free = custom_mempool_free,
-	.enqueue = custom_mempool_enqueue,
-	.dequeue = custom_mempool_dequeue,
-	.get_count = custom_mempool_get_count,
-};
-
-MEMPOOL_REGISTER_OPS(mempool_ops_custom);
-
-/*
  * save the object number in the first 4 bytes of object data. All
  * other bytes are set to 0.
  */
@@ -573,6 +480,7 @@ test_mempool(void)
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
 	struct rte_mempool *mp_ext = NULL;
+	struct rte_mempool *mp_stack = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -602,25 +510,25 @@ test_mempool(void)
 	}
 
 	/* create a mempool with an external handler */
-	mp_ext = rte_mempool_create_empty("test_ext",
+	mp_stack = rte_mempool_create_empty("test_stack",
 		MEMPOOL_SIZE,
 		MEMPOOL_ELT_SIZE,
 		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
 		SOCKET_ID_ANY, 0);
 
-	if (mp_ext == NULL) {
-		printf("cannot allocate mp_ext mempool\n");
+	if (mp_stack == NULL) {
+		printf("cannot allocate mp_stack mempool\n");
 		goto err;
 	}
-	if (rte_mempool_set_ops_byname(mp_ext, "custom_handler", NULL) < 0) {
-		printf("cannot set custom handler\n");
+	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
+		printf("cannot set stack handler\n");
 		goto err;
 	}
-	if (rte_mempool_populate_default(mp_ext) < 0) {
-		printf("cannot populate mp_ext mempool\n");
+	if (rte_mempool_populate_default(mp_stack) < 0) {
+		printf("cannot populate mp_stack mempool\n");
 		goto err;
 	}
-	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
+	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
 
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
@@ -655,6 +563,10 @@ test_mempool(void)
 	if (test_mempool_xmem_misc() < 0)
 		goto err;
 
+	/* test the stack handler */
+	if (test_mempool_basic(mp_stack) < 0)
+		goto err;
+
 	rte_mempool_list_dump(stdout);
 
 	return 0;
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [PATCH v4 2/2] test: migrate custom handler test to stack handler
  2016-06-30  7:41       ` [PATCH v4 2/2] test: migrate custom handler test to stack handler David Hunt
@ 2016-06-30  9:45         ` Thomas Monjalon
  2016-06-30 17:36           ` Hunt, David
  0 siblings, 1 reply; 48+ messages in thread
From: Thomas Monjalon @ 2016-06-30  9:45 UTC (permalink / raw)
  To: David Hunt; +Cc: dev, olivier.matz, viktorin, jerin.jacob, shreyansh.jain

Hi David,

There is a compilation error:

app/test/test_mempool.c:598:6: error:
too few arguments to function ‘test_mempool_basic’

Please send a v5, thanks.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v4 2/2] test: migrate custom handler test to stack handler
  2016-06-30  9:45         ` Thomas Monjalon
@ 2016-06-30 17:36           ` Hunt, David
  2016-06-30 17:46             ` Thomas Monjalon
  0 siblings, 1 reply; 48+ messages in thread
From: Hunt, David @ 2016-06-30 17:36 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, olivier.matz, viktorin, jerin.jacob, shreyansh.jain



On 30/6/2016 10:45 AM, Thomas Monjalon wrote:
> Hi David,
>
> There is a compilation error:
>
> app/test/test_mempool.c:598:6: error:
> too few arguments to function ‘test_mempool_basic’
>
> Please send a v5, thanks.

Thomas, after the second patch of the set is applied, there should only
be 585 lines in test_mempool.c.

I cannot reproduce the issue you are seeing. I have pulled down the 2 
patches from patchwork and it compiles and runs the autotest fine here. 
Could you please check on your side?

Regards,
Dave.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v4 2/2] test: migrate custom handler test to stack handler
  2016-06-30 17:36           ` Hunt, David
@ 2016-06-30 17:46             ` Thomas Monjalon
  2016-06-30 17:49               ` Hunt, David
  0 siblings, 1 reply; 48+ messages in thread
From: Thomas Monjalon @ 2016-06-30 17:46 UTC (permalink / raw)
  To: Hunt, David; +Cc: dev, olivier.matz, viktorin, jerin.jacob, shreyansh.jain

2016-06-30 18:36, Hunt, David:
> On 30/6/2016 10:45 AM, Thomas Monjalon wrote:
> > Hi David,
> >
> > There is a compilation error:
> >
> > app/test/test_mempool.c:598:6: error:
> > too few arguments to function ‘test_mempool_basic’
> >
> > Please send a v5, thanks.
> 
> Thomas, after the second commit patch-set is applied, there should only 
> be 585 lines in test_mempool.c

I have 616 lines.

> I cannot reproduce the issue you are seeing. I have pulled down the 2 
> patches from patchwork and it compiles and runs the autotest fine here. 
> Could you please check on your side?

It is because of the patch from Lazaros.
Please rebase from master.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v4 2/2] test: migrate custom handler test to stack handler
  2016-06-30 17:46             ` Thomas Monjalon
@ 2016-06-30 17:49               ` Hunt, David
  0 siblings, 0 replies; 48+ messages in thread
From: Hunt, David @ 2016-06-30 17:49 UTC (permalink / raw)
  To: Thomas Monjalon; +Cc: dev, olivier.matz, viktorin, jerin.jacob, shreyansh.jain



On 30/6/2016 6:46 PM, Thomas Monjalon wrote:
> 2016-06-30 18:36, Hunt, David:
>> On 30/6/2016 10:45 AM, Thomas Monjalon wrote:
>>> Hi David,
>>>
>>> There is a compilation error:
>>>
>>> app/test/test_mempool.c:598:6: error:
>>> too few arguments to function ‘test_mempool_basic’
>>>
>>> Please send a v5, thanks.
>> Thomas, after the second commit patch-set is applied, there should only
>> be 585 lines in test_mempool.c
> I have 616 lines.
>
>> I cannot reproduce the issue you are seeing. I have pulled down the 2
>> patches from patchwork and it compiles and runs the autotest fine here.
>> Could you please check on your side?
> It is because of the patch from Lazaros.
> Please rebase from master.

OK, will do.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v5 0/2] mempool: add stack mempool handler
  2016-06-30  7:41     ` [PATCH v4 0/2] mempool: add stack mempool handler David Hunt
  2016-06-30  7:41       ` [PATCH v4 1/2] mempool: add stack (lifo) " David Hunt
  2016-06-30  7:41       ` [PATCH v4 2/2] test: migrate custom handler test to stack handler David Hunt
@ 2016-06-30 18:05       ` David Hunt
  2016-06-30 18:05         ` [PATCH v5 1/2] mempool: add stack (lifo) " David Hunt
                           ` (2 more replies)
  2 siblings, 3 replies; 48+ messages in thread
From: David Hunt @ 2016-06-30 18:05 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain

This patch set adds a lifo stack handler to the external mempool
manager.

This patch utilises the mempool handler feature which allows the addition
of new mempool handlers to DPDK.

v5 changes:
  * Added the extra parameter required by the test_mempool_basic()
    function, which has changed due to the user-defined cache
    functionality.

v4 changes:
  * Update the test to replace the custom handler test with the
    stack handler test rather than just adding a stack handler test.
    The custom handler code is very similar to the stack handler, so
    there's no need to have both.

v3 changes:
  * Updated based on the latest version (v15) of the Mempool Handler feature

v2 changes:
  * updated based on mailing list feedback (Thanks Stephen)
  * checkpatch fixes.

David Hunt (2)
  mempool: add stack (lifo) mempool handler
  test: migrate custom handler test to stack handler

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v5 1/2] mempool: add stack (lifo) mempool handler
  2016-06-30 18:05       ` [PATCH v5 0/2] mempool: add stack mempool handler David Hunt
@ 2016-06-30 18:05         ` David Hunt
  2016-06-30 18:05         ` [PATCH v5 2/2] test: migrate custom handler test to stack handler David Hunt
  2016-07-01  7:46         ` [PATCH v6 0/2] mempool: add stack mempool handler David Hunt
  2 siblings, 0 replies; 48+ messages in thread
From: David Hunt @ 2016-06-30 18:05 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

This is a mempool handler that is useful for pipelining apps, where
the mempool cache doesn't really work - for example, where we have one
core doing Rx (and alloc) and another core doing Tx (and return). In
such a case, the mempool ring simply cycles through all the mbufs,
resulting in an LLC miss on every mbuf allocated when the number of
mbufs is large. A stack recycles buffers more effectively in this
case.

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
 2 files changed, 146 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index a4c089e..057a6ab 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@ LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..a92da69
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,145 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_stack {
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+};
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s;
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+	/* Allocate our local memory structure */
+	s = rte_zmalloc_socket("mempool-stack",
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return -ENOMEM;
+	}
+
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool_data = s;
+
+	return 0;
+}
+
+static int stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index;
+
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if ((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOBUFS;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int stack_dequeue(struct rte_mempool *mp, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index, len;
+
+	rte_spinlock_lock(&s->sl);
+
+	if (unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n;
+			++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+
+	return s->len;
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+	rte_free((void *)(mp->pool_data));
+}
+
+static struct rte_mempool_ops ops_stack = {
+	.name = "stack",
+	.alloc = stack_alloc,
+	.free = stack_free,
+	.enqueue = stack_enqueue,
+	.dequeue = stack_dequeue,
+	.get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH v5 2/2] test: migrate custom handler test to stack handler
  2016-06-30 18:05       ` [PATCH v5 0/2] mempool: add stack mempool handler David Hunt
  2016-06-30 18:05         ` [PATCH v5 1/2] mempool: add stack (lifo) " David Hunt
@ 2016-06-30 18:05         ` David Hunt
  2016-07-01  7:32           ` Olivier MATZ
  2016-07-01  7:46         ` [PATCH v6 0/2] mempool: add stack mempool handler David Hunt
  2 siblings, 1 reply; 48+ messages in thread
From: David Hunt @ 2016-06-30 18:05 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

After introducing the stack handler in the previous commit,
we now have very similar code to the custom handler in test_mempool.c,
which creates a custom mempool based on simple mallocs.
The stack handler is a cleaner example of adding a new mempool handler,
so this commit replaces the custom handler test with a stack
handler test, and removes the custom handler code.

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 114 ++++++------------------------------------------
 1 file changed, 13 insertions(+), 101 deletions(-)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 63c61f3..b751220 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -89,99 +89,6 @@
 static rte_atomic32_t synchro;
 
 /*
- * Simple example of custom mempool structure. Holds pointers to all the
- * elements which are simply malloc'd in this example.
- */
-struct custom_mempool {
-	rte_spinlock_t lock;
-	unsigned count;
-	unsigned size;
-	void *elts[];
-};
-
-/*
- * Loop through all the element pointers and allocate a chunk of memory, then
- * insert that memory into the ring.
- */
-static int
-custom_mempool_alloc(struct rte_mempool *mp)
-{
-	struct custom_mempool *cm;
-
-	cm = rte_zmalloc("custom_mempool",
-		sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0);
-	if (cm == NULL)
-		return -ENOMEM;
-
-	rte_spinlock_init(&cm->lock);
-	cm->count = 0;
-	cm->size = mp->size;
-	mp->pool_data = cm;
-	return 0;
-}
-
-static void
-custom_mempool_free(struct rte_mempool *mp)
-{
-	rte_free((void *)(mp->pool_data));
-}
-
-static int
-custom_mempool_enqueue(struct rte_mempool *mp, void * const *obj_table,
-		unsigned n)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-	int ret = 0;
-
-	rte_spinlock_lock(&cm->lock);
-	if (cm->count + n > cm->size) {
-		ret = -ENOBUFS;
-	} else {
-		memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n);
-		cm->count += n;
-	}
-	rte_spinlock_unlock(&cm->lock);
-	return ret;
-}
-
-
-static int
-custom_mempool_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-	int ret = 0;
-
-	rte_spinlock_lock(&cm->lock);
-	if (n > cm->count) {
-		ret = -ENOENT;
-	} else {
-		cm->count -= n;
-		memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n);
-	}
-	rte_spinlock_unlock(&cm->lock);
-	return ret;
-}
-
-static unsigned
-custom_mempool_get_count(const struct rte_mempool *mp)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-
-	return cm->count;
-}
-
-static struct rte_mempool_ops mempool_ops_custom = {
-	.name = "custom_handler",
-	.alloc = custom_mempool_alloc,
-	.free = custom_mempool_free,
-	.enqueue = custom_mempool_enqueue,
-	.dequeue = custom_mempool_dequeue,
-	.get_count = custom_mempool_get_count,
-};
-
-MEMPOOL_REGISTER_OPS(mempool_ops_custom);
-
-/*
  * save the object number in the first 4 bytes of object data. All
  * other bytes are set to 0.
  */
@@ -600,6 +507,7 @@ test_mempool(void)
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
 	struct rte_mempool *mp_ext = NULL;
+	struct rte_mempool *mp_stack = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -629,25 +537,25 @@ test_mempool(void)
 	}
 
 	/* create a mempool with an external handler */
-	mp_ext = rte_mempool_create_empty("test_ext",
+	mp_stack = rte_mempool_create_empty("test_stack",
 		MEMPOOL_SIZE,
 		MEMPOOL_ELT_SIZE,
 		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
 		SOCKET_ID_ANY, 0);
 
-	if (mp_ext == NULL) {
-		printf("cannot allocate mp_ext mempool\n");
+	if (mp_stack == NULL) {
+		printf("cannot allocate mp_stack mempool\n");
 		goto err;
 	}
-	if (rte_mempool_set_ops_byname(mp_ext, "custom_handler", NULL) < 0) {
-		printf("cannot set custom handler\n");
+	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
+		printf("cannot set stack handler\n");
 		goto err;
 	}
-	if (rte_mempool_populate_default(mp_ext) < 0) {
-		printf("cannot populate mp_ext mempool\n");
+	if (rte_mempool_populate_default(mp_stack) < 0) {
+		printf("cannot populate mp_stack mempool\n");
 		goto err;
 	}
-	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
+	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
 
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
@@ -686,6 +594,10 @@ test_mempool(void)
 	if (test_mempool_xmem_misc() < 0)
 		goto err;
 
+	/* test the stack handler */
+	if (test_mempool_basic(mp_stack, 1) < 0)
+		goto err;
+
 	rte_mempool_list_dump(stdout);
 
 	return 0;
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [PATCH v5 2/2] test: migrate custom handler test to stack handler
  2016-06-30 18:05         ` [PATCH v5 2/2] test: migrate custom handler test to stack handler David Hunt
@ 2016-07-01  7:32           ` Olivier MATZ
  0 siblings, 0 replies; 48+ messages in thread
From: Olivier MATZ @ 2016-07-01  7:32 UTC (permalink / raw)
  To: David Hunt, dev; +Cc: viktorin, jerin.jacob, shreyansh.jain

Hi David,

On 06/30/2016 08:05 PM, David Hunt wrote:
> After introducing the stack handler in the previous commit,
> we now have very similar code to the custom handler in test_mempool.c,
> which creates a custom mempool based on simple mallocs.
> The stack handler is a cleaner example of adding a new mempool handler,
> so this commit replaces the custom handler test with a stack
> handler test, and removes the custom handler code.
> 
> Signed-off-by: David Hunt <david.hunt@intel.com>
>
> [...]
> 
> -static struct rte_mempool_ops mempool_ops_custom = {
> -	.name = "custom_handler",
> -	.alloc = custom_mempool_alloc,
> -	.free = custom_mempool_free,
> -	.enqueue = custom_mempool_enqueue,
> -	.dequeue = custom_mempool_dequeue,
> -	.get_count = custom_mempool_get_count,
> -};
> -
> -MEMPOOL_REGISTER_OPS(mempool_ops_custom);
> -
> -/*
>   * save the object number in the first 4 bytes of object data. All
>   * other bytes are set to 0.
>   */
> @@ -600,6 +507,7 @@ test_mempool(void)
>  	struct rte_mempool *mp_cache = NULL;
>  	struct rte_mempool *mp_nocache = NULL;
>  	struct rte_mempool *mp_ext = NULL;

mp_ext remains but is unused (it is just freed below).
It should be removed.

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v6 0/2] mempool: add stack mempool handler
  2016-06-30 18:05       ` [PATCH v5 0/2] mempool: add stack mempool handler David Hunt
  2016-06-30 18:05         ` [PATCH v5 1/2] mempool: add stack (lifo) " David Hunt
  2016-06-30 18:05         ` [PATCH v5 2/2] test: migrate custom handler test to stack handler David Hunt
@ 2016-07-01  7:46         ` David Hunt
  2016-07-01  7:46           ` [PATCH v6 1/2] mempool: add stack (lifo) " David Hunt
                             ` (2 more replies)
  2 siblings, 3 replies; 48+ messages in thread
From: David Hunt @ 2016-07-01  7:46 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain

This patch set adds a lifo stack handler to the external mempool
manager.

This patch utilises the mempool handler feature which allows the addition
of new mempool handlers to DPDK.
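
As a rough sketch of what selecting the handler looks like from an
application, the sequence below mirrors what the test in patch 2/2 does;
the function name, pool name and sizes are only illustrative.

#include <rte_mempool.h>

static struct rte_mempool *
create_stack_pool(unsigned int nb_objs, unsigned int obj_size)
{
	struct rte_mempool *mp;

	/* create an empty pool, attach the "stack" ops by name,
	 * then populate it */
	mp = rte_mempool_create_empty("stack_pool", nb_objs, obj_size,
			0, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;

	if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}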

v6 changes:
  * removed unneeded mp_ext variable.
  * added in a free for mp_stack after we're finished with it.

v5 changes:
  * Added the extra parameter required for the changed test_mempool_basic()
    function for user defined caches.

v4 changes:
  * Update the test to replace the custom handler test with the
    stack handler test rather than just adding a stack handler test.
    The custom handler code is very similar to the stack handler, so
    there's no need to have both.

v3 changes:
  * Updated based on the latest version (v15) of the Mempool Handler feature

v2 changes:
  * updated based on mailing list feedback (Thanks Stephen)
  * checkpatch fixes.

David Hunt (2)
  mempool: add stack (lifo) mempool handler
  test: migrate custom handler test to stack handler

^ permalink raw reply	[flat|nested] 48+ messages in thread

* [PATCH v6 1/2] mempool: add stack (lifo) mempool handler
  2016-07-01  7:46         ` [PATCH v6 0/2] mempool: add stack mempool handler David Hunt
@ 2016-07-01  7:46           ` David Hunt
  2016-07-01  7:46           ` [PATCH v6 2/2] test: migrate custom handler test to stack handler David Hunt
  2016-07-01  8:18           ` [PATCH v6 0/2] mempool: add stack mempool handler Olivier MATZ
  2 siblings, 0 replies; 48+ messages in thread
From: David Hunt @ 2016-07-01  7:46 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

This is a mempool handler that is useful for pipelining apps, where
the mempool cache doesn't really help - for example, where we have one
core doing Rx (and alloc), and another core doing Tx (and return). In
such a case, the mempool ring simply cycles through all the mbufs,
resulting in an LLC miss on every mbuf allocated when the number of
mbufs is large. A stack recycles buffers more effectively in this
case.
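
To make the scenario concrete, a rough sketch of such a pipeline follows:
one lcore allocates objects and hands them to a second lcore over a ring,
and the second lcore frees them back to the pool; with the stack handler
the objects just freed are the next ones allocated, so they tend to still
be cache-resident. This is not part of the patch - the pool, ring and
stop flag are assumed to be set up elsewhere, and the two functions match
lcore_function_t so they could be started with rte_eal_remote_launch().

#include <errno.h>
#include <rte_mempool.h>
#include <rte_ring.h>

struct pipeline {
	struct rte_mempool *mp;		/* pool using the "stack" ops */
	struct rte_ring *ring;		/* hand-off between the lcores */
};

static volatile int stop;

/* e.g. the Rx / alloc lcore */
static int
alloc_core(void *arg)
{
	struct pipeline *p = arg;
	void *obj;

	while (!stop) {
		if (rte_mempool_get(p->mp, &obj) < 0)
			continue;		/* pool momentarily empty */
		/* retry while the ring is full */
		while (rte_ring_enqueue(p->ring, obj) == -ENOBUFS)
			;
	}
	return 0;
}

/* e.g. the Tx / return lcore */
static int
free_core(void *arg)
{
	struct pipeline *p = arg;
	void *obj;

	while (!stop) {
		if (rte_ring_dequeue(p->ring, &obj) == 0)
			rte_mempool_put(p->mp, obj);
	}
	return 0;
}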

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 lib/librte_mempool/Makefile            |   1 +
 lib/librte_mempool/rte_mempool_stack.c | 145 +++++++++++++++++++++++++++++++++
 2 files changed, 146 insertions(+)
 create mode 100644 lib/librte_mempool/rte_mempool_stack.c

diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index a4c089e..057a6ab 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -44,6 +44,7 @@ LIBABIVER := 2
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_ring.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) +=  rte_mempool_stack.c
 # install includes
 SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
 
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/lib/librte_mempool/rte_mempool_stack.c
new file mode 100644
index 0000000..a92da69
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_stack.c
@@ -0,0 +1,145 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+struct rte_mempool_stack {
+	rte_spinlock_t sl;
+
+	uint32_t size;
+	uint32_t len;
+	void *objs[];
+};
+
+static int
+stack_alloc(struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s;
+	unsigned n = mp->size;
+	int size = sizeof(*s) + (n+16)*sizeof(void *);
+
+	/* Allocate our local memory structure */
+	s = rte_zmalloc_socket("mempool-stack",
+			size,
+			RTE_CACHE_LINE_SIZE,
+			mp->socket_id);
+	if (s == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
+		return -ENOMEM;
+	}
+
+	rte_spinlock_init(&s->sl);
+
+	s->size = n;
+	mp->pool_data = s;
+
+	return 0;
+}
+
+static int stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index;
+
+	rte_spinlock_lock(&s->sl);
+	cache_objs = &s->objs[s->len];
+
+	/* Is there sufficient space in the stack ? */
+	if ((s->len + n) > s->size) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOBUFS;
+	}
+
+	/* Add elements back into the cache */
+	for (index = 0; index < n; ++index, obj_table++)
+		cache_objs[index] = *obj_table;
+
+	s->len += n;
+
+	rte_spinlock_unlock(&s->sl);
+	return 0;
+}
+
+static int stack_dequeue(struct rte_mempool *mp, void **obj_table,
+		unsigned n)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+	void **cache_objs;
+	unsigned index, len;
+
+	rte_spinlock_lock(&s->sl);
+
+	if (unlikely(n > s->len)) {
+		rte_spinlock_unlock(&s->sl);
+		return -ENOENT;
+	}
+
+	cache_objs = s->objs;
+
+	for (index = 0, len = s->len - 1; index < n;
+			++index, len--, obj_table++)
+		*obj_table = cache_objs[len];
+
+	s->len -= n;
+	rte_spinlock_unlock(&s->sl);
+	return n;
+}
+
+static unsigned
+stack_get_count(const struct rte_mempool *mp)
+{
+	struct rte_mempool_stack *s = mp->pool_data;
+
+	return s->len;
+}
+
+static void
+stack_free(struct rte_mempool *mp)
+{
+	rte_free((void *)(mp->pool_data));
+}
+
+static struct rte_mempool_ops ops_stack = {
+	.name = "stack",
+	.alloc = stack_alloc,
+	.free = stack_free,
+	.enqueue = stack_enqueue,
+	.dequeue = stack_dequeue,
+	.get_count = stack_get_count
+};
+
+MEMPOOL_REGISTER_OPS(ops_stack);
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH v6 2/2] test: migrate custom handler test to stack handler
  2016-07-01  7:46         ` [PATCH v6 0/2] mempool: add stack mempool handler David Hunt
  2016-07-01  7:46           ` [PATCH v6 1/2] mempool: add stack (lifo) " David Hunt
@ 2016-07-01  7:46           ` David Hunt
  2016-07-01  8:18           ` [PATCH v6 0/2] mempool: add stack mempool handler Olivier MATZ
  2 siblings, 0 replies; 48+ messages in thread
From: David Hunt @ 2016-07-01  7:46 UTC (permalink / raw)
  To: dev; +Cc: olivier.matz, viktorin, jerin.jacob, shreyansh.jain, David Hunt

After introducing the stack handler in the previous commit,
we now have very similar code to the custom handler in test_mempool.c,
which creates a custom mempool based on simple mallocs.
The stack handler is a cleaner example of adding a new mempool handler,
so this commit replaces the custom handler test with a stack
handler test, and removes the custom handler code.

Signed-off-by: David Hunt <david.hunt@intel.com>
---
 app/test/test_mempool.c | 117 ++++++------------------------------------------
 1 file changed, 14 insertions(+), 103 deletions(-)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 63c61f3..4aec605 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -89,99 +89,6 @@
 static rte_atomic32_t synchro;
 
 /*
- * Simple example of custom mempool structure. Holds pointers to all the
- * elements which are simply malloc'd in this example.
- */
-struct custom_mempool {
-	rte_spinlock_t lock;
-	unsigned count;
-	unsigned size;
-	void *elts[];
-};
-
-/*
- * Loop through all the element pointers and allocate a chunk of memory, then
- * insert that memory into the ring.
- */
-static int
-custom_mempool_alloc(struct rte_mempool *mp)
-{
-	struct custom_mempool *cm;
-
-	cm = rte_zmalloc("custom_mempool",
-		sizeof(struct custom_mempool) + mp->size * sizeof(void *), 0);
-	if (cm == NULL)
-		return -ENOMEM;
-
-	rte_spinlock_init(&cm->lock);
-	cm->count = 0;
-	cm->size = mp->size;
-	mp->pool_data = cm;
-	return 0;
-}
-
-static void
-custom_mempool_free(struct rte_mempool *mp)
-{
-	rte_free((void *)(mp->pool_data));
-}
-
-static int
-custom_mempool_enqueue(struct rte_mempool *mp, void * const *obj_table,
-		unsigned n)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-	int ret = 0;
-
-	rte_spinlock_lock(&cm->lock);
-	if (cm->count + n > cm->size) {
-		ret = -ENOBUFS;
-	} else {
-		memcpy(&cm->elts[cm->count], obj_table, sizeof(void *) * n);
-		cm->count += n;
-	}
-	rte_spinlock_unlock(&cm->lock);
-	return ret;
-}
-
-
-static int
-custom_mempool_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-	int ret = 0;
-
-	rte_spinlock_lock(&cm->lock);
-	if (n > cm->count) {
-		ret = -ENOENT;
-	} else {
-		cm->count -= n;
-		memcpy(obj_table, &cm->elts[cm->count], sizeof(void *) * n);
-	}
-	rte_spinlock_unlock(&cm->lock);
-	return ret;
-}
-
-static unsigned
-custom_mempool_get_count(const struct rte_mempool *mp)
-{
-	struct custom_mempool *cm = (struct custom_mempool *)(mp->pool_data);
-
-	return cm->count;
-}
-
-static struct rte_mempool_ops mempool_ops_custom = {
-	.name = "custom_handler",
-	.alloc = custom_mempool_alloc,
-	.free = custom_mempool_free,
-	.enqueue = custom_mempool_enqueue,
-	.dequeue = custom_mempool_dequeue,
-	.get_count = custom_mempool_get_count,
-};
-
-MEMPOOL_REGISTER_OPS(mempool_ops_custom);
-
-/*
  * save the object number in the first 4 bytes of object data. All
  * other bytes are set to 0.
  */
@@ -599,7 +506,7 @@ test_mempool(void)
 {
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
-	struct rte_mempool *mp_ext = NULL;
+	struct rte_mempool *mp_stack = NULL;
 
 	rte_atomic32_init(&synchro);
 
@@ -629,25 +536,25 @@ test_mempool(void)
 	}
 
 	/* create a mempool with an external handler */
-	mp_ext = rte_mempool_create_empty("test_ext",
+	mp_stack = rte_mempool_create_empty("test_stack",
 		MEMPOOL_SIZE,
 		MEMPOOL_ELT_SIZE,
 		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
 		SOCKET_ID_ANY, 0);
 
-	if (mp_ext == NULL) {
-		printf("cannot allocate mp_ext mempool\n");
+	if (mp_stack == NULL) {
+		printf("cannot allocate mp_stack mempool\n");
 		goto err;
 	}
-	if (rte_mempool_set_ops_byname(mp_ext, "custom_handler", NULL) < 0) {
-		printf("cannot set custom handler\n");
+	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
+		printf("cannot set stack handler\n");
 		goto err;
 	}
-	if (rte_mempool_populate_default(mp_ext) < 0) {
-		printf("cannot populate mp_ext mempool\n");
+	if (rte_mempool_populate_default(mp_stack) < 0) {
+		printf("cannot populate mp_stack mempool\n");
 		goto err;
 	}
-	rte_mempool_obj_iter(mp_ext, my_obj_init, NULL);
+	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
 
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
@@ -686,14 +593,18 @@ test_mempool(void)
 	if (test_mempool_xmem_misc() < 0)
 		goto err;
 
+	/* test the stack handler */
+	if (test_mempool_basic(mp_stack, 1) < 0)
+		goto err;
+
 	rte_mempool_list_dump(stdout);
 
 	return 0;
 
 err:
+	rte_mempool_free(mp_stack);
 	rte_mempool_free(mp_nocache);
 	rte_mempool_free(mp_cache);
-	rte_mempool_free(mp_ext);
 	return -1;
 }
 
-- 
2.5.5

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* Re: [PATCH v6 0/2] mempool: add stack mempool handler
  2016-07-01  7:46         ` [PATCH v6 0/2] mempool: add stack mempool handler David Hunt
  2016-07-01  7:46           ` [PATCH v6 1/2] mempool: add stack (lifo) " David Hunt
  2016-07-01  7:46           ` [PATCH v6 2/2] test: migrate custom handler test to stack handler David Hunt
@ 2016-07-01  8:18           ` Olivier MATZ
  2016-07-01 10:41             ` Thomas Monjalon
  2 siblings, 1 reply; 48+ messages in thread
From: Olivier MATZ @ 2016-07-01  8:18 UTC (permalink / raw)
  To: David Hunt, dev; +Cc: viktorin, jerin.jacob, shreyansh.jain

Hi Dave,

On 07/01/2016 09:46 AM, David Hunt wrote:
> This patch set adds a lifo stack handler to the external mempool
> manager.
> 
> This patch utilises the mempool handler feature which allows the addition
> of new mempool handlers to DPDK.
> 
> v6 changes:
>   * removed unneeded mp_ext variable.
>   * added in a free for mp_stack after we're finished with it.
> 
> v5 changes:
>   * Added the extra parameter required for the changed test_mempool_basic()
>     function for user defined caches.
> 
> v4 changes:
>   * Update the test to replace the custom handler test with the
>     stack handler test rather than just adding a stack handler test.
>     The custom handler code is very similar to the stack handler, so
>     there's no need to have both.
> 
> v3 changes:
>   * Updated based on the latest version (v15) of the Mempool Handler feature
> 
> v2 changes:
>   * updated based on mailing list feedback (Thanks Stephen)
>   * checkpatch fixes.
> 
> David Hunt (2)
>   mempool: add stack (lifo) mempool handler
>   test: migrate custom handler test to stack handler
> 


Acked-by: Olivier Matz <olivier.matz@6wind.com>

Thanks!

^ permalink raw reply	[flat|nested] 48+ messages in thread

* Re: [PATCH v6 0/2] mempool: add stack mempool handler
  2016-07-01  8:18           ` [PATCH v6 0/2] mempool: add stack mempool handler Olivier MATZ
@ 2016-07-01 10:41             ` Thomas Monjalon
  0 siblings, 0 replies; 48+ messages in thread
From: Thomas Monjalon @ 2016-07-01 10:41 UTC (permalink / raw)
  To: David Hunt; +Cc: dev, Olivier MATZ, viktorin, jerin.jacob, shreyansh.jain

> > David Hunt (2)
> >   mempool: add stack (lifo) mempool handler
> >   test: migrate custom handler test to stack handler
> 
> Acked-by: Olivier Matz <olivier.matz@6wind.com>

Applied, thanks

^ permalink raw reply	[flat|nested] 48+ messages in thread

end of thread

Thread overview: 48+ messages
2016-05-05 18:29 [PATCH 0/2] mempool: add stack (fifo) mempool handler David Hunt
2016-05-05 18:29 ` [PATCH 1/2] " David Hunt
2016-05-05 21:28   ` Stephen Hemminger
2016-05-19 15:21     ` Hunt, David
2016-05-05 18:29 ` [PATCH 2/2] test: add autotest for external mempool stack handler David Hunt
2016-05-06  8:34 ` [PATCH 0/2] mempool: add stack (fifo) mempool handler Tan, Jianfeng
2016-05-06 23:02   ` Hunt, David
2016-05-19 14:48 ` v2 mempool: add stack (lifo) " David Hunt
2016-05-19 14:48   ` [PATCH v2 1/3] " David Hunt
2016-05-23 12:55     ` Olivier Matz
2016-06-15 10:10       ` Hunt, David
2016-06-17 14:18       ` Hunt, David
2016-06-20  8:17         ` Olivier Matz
2016-06-20 12:59           ` Hunt, David
2016-06-29 14:31             ` Olivier MATZ
2016-05-19 14:48   ` [PATCH v2 2/3] mempool: make declaration of handler structs const David Hunt
2016-05-23 12:55     ` Olivier Matz
2016-05-24 14:01       ` Hunt, David
2016-05-19 14:48   ` [PATCH v2 3/3] test: add autotest for external mempool stack handler David Hunt
2016-05-19 15:16   ` v2 mempool: add stack (lifo) mempool handler Hunt, David
2016-06-20 13:08   ` mempool: add stack " David Hunt
2016-06-20 13:08     ` [PATCH v3 1/2] mempool: add stack (lifo) " David Hunt
2016-06-20 13:25       ` Jerin Jacob
2016-06-20 13:54         ` Thomas Monjalon
2016-06-20 13:58           ` Ananyev, Konstantin
2016-06-20 14:22             ` Jerin Jacob
2016-06-20 17:56               ` Ananyev, Konstantin
2016-06-21  3:35                 ` Jerin Jacob
2016-06-21  9:28                   ` Ananyev, Konstantin
2016-06-21  9:44                     ` Olivier Matz
2016-06-21  3:42           ` Jerin Jacob
2016-06-20 13:08     ` [PATCH v3 2/2] test: add autotest for external mempool stack handler David Hunt
2016-06-30  7:41     ` [PATCH v4 0/2] mempool: add stack mempool handler David Hunt
2016-06-30  7:41       ` [PATCH v4 1/2] mempool: add stack (lifo) " David Hunt
2016-06-30  7:41       ` [PATCH v4 2/2] test: migrate custom handler test to stack handler David Hunt
2016-06-30  9:45         ` Thomas Monjalon
2016-06-30 17:36           ` Hunt, David
2016-06-30 17:46             ` Thomas Monjalon
2016-06-30 17:49               ` Hunt, David
2016-06-30 18:05       ` [PATCH v5 0/2] mempool: add stack mempool handler David Hunt
2016-06-30 18:05         ` [PATCH v5 1/2] mempool: add stack (lifo) " David Hunt
2016-06-30 18:05         ` [PATCH v5 2/2] test: migrate custom handler test to stack handler David Hunt
2016-07-01  7:32           ` Olivier MATZ
2016-07-01  7:46         ` [PATCH v6 0/2] mempool: add stack mempool handler David Hunt
2016-07-01  7:46           ` [PATCH v6 1/2] mempool: add stack (lifo) " David Hunt
2016-07-01  7:46           ` [PATCH v6 2/2] test: migrate custom handler test to stack handler David Hunt
2016-07-01  8:18           ` [PATCH v6 0/2] mempool: add stack mempool handler Olivier MATZ
2016-07-01 10:41             ` Thomas Monjalon
