* [PATCH 1/2] mempool: notify mempool area after mempool alloc
@ 2017-12-15 16:00 Pavan Nikhilesh
  2017-12-15 16:00 ` [PATCH 2/2] mempool/octeontx: clean up memory area registration Pavan Nikhilesh
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-15 16:00 UTC (permalink / raw)
  To: santosh.shukla, olivier.matz; +Cc: dev, Pavan Nikhilesh

Mempool creation needs to be completed first before notifying mempool to
register the mempool area.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
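
A minimal sketch (not part of the patch itself) of the ordering that
rte_mempool_populate_iova() ends up with once the hunk below is applied.
Identifiers are taken from the diff; the wrapper name and the simplified
signature are only illustrative, and the rest of the function body is
elided:

static int
populate_iova_order_sketch(struct rte_mempool *mp, char *vaddr,
			   rte_iova_t iova, size_t len)
{
	int ret;

	/* 1. create the internal ring/pool first, if not already done */
	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
		ret = rte_mempool_ops_alloc(mp);
		if (ret != 0)
			return ret;
		mp->flags |= MEMPOOL_F_POOL_CREATED;
	}

	/* 2. only then notify the memory area, so the driver's
	 *    register_memory_area callback can rely on the pool
	 *    (e.g. mp->pool_id) having been allocated already */
	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
	if (ret != -ENOTSUP && ret < 0)
		return ret;

	/* ... remainder of rte_mempool_populate_iova() unchanged ... */
	return 0;
}
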
 lib/librte_mempool/rte_mempool.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d50dba4..6d17022 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -367,11 +367,6 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	struct rte_mempool_memhdr *memhdr;
 	int ret;
 
-	/* Notify memory area to mempool */
-	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
-	if (ret != -ENOTSUP && ret < 0)
-		return ret;
-
 	/* create the internal ring if not already done */
 	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
 		ret = rte_mempool_ops_alloc(mp);
@@ -380,6 +375,11 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 		mp->flags |= MEMPOOL_F_POOL_CREATED;
 	}
 
+	/* Notify memory area to mempool */
+	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
+	if (ret != -ENOTSUP && ret < 0)
+		return ret;
+
 	/* mempool is already populated */
 	if (mp->populated_size >= mp->size)
 		return -ENOSPC;
-- 
2.7.4

* [PATCH 2/2] mempool/octeontx: clean up memory area registration
  2017-12-15 16:00 [PATCH 1/2] mempool: notify mempool area after mempool alloc Pavan Nikhilesh
@ 2017-12-15 16:00 ` Pavan Nikhilesh
  2017-12-18  5:00   ` santosh
  2017-12-18  4:56 ` [PATCH 1/2] mempool: notify mempool area after mempool alloc santosh
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-15 16:00 UTC (permalink / raw)
  To: santosh.shukla, olivier.matz; +Cc: dev, Pavan Nikhilesh

Clean up dependency between alloc and memory area registration, this
removes the need for SLIST data structure and octeontx_pool_list.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
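
A minimal sketch (not part of the patch itself) of what the octeontx
register_memory_area callback reduces to once the SLIST bookkeeping is
gone. Identifiers are taken from the hunks below; the function name is
only illustrative, and it assumes mp->pool_id has already been set by
octeontx_fpavf_alloc(), which patch 1/2 guarantees runs first:

static int
octeontx_register_memory_area_sketch(const struct rte_mempool *mp,
				     char *vaddr, rte_iova_t paddr,
				     size_t len)
{
	uint8_t gpool;
	uintptr_t pool_bar;

	RTE_SET_USED(paddr);

	/* recover the gpool index and the pool BAR from mp->pool_id */
	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;

	/* program the pool's VA range directly into the FPA VF,
	 * replacing the deferred setup done via octeontx_pool_list */
	return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
}
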
 drivers/mempool/octeontx/octeontx_fpavf.c       | 23 ++------
 drivers/mempool/octeontx/octeontx_fpavf.h       |  6 ++-
 drivers/mempool/octeontx/rte_mempool_octeontx.c | 72 ++-----------------------
 3 files changed, 12 insertions(+), 89 deletions(-)

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 3bc50f3..28f431e 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -386,8 +386,8 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
 	return ret;
 }
 
-static int
-octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
 			  void *memva, uint16_t gpool)
 {
 	uint64_t va_end;
@@ -509,12 +509,9 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node_id)
+				unsigned int buf_offset, int node_id)
 {
 	unsigned int gpool;
-	void *memva;
-	unsigned long memsz;
 	uintptr_t gpool_handle;
 	uintptr_t pool_bar;
 	int res;
@@ -522,9 +519,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 	RTE_SET_USED(node_id);
 	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
 
-	if (unlikely(*va_start == NULL))
-		goto error_end;
-
 	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
 	if (object_size > FPA_MAX_OBJ_SIZE) {
 		errno = EINVAL;
@@ -567,15 +561,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 		goto error_pool_destroy;
 	}
 
-	/* vf pool setup */
-	memsz = object_size * object_count;
-	memva = *va_start;
-	res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
-	if (res < 0) {
-		errno = res;
-		goto error_gaura_detach;
-	}
-
 	/* Release lock */
 	rte_spinlock_unlock(&fpadev.lock);
 
@@ -591,8 +576,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 
 	return gpool_handle;
 
-error_gaura_detach:
-	(void) octeontx_fpapf_aura_detach(gpool);
 error_pool_destroy:
 	octeontx_fpavf_free(gpool);
 	octeontx_fpapf_pool_destroy(gpool);
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 1d09f00..bc5dc3b 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -114,8 +114,10 @@ do {							\
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node);
+				unsigned int buf_offset, int node);
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+			  void *memva, uint16_t gpool);
 int
 octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
 int
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index e89355c..fc0cab9 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -36,55 +36,18 @@
 
 #include "octeontx_fpavf.h"
 
-/*
- * Per-pool descriptor.
- * Links mempool with the corresponding memzone,
- * that provides memory under the pool's elements.
- */
-struct octeontx_pool_info {
-	const struct rte_mempool *mp;
-	uintptr_t mz_addr;
-
-	SLIST_ENTRY(octeontx_pool_info) link;
-};
-
-SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
-
-/* List of the allocated pools */
-static struct octeontx_pool_list octeontx_pool_head =
-				SLIST_HEAD_INITIALIZER(octeontx_pool_head);
-/* Spinlock to protect pool list */
-static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
-
 static int
 octeontx_fpavf_alloc(struct rte_mempool *mp)
 {
 	uintptr_t pool;
-	struct octeontx_pool_info *pool_info;
 	uint32_t memseg_count = mp->size;
 	uint32_t object_size;
-	uintptr_t va_start;
 	int rc = 0;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		return -ENXIO;
-	}
-
-	/* virtual hugepage mapped addr */
-	va_start = pool_info->mz_addr;
-	rte_spinlock_unlock(&pool_list_lock);
-
 	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
 
 	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
 						OCTEONTX_FPAVF_BUF_OFFSET,
-						(char **)&va_start,
 						mp->socket_id);
 	rc = octeontx_fpa_bufpool_block_size(pool);
 	if (rc < 0)
@@ -109,27 +72,9 @@ octeontx_fpavf_alloc(struct rte_mempool *mp)
 static void
 octeontx_fpavf_free(struct rte_mempool *mp)
 {
-	struct octeontx_pool_info *pool_info;
 	uintptr_t pool;
-
 	pool = (uintptr_t)mp->pool_id;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		rte_panic("%s: trying to free pool with no valid metadata",
-		    __func__);
-	}
-
-	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-
-	rte_free(pool_info);
 	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
 }
 
@@ -222,21 +167,14 @@ static int
 octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
 				    char *vaddr, rte_iova_t paddr, size_t len)
 {
-	struct octeontx_pool_info *pool_info;
-
 	RTE_SET_USED(paddr);
-	RTE_SET_USED(len);
+	uint8_t gpool;
+	uintptr_t pool_bar;
 
-	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
-	if (pool_info == NULL)
-		return -ENOMEM;
+	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
 
-	pool_info->mp = mp;
-	pool_info->mz_addr = (uintptr_t)vaddr;
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-	return 0;
+	return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
 }
 
 static struct rte_mempool_ops octeontx_fpavf_ops = {
-- 
2.7.4

* Re: [PATCH 1/2] mempool: notify mempool area after mempool alloc
  2017-12-15 16:00 [PATCH 1/2] mempool: notify mempool area after mempool alloc Pavan Nikhilesh
  2017-12-15 16:00 ` [PATCH 2/2] mempool/octeontx: clean up memory area registration Pavan Nikhilesh
@ 2017-12-18  4:56 ` santosh
  2017-12-19 18:09 ` [PATCH v2 " Pavan Nikhilesh
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: santosh @ 2017-12-18  4:56 UTC (permalink / raw)
  To: Pavan Nikhilesh, olivier.matz; +Cc: dev


On Friday 15 December 2017 09:30 PM, Pavan Nikhilesh wrote:
> Mempool creation needs to be completed first before notifying mempool to
> register the mempool area.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---

LGTM.
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>

* Re: [PATCH 2/2] mempool/octeontx: clean up memory area registration
  2017-12-15 16:00 ` [PATCH 2/2] mempool/octeontx: clean up memory area registration Pavan Nikhilesh
@ 2017-12-18  5:00   ` santosh
  2017-12-18 21:54     ` Pavan Nikhilesh
  0 siblings, 1 reply; 12+ messages in thread
From: santosh @ 2017-12-18  5:00 UTC (permalink / raw)
  To: Pavan Nikhilesh, olivier.matz; +Cc: dev


On Friday 15 December 2017 09:30 PM, Pavan Nikhilesh wrote:
> Clean up dependency between alloc and memory area registration, this
> removes the need for SLIST data structure and octeontx_pool_list.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---

nits: s/clean up dependency/ clean up the dependency.

Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>

* Re: [PATCH 2/2] mempool/octeontx: clean up memory area registration
  2017-12-18  5:00   ` santosh
@ 2017-12-18 21:54     ` Pavan Nikhilesh
  0 siblings, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-18 21:54 UTC (permalink / raw)
  To: santosh, olivier.matz; +Cc: dev

On Mon, Dec 18, 2017 at 10:30:59AM +0530, santosh wrote:
>
> On Friday 15 December 2017 09:30 PM, Pavan Nikhilesh wrote:
> > Clean up dependency between alloc and memory area registration, this
> > removes the need for SLIST data structure and octeontx_pool_list.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > ---
>
> nits: s/clean up dependency/ clean up the dependency.
>
> Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
>

Thanks for the review and ack; I will redo the commit message in v2.

Pavan

* [PATCH v2 1/2] mempool: notify mempool area after mempool alloc
  2017-12-15 16:00 [PATCH 1/2] mempool: notify mempool area after mempool alloc Pavan Nikhilesh
  2017-12-15 16:00 ` [PATCH 2/2] mempool/octeontx: clean up memory area registration Pavan Nikhilesh
  2017-12-18  4:56 ` [PATCH 1/2] mempool: notify mempool area after mempool alloc santosh
@ 2017-12-19 18:09 ` Pavan Nikhilesh
  2017-12-19 18:09   ` [PATCH v2 2/2] mempool/octeontx: clean up memory area registration Pavan Nikhilesh
  2017-12-22 15:11 ` [PATCH 1/2] mempool: notify mempool area after mempool alloc Olivier MATZ
  2017-12-24 12:47 ` [PATCH v3 1/2] mempool: fix first memory area notification Pavan Nikhilesh
  4 siblings, 1 reply; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-19 18:09 UTC (permalink / raw)
  To: santosh.shukla, olivier.matz; +Cc: dev, Pavan Nikhilesh

Mempool creation needs to be completed first before notifying mempool to
register the mempool area.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---

 v2 Changes:
  - Redo commit log.

 lib/librte_mempool/rte_mempool.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d50dba493..6d1702252 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -367,11 +367,6 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	struct rte_mempool_memhdr *memhdr;
 	int ret;

-	/* Notify memory area to mempool */
-	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
-	if (ret != -ENOTSUP && ret < 0)
-		return ret;
-
 	/* create the internal ring if not already done */
 	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
 		ret = rte_mempool_ops_alloc(mp);
@@ -380,6 +375,11 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 		mp->flags |= MEMPOOL_F_POOL_CREATED;
 	}

+	/* Notify memory area to mempool */
+	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
+	if (ret != -ENOTSUP && ret < 0)
+		return ret;
+
 	/* mempool is already populated */
 	if (mp->populated_size >= mp->size)
 		return -ENOSPC;
--
2.14.1

* [PATCH v2 2/2] mempool/octeontx: clean up memory area registration
  2017-12-19 18:09 ` [PATCH v2 " Pavan Nikhilesh
@ 2017-12-19 18:09   ` Pavan Nikhilesh
  0 siblings, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-19 18:09 UTC (permalink / raw)
  To: santosh.shukla, olivier.matz; +Cc: dev, Pavan Nikhilesh

Clean up the dependency between alloc and memory area registration, this
removes the need for SLIST data structure and octeontx_pool_list.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
 drivers/mempool/octeontx/octeontx_fpavf.c       | 23 ++------
 drivers/mempool/octeontx/octeontx_fpavf.h       |  6 ++-
 drivers/mempool/octeontx/rte_mempool_octeontx.c | 72 ++-----------------------
 3 files changed, 12 insertions(+), 89 deletions(-)

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 3bc50f35d..28f431ed9 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -386,8 +386,8 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
 	return ret;
 }
 
-static int
-octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
 			  void *memva, uint16_t gpool)
 {
 	uint64_t va_end;
@@ -509,12 +509,9 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node_id)
+				unsigned int buf_offset, int node_id)
 {
 	unsigned int gpool;
-	void *memva;
-	unsigned long memsz;
 	uintptr_t gpool_handle;
 	uintptr_t pool_bar;
 	int res;
@@ -522,9 +519,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 	RTE_SET_USED(node_id);
 	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
 
-	if (unlikely(*va_start == NULL))
-		goto error_end;
-
 	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
 	if (object_size > FPA_MAX_OBJ_SIZE) {
 		errno = EINVAL;
@@ -567,15 +561,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 		goto error_pool_destroy;
 	}
 
-	/* vf pool setup */
-	memsz = object_size * object_count;
-	memva = *va_start;
-	res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
-	if (res < 0) {
-		errno = res;
-		goto error_gaura_detach;
-	}
-
 	/* Release lock */
 	rte_spinlock_unlock(&fpadev.lock);
 
@@ -591,8 +576,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 
 	return gpool_handle;
 
-error_gaura_detach:
-	(void) octeontx_fpapf_aura_detach(gpool);
 error_pool_destroy:
 	octeontx_fpavf_free(gpool);
 	octeontx_fpapf_pool_destroy(gpool);
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 1d09f0079..bc5dc3bab 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -114,8 +114,10 @@ do {							\
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node);
+				unsigned int buf_offset, int node);
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+			  void *memva, uint16_t gpool);
 int
 octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
 int
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index e89355cdd..fc0cab944 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -36,55 +36,18 @@
 
 #include "octeontx_fpavf.h"
 
-/*
- * Per-pool descriptor.
- * Links mempool with the corresponding memzone,
- * that provides memory under the pool's elements.
- */
-struct octeontx_pool_info {
-	const struct rte_mempool *mp;
-	uintptr_t mz_addr;
-
-	SLIST_ENTRY(octeontx_pool_info) link;
-};
-
-SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
-
-/* List of the allocated pools */
-static struct octeontx_pool_list octeontx_pool_head =
-				SLIST_HEAD_INITIALIZER(octeontx_pool_head);
-/* Spinlock to protect pool list */
-static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
-
 static int
 octeontx_fpavf_alloc(struct rte_mempool *mp)
 {
 	uintptr_t pool;
-	struct octeontx_pool_info *pool_info;
 	uint32_t memseg_count = mp->size;
 	uint32_t object_size;
-	uintptr_t va_start;
 	int rc = 0;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		return -ENXIO;
-	}
-
-	/* virtual hugepage mapped addr */
-	va_start = pool_info->mz_addr;
-	rte_spinlock_unlock(&pool_list_lock);
-
 	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
 
 	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
 						OCTEONTX_FPAVF_BUF_OFFSET,
-						(char **)&va_start,
 						mp->socket_id);
 	rc = octeontx_fpa_bufpool_block_size(pool);
 	if (rc < 0)
@@ -109,27 +72,9 @@ octeontx_fpavf_alloc(struct rte_mempool *mp)
 static void
 octeontx_fpavf_free(struct rte_mempool *mp)
 {
-	struct octeontx_pool_info *pool_info;
 	uintptr_t pool;
-
 	pool = (uintptr_t)mp->pool_id;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		rte_panic("%s: trying to free pool with no valid metadata",
-		    __func__);
-	}
-
-	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-
-	rte_free(pool_info);
 	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
 }
 
@@ -222,21 +167,14 @@ static int
 octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
 				    char *vaddr, rte_iova_t paddr, size_t len)
 {
-	struct octeontx_pool_info *pool_info;
-
 	RTE_SET_USED(paddr);
-	RTE_SET_USED(len);
+	uint8_t gpool;
+	uintptr_t pool_bar;
 
-	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
-	if (pool_info == NULL)
-		return -ENOMEM;
+	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
 
-	pool_info->mp = mp;
-	pool_info->mz_addr = (uintptr_t)vaddr;
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-	return 0;
+	return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
 }
 
 static struct rte_mempool_ops octeontx_fpavf_ops = {
-- 
2.14.1

* Re: [PATCH 1/2] mempool: notify mempool area after mempool alloc
  2017-12-15 16:00 [PATCH 1/2] mempool: notify mempool area after mempool alloc Pavan Nikhilesh
                   ` (2 preceding siblings ...)
  2017-12-19 18:09 ` [PATCH v2 " Pavan Nikhilesh
@ 2017-12-22 15:11 ` Olivier MATZ
  2017-12-24  7:07   ` Pavan Nikhilesh
  2017-12-24 12:47 ` [PATCH v3 1/2] mempool: fix first memory area notification Pavan Nikhilesh
  4 siblings, 1 reply; 12+ messages in thread
From: Olivier MATZ @ 2017-12-22 15:11 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: santosh.shukla, dev

On Fri, Dec 15, 2017 at 09:30:30PM +0530, Pavan Nikhilesh wrote:
> Mempool creation needs to be completed first before notifying mempool to
> register the mempool area.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>

Looks good to me.

Did you see any issue?
If yes, can you please resubmit the patch
with an updated title "mempool: fix first memory area notification",
a Fixes: and Cc: stable@dpdk.org?

Thanks,
Olivier

* Re: [PATCH 1/2] mempool: notify mempool area after mempool alloc
  2017-12-22 15:11 ` [PATCH 1/2] mempool: notify mempool area after mempool alloc Olivier MATZ
@ 2017-12-24  7:07   ` Pavan Nikhilesh
  0 siblings, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-24  7:07 UTC (permalink / raw)
  To: Olivier MATZ, santosh.shukla; +Cc: dev

On Fri, Dec 22, 2017 at 04:11:21PM +0100, Olivier MATZ wrote:
> On Fri, Dec 15, 2017 at 09:30:30PM +0530, Pavan Nikhilesh wrote:
> > Mempool creation needs to be completed first before notifying mempool to
> > register the mempool area.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>
> Looks good to me.
>
> Did you see any issue?
> If yes, can you please resubmit the patch
> with an updated title "mempool: fix first memory area notification",
> a Fixes: and Cc: stable@dpdk.org?

I will roll out a v2 with those updates.

>
> Thanks,
> Olivier

Thanks,
Pavan.

* [PATCH v3 1/2] mempool: fix first memory area notification
  2017-12-15 16:00 [PATCH 1/2] mempool: notify mempool area after mempool alloc Pavan Nikhilesh
                   ` (3 preceding siblings ...)
  2017-12-22 15:11 ` [PATCH 1/2] mempool: notify mempool area after mempool alloc Olivier MATZ
@ 2017-12-24 12:47 ` Pavan Nikhilesh
  2017-12-24 12:47   ` [PATCH v3 2/2] mempool/octeontx: fix memory area registration Pavan Nikhilesh
  2018-01-12 17:36   ` [PATCH v3 1/2] mempool: fix first memory area notification Thomas Monjalon
  4 siblings, 2 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-24 12:47 UTC (permalink / raw)
  To: santosh.shukla, olivier.matz; +Cc: dev, Pavan Nikhilesh, stable

Mempool creation needs to be completed first before notifying mempool to
register the mempool area.

Fixes: 12b8cc1a7e86 ("mempool: notify memory area to pool")
Cc: stable@dpdk.org

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---

 v3 Changes:
   - Redo commit title as fix.

 v2 Changes:
   - Redo commit log.

 lib/librte_mempool/rte_mempool.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d50dba493..6d1702252 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -367,11 +367,6 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 	struct rte_mempool_memhdr *memhdr;
 	int ret;

-	/* Notify memory area to mempool */
-	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
-	if (ret != -ENOTSUP && ret < 0)
-		return ret;
-
 	/* create the internal ring if not already done */
 	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
 		ret = rte_mempool_ops_alloc(mp);
@@ -380,6 +375,11 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
 		mp->flags |= MEMPOOL_F_POOL_CREATED;
 	}

+	/* Notify memory area to mempool */
+	ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
+	if (ret != -ENOTSUP && ret < 0)
+		return ret;
+
 	/* mempool is already populated */
 	if (mp->populated_size >= mp->size)
 		return -ENOSPC;
--
2.14.1

* [PATCH v3 2/2] mempool/octeontx: fix memory area registration
  2017-12-24 12:47 ` [PATCH v3 1/2] mempool: fix first memory area notification Pavan Nikhilesh
@ 2017-12-24 12:47   ` Pavan Nikhilesh
  2018-01-12 17:36   ` [PATCH v3 1/2] mempool: fix first memory area notification Thomas Monjalon
  1 sibling, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2017-12-24 12:47 UTC (permalink / raw)
  To: santosh.shukla, olivier.matz; +Cc: dev, Pavan Nikhilesh, stable

Clean up the dependency between alloc and memory area registration, this
removes the need for SLIST data structure and octeontx_pool_list.

Fixes: 2baa3f0b7de5 ("mempool/octeontx: support memory area ops")
Cc: stable@dpdk.org

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
 drivers/mempool/octeontx/octeontx_fpavf.c       | 23 ++------
 drivers/mempool/octeontx/octeontx_fpavf.h       |  6 ++-
 drivers/mempool/octeontx/rte_mempool_octeontx.c | 72 ++-----------------------
 3 files changed, 12 insertions(+), 89 deletions(-)

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 3bc50f35d..28f431ed9 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -386,8 +386,8 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
 	return ret;
 }
 
-static int
-octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
 			  void *memva, uint16_t gpool)
 {
 	uint64_t va_end;
@@ -509,12 +509,9 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node_id)
+				unsigned int buf_offset, int node_id)
 {
 	unsigned int gpool;
-	void *memva;
-	unsigned long memsz;
 	uintptr_t gpool_handle;
 	uintptr_t pool_bar;
 	int res;
@@ -522,9 +519,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 	RTE_SET_USED(node_id);
 	RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
 
-	if (unlikely(*va_start == NULL))
-		goto error_end;
-
 	object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
 	if (object_size > FPA_MAX_OBJ_SIZE) {
 		errno = EINVAL;
@@ -567,15 +561,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 		goto error_pool_destroy;
 	}
 
-	/* vf pool setup */
-	memsz = object_size * object_count;
-	memva = *va_start;
-	res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
-	if (res < 0) {
-		errno = res;
-		goto error_gaura_detach;
-	}
-
 	/* Release lock */
 	rte_spinlock_unlock(&fpadev.lock);
 
@@ -591,8 +576,6 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 
 	return gpool_handle;
 
-error_gaura_detach:
-	(void) octeontx_fpapf_aura_detach(gpool);
 error_pool_destroy:
 	octeontx_fpavf_free(gpool);
 	octeontx_fpapf_pool_destroy(gpool);
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 1d09f0079..bc5dc3bab 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -114,8 +114,10 @@ do {							\
 
 uintptr_t
 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
-				unsigned int buf_offset, char **va_start,
-				int node);
+				unsigned int buf_offset, int node);
+int
+octeontx_fpavf_pool_set_range(uintptr_t handle, unsigned long memsz,
+			  void *memva, uint16_t gpool);
 int
 octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
 int
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index e89355cdd..fc0cab944 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -36,55 +36,18 @@
 
 #include "octeontx_fpavf.h"
 
-/*
- * Per-pool descriptor.
- * Links mempool with the corresponding memzone,
- * that provides memory under the pool's elements.
- */
-struct octeontx_pool_info {
-	const struct rte_mempool *mp;
-	uintptr_t mz_addr;
-
-	SLIST_ENTRY(octeontx_pool_info) link;
-};
-
-SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
-
-/* List of the allocated pools */
-static struct octeontx_pool_list octeontx_pool_head =
-				SLIST_HEAD_INITIALIZER(octeontx_pool_head);
-/* Spinlock to protect pool list */
-static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
-
 static int
 octeontx_fpavf_alloc(struct rte_mempool *mp)
 {
 	uintptr_t pool;
-	struct octeontx_pool_info *pool_info;
 	uint32_t memseg_count = mp->size;
 	uint32_t object_size;
-	uintptr_t va_start;
 	int rc = 0;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		return -ENXIO;
-	}
-
-	/* virtual hugepage mapped addr */
-	va_start = pool_info->mz_addr;
-	rte_spinlock_unlock(&pool_list_lock);
-
 	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
 
 	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
 						OCTEONTX_FPAVF_BUF_OFFSET,
-						(char **)&va_start,
 						mp->socket_id);
 	rc = octeontx_fpa_bufpool_block_size(pool);
 	if (rc < 0)
@@ -109,27 +72,9 @@ octeontx_fpavf_alloc(struct rte_mempool *mp)
 static void
 octeontx_fpavf_free(struct rte_mempool *mp)
 {
-	struct octeontx_pool_info *pool_info;
 	uintptr_t pool;
-
 	pool = (uintptr_t)mp->pool_id;
 
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
-		if (pool_info->mp == mp)
-			break;
-	}
-
-	if (pool_info == NULL) {
-		rte_spinlock_unlock(&pool_list_lock);
-		rte_panic("%s: trying to free pool with no valid metadata",
-		    __func__);
-	}
-
-	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-
-	rte_free(pool_info);
 	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
 }
 
@@ -222,21 +167,14 @@ static int
 octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
 				    char *vaddr, rte_iova_t paddr, size_t len)
 {
-	struct octeontx_pool_info *pool_info;
-
 	RTE_SET_USED(paddr);
-	RTE_SET_USED(len);
+	uint8_t gpool;
+	uintptr_t pool_bar;
 
-	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
-	if (pool_info == NULL)
-		return -ENOMEM;
+	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
+	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
 
-	pool_info->mp = mp;
-	pool_info->mz_addr = (uintptr_t)vaddr;
-	rte_spinlock_lock(&pool_list_lock);
-	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
-	rte_spinlock_unlock(&pool_list_lock);
-	return 0;
+	return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
 }
 
 static struct rte_mempool_ops octeontx_fpavf_ops = {
-- 
2.14.1

* Re: [PATCH v3 1/2] mempool: fix first memory area notification
  2017-12-24 12:47 ` [PATCH v3 1/2] mempool: fix first memory area notification Pavan Nikhilesh
  2017-12-24 12:47   ` [PATCH v3 2/2] mempool/octeontx: fix memory area registration Pavan Nikhilesh
@ 2018-01-12 17:36   ` Thomas Monjalon
  1 sibling, 0 replies; 12+ messages in thread
From: Thomas Monjalon @ 2018-01-12 17:36 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: dev, santosh.shukla, olivier.matz, stable

24/12/2017 13:47, Pavan Nikhilesh:
> Mempool creation needs to be completed first before notifying mempool to
> register the mempool area.
> 
> Fixes: 12b8cc1a7e86 ("mempool: notify memory area to pool")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>

Series applied, thanks
