All of lore.kernel.org
 help / color / mirror / Atom feed
From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
To: olivier.matz@6wind.com, dev@dpdk.org
Cc: thomas@monjalon.net, jerin.jacob@caviumnetworks.com,
	hemant.agrawal@nxp.com,
	Santosh Shukla <santosh.shukla@caviumnetworks.com>
Subject: [PATCH v3 05/10] mempool/octeontx: add support for free
Date: Sun,  8 Oct 2017 18:10:06 +0530	[thread overview]
Message-ID: <20171008124011.1577-6-santosh.shukla@caviumnetworks.com> (raw)
In-Reply-To: <20171008124011.1577-1-santosh.shukla@caviumnetworks.com>

Upon a pool free request from the application, the Octeon FPA free
path does the following:
- Uses mbox to reset the fpapf pool setup.
- Frees the fpavf resources.

Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
 drivers/mempool/octeontx/octeontx_fpavf.c       | 111 ++++++++++++++++++++++++
 drivers/mempool/octeontx/octeontx_fpavf.h       |   2 +
 drivers/mempool/octeontx/rte_mempool_octeontx.c |  12 ++-
 3 files changed, 124 insertions(+), 1 deletion(-)

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index c0c9d8325..44253b09e 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -581,6 +581,117 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 	return (uintptr_t)NULL;
 }
 
+/*
+ * Destroy a buffer pool: verify no buffers are outstanding, drain and
+ * invalidate the gpool, reset the fpapf pool setup via mbox and free
+ * the fpavf resources.
+ *
+ * Returns 0 on success, -EINVAL for an invalid handle, or -EBUSY if
+ * buffers are still held by the application.
+ */
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+{
+	void **node, **curr, *head = NULL;
+	uint64_t sz;
+	uint64_t cnt, avail;
+	uint8_t gpool;
+	uintptr_t pool_bar;
+	int ret;
+
+	RTE_SET_USED(node_id);
+
+	/* Wait for all outstanding writes to be committed */
+	rte_smp_wmb();
+
+	if (unlikely(!octeontx_fpa_handle_valid(handle)))
+		return -EINVAL;
+
+	/* get the pool */
+	gpool = octeontx_fpa_bufpool_gpool(handle);
+
+	/* Get pool bar address from handle */
+	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+	/* Check for no outstanding buffers */
+	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+					FPA_VF_VHAURA_CNT(gpool)));
+	if (cnt) {
+		/* %PRIu64 matches uint64_t portably (cnt); %ld does not */
+		fpavf_log_dbg("buffer exist in pool cnt %" PRIu64 "\n", cnt);
+		return -EBUSY;
+	}
+
+	rte_spinlock_lock(&fpadev.lock);
+
+	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+				FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+	/* Prepare to empty the entire POOL */
+	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
+			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
+			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+	/* Empty the pool */
+	/* Invalidate the POOL */
+	octeontx_gpool_free(gpool);
+
+	/* Process all buffers in the pool */
+	while (avail--) {
+
+		/* Yank a buffer from the pool */
+		node = (void *)(uintptr_t)
+			fpavf_read64((void *)
+				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+
+		if (node == NULL) {
+			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
+				      gpool, avail);
+			break;
+		}
+
+		/* Insert it into an ordered linked list */
+		for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+			if ((uintptr_t)node <= (uintptr_t)curr[0])
+				break;
+		}
+		node[0] = curr[0];
+		curr[0] = node;
+	}
+
+	/* Verify the linked list to be a perfect series */
+	sz = octeontx_fpa_bufpool_block_size(handle) << 7;
+	for (curr = head; curr != NULL && curr[0] != NULL;
+		curr = curr[0]) {
+		if (curr == curr[0] ||
+			((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
+			fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
+				      gpool, curr, curr[0]);
+		}
+	}
+
+	/* Disable pool operation */
+	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+			 FPA_VF_VHPOOL_START_ADDR(gpool)));
+	fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+			FPA_VF_VHPOOL_END_ADDR(gpool)));
+
+	(void)octeontx_fpapf_pool_destroy(gpool);
+
+	/* Deactivate the AURA */
+	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+	ret = octeontx_fpapf_aura_detach(gpool);
+	if (ret) {
+		fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
+			      gpool, ret);
+	}
+
+	/* Free VF */
+	(void)octeontx_fpavf_free(gpool);
+
+	rte_spinlock_unlock(&fpadev.lock);
+	return 0;
+}
+
 static void
 octeontx_fpavf_setup(void)
 {
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 23a458363..28440e810 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -136,6 +136,8 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 				unsigned int buf_offset, char **va_start,
 				int node);
 int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
+int
 octeontx_fpa_bufpool_block_size(uintptr_t handle);
 
 static __rte_always_inline uint8_t
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index d930a81f9..6ac4b7dc0 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -74,10 +74,20 @@ octeontx_fpavf_alloc(struct rte_mempool *mp)
 	return rc;
 }
 
+/* rte_mempool_ops .free handler: tear down the HW pool backing @mp. */
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+	uintptr_t pool;
+
+	/* pool_id is presumably the fpavf handle stored by the alloc op
+	 * (octeontx_fpavf_alloc) — TODO confirm against that path.
+	 */
+	pool = (uintptr_t)mp->pool_id;
+
+	/* NOTE(review): destroy can fail (-EINVAL/-EBUSY) but the ops
+	 * .free callback returns void, so the error is silently dropped
+	 * here — consider at least logging it.
+	 */
+	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
 static struct rte_mempool_ops octeontx_fpavf_ops = {
 	.name = "octeontx_fpavf",
 	.alloc = octeontx_fpavf_alloc,
-	.free = NULL,
+	.free = octeontx_fpavf_free,
 	.enqueue = NULL,
 	.dequeue = NULL,
 	.get_count = NULL,
-- 
2.14.1

  parent reply	other threads:[~2017-10-08 12:41 UTC|newest]

Thread overview: 78+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-08-24 13:28 [PATCH v1 00/11] Cavium Octeontx external mempool driver Santosh Shukla
2017-08-24 13:28 ` [PATCH v1 01/11] mempool/octeontx: add HW constants Santosh Shukla
2017-08-24 13:28 ` [PATCH v1 02/11] mempool/octeontx: add build and log infrastructure Santosh Shukla
2017-08-24 13:28 ` [PATCH v1 03/11] mempool/octeontx: probe fpavf pcie devices Santosh Shukla
2017-08-24 13:28 ` [PATCH v1 04/11] mempool/octeontx: implement pool alloc Santosh Shukla
2017-08-24 13:28 ` [PATCH v1 05/11] mempool/octeontx: implement pool free Santosh Shukla
2017-08-24 13:28 ` [PATCH v1 06/11] mempool/octeontx: implement pool enq and deq Santosh Shukla
2017-08-24 13:28 ` [PATCH v1 07/11] mempool/octeontx: implement pool get count Santosh Shukla
2017-08-24 13:29 ` [PATCH v1 08/11] mempool/octeontx: implement pool get capability Santosh Shukla
2017-08-24 13:29 ` [PATCH v1 09/11] mempool/octeontx: implement pool update range Santosh Shukla
2017-08-24 13:29 ` [PATCH v1 10/11] mempool/octeontx: translate handle to pool Santosh Shukla
2017-08-24 13:29 ` [PATCH v1 11/11] doc: add mempool and octeontx mempool device Santosh Shukla
2017-08-31  6:37 ` [PATCH v2 00/10] Cavium Octeontx external mempool driver Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 01/10] mempool/octeontx: add HW constants Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 02/10] mempool/octeontx: add build and log infrastructure Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 03/10] mempool/octeontx: probe fpavf pcie devices Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 04/10] mempool/octeontx: implement pool alloc Santosh Shukla
2017-10-06 20:51     ` Thomas Monjalon
2017-10-07  3:49       ` santosh
2017-08-31  6:37   ` [PATCH v2 05/10] mempool/octeontx: implement pool free Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 06/10] mempool/octeontx: implement pool enq and deq Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 07/10] mempool/octeontx: implement pool get count Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 08/10] mempool/octeontx: implement pool get capability Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 09/10] mempool/octeontx: implement pool update range Santosh Shukla
2017-08-31  6:37   ` [PATCH v2 10/10] doc: add mempool and octeontx mempool device Santosh Shukla
2017-09-19 13:52     ` Mcnamara, John
2017-09-19  8:29   ` [PATCH v2 00/10] Cavium Octeontx external mempool driver santosh
2017-10-06 20:55     ` Thomas Monjalon
2017-10-07  3:51       ` santosh
2017-10-07  4:26         ` Ferruh Yigit
2017-10-07  4:46           ` santosh
2017-10-08 13:12             ` santosh
2017-10-08 12:40   ` [PATCH v3 " Santosh Shukla
2017-10-08 12:40     ` [PATCH v3 01/10] mempool/octeontx: add HW constants Santosh Shukla
2017-10-08 12:40     ` [PATCH v3 02/10] mempool/octeontx: add build and log infrastructure Santosh Shukla
2017-10-08 17:16       ` Thomas Monjalon
2017-10-09  5:03         ` santosh
2017-10-08 12:40     ` [PATCH v3 03/10] mempool/octeontx: probe fpavf pcie devices Santosh Shukla
2017-10-08 12:40     ` [PATCH v3 04/10] mempool/octeontx: add support for alloc Santosh Shukla
2017-10-08 12:40     ` Santosh Shukla [this message]
2017-10-08 12:40     ` [PATCH v3 06/10] mempool/octeontx: add support for enq and deq Santosh Shukla
2017-10-08 12:40     ` [PATCH v3 07/10] mempool/octeontx: add support for get count Santosh Shukla
2017-10-08 12:40     ` [PATCH v3 08/10] mempool/octeontx: add support for get capability Santosh Shukla
2017-10-08 12:40     ` [PATCH v3 09/10] mempool/octeontx: add support for memory area ops Santosh Shukla
2017-10-08 12:40     ` [PATCH v3 10/10] doc: add mempool and octeontx mempool device Santosh Shukla
2017-10-08 16:43       ` Thomas Monjalon
2017-10-09  5:01         ` santosh
2017-10-09  5:46           ` santosh
2017-10-09  8:48             ` Thomas Monjalon
2017-10-09  9:19               ` santosh
2017-10-18 12:17                 ` santosh
2017-10-18 13:45                   ` Thomas Monjalon
2017-10-18 14:02                     ` santosh
2017-10-18 14:26                       ` Thomas Monjalon
2017-10-18 14:36                       ` Jerin Jacob
2017-10-18 15:11                         ` Thomas Monjalon
2017-10-20 15:21       ` [PATCH v4 0/3] Octeontx doc misc Santosh Shukla
2017-10-20 15:21         ` [PATCH v4 1/3] doc: add platform device Santosh Shukla
2017-10-21  9:41           ` Jerin Jacob
2017-10-21 21:09             ` Thomas Monjalon
2017-10-23 14:35           ` Mcnamara, John
2017-10-20 15:21         ` [PATCH v4 2/3] doc: add mempool and octeontx mempool device Santosh Shukla
2017-10-23 14:48           ` Mcnamara, John
2017-10-20 15:21         ` [PATCH v4 3/3] doc: use correct mempool ops handle name Santosh Shukla
2017-10-21  9:42           ` Jerin Jacob
2017-10-23 13:12           ` Mcnamara, John
2017-10-20 16:07         ` [PATCH v4 0/3] Octeontx doc misc Mcnamara, John
2017-10-20 21:10           ` Thomas Monjalon
2017-10-23 14:02             ` Mcnamara, John
2017-11-07  6:59         ` [PATCH v5 0/3] Doc misc Santosh Shukla
2017-11-07  6:59           ` [PATCH v5 1/3] doc: add platform guide Santosh Shukla
2017-11-10 17:42             ` Mcnamara, John
2017-11-07  6:59           ` [PATCH v5 2/3] doc: add mempool and octeontx mempool device Santosh Shukla
2017-11-10 17:43             ` Mcnamara, John
2017-11-07  6:59           ` [PATCH v5 3/3] doc: use correct mempool ops handle name Santosh Shukla
2017-11-10 17:43             ` Mcnamara, John
2017-11-12  3:52           ` [PATCH v5 0/3] Doc misc Thomas Monjalon
2017-10-08 17:16     ` [PATCH v3 00/10] Cavium Octeontx external mempool driver Thomas Monjalon

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20171008124011.1577-6-santosh.shukla@caviumnetworks.com \
    --to=santosh.shukla@caviumnetworks.com \
    --cc=dev@dpdk.org \
    --cc=hemant.agrawal@nxp.com \
    --cc=jerin.jacob@caviumnetworks.com \
    --cc=olivier.matz@6wind.com \
    --cc=thomas@monjalon.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.