From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: "dev@dpdk.org" <dev@dpdk.org>
Cc: "thomas@monjalon.net" <thomas@monjalon.net>,
	Shreyansh Jain <shreyansh.jain@nxp.com>,
	"M.h. Lian" <minghuan.lian@nxp.com>,
	Sachin Saxena <sachin.saxena@nxp.com>
Subject: [PATCH v2 5/7] raw/dpaa2_qdma: add rbp mode support
Date: Thu, 4 Apr 2019 11:04:25 +0000
Message-ID: <20190404110215.14410-5-hemant.agrawal@nxp.com>
In-Reply-To: <20190404110215.14410-1-hemant.agrawal@nxp.com>

Add support for route-by-port (RBP) mode. The route-by-port
feature in the hardware helps in translating the PCI address
of the connected device.
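
As an illustration (not part of the patch), a minimal sketch of driving the
new route-by-port API follows. It assumes the QDMA rawdev has already been
configured and started; the port/PF/VF ids, addresses and flags used below
are placeholders that depend on the connected PCIe endpoint:

#include <string.h>
#include <rte_lcore.h>
#include <rte_pmd_dpaa2_qdma.h>

/* Sketch: copy 'len' bytes from local memory to a PCIe device via RBP.
 * All ids and addresses are illustrative only.
 */
static int
example_rbp_copy(uint64_t src_iova, uint64_t dest_pci_addr, uint32_t len)
{
	struct rte_qdma_rbp rbp;
	struct rte_qdma_job job;
	struct rte_qdma_job *jobs[1] = { &job };
	int vq_id, ret;

	memset(&rbp, 0, sizeof(rbp));
	rbp.enable = 1;      /* use route-by-port on this virtual queue */
	rbp.drbp = 1;        /* destination writes are routed by port */
	rbp.dportid = 0x0;   /* 0000b: PCI-Express 1 */
	rbp.dpfid = 0;       /* destination physical function id */
	rbp.dvfid = 0;       /* destination virtual function id */

	vq_id = rte_qdma_vq_create_rbp(rte_lcore_id(), 0, &rbp);
	if (vq_id < 0)
		return vq_id;

	memset(&job, 0, sizeof(job));
	job.src = src_iova;        /* IOVA of the local source buffer */
	job.dest = dest_pci_addr;  /* PCI address of the connected device */
	job.len = len;
	job.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY;

	ret = rte_qdma_vq_enqueue_multi(vq_id, jobs, 1);
	return ret < 0 ? ret : 0;
}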

Signed-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/raw/dpaa2_qdma/Makefile             |   2 +-
 drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 403 +++++++++++++-------
 drivers/raw/dpaa2_qdma/dpaa2_qdma.h         |  65 +++-
 drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h |  60 ++-
 4 files changed, 383 insertions(+), 147 deletions(-)

diff --git a/drivers/raw/dpaa2_qdma/Makefile b/drivers/raw/dpaa2_qdma/Makefile
index 5c75f5fa0..ee95662f1 100644
--- a/drivers/raw/dpaa2_qdma/Makefile
+++ b/drivers/raw/dpaa2_qdma/Makefile
@@ -26,7 +26,7 @@ LDLIBS += -lrte_common_dpaax
 
 EXPORT_MAP := rte_pmd_dpaa2_qdma_version.map
 
-LIBABIVER := 2
+LIBABIVER := 3
 
 #
 # all source are stored in SRCS-y
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index a1351e648..cf1a1aaa6 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -19,13 +19,16 @@
 #include <portal/dpaa2_hw_pvt.h>
 #include <portal/dpaa2_hw_dpio.h>
 
+#include "rte_pmd_dpaa2_qdma.h"
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
-#include "rte_pmd_dpaa2_qdma.h"
 
 /* Dynamic log type identifier */
 int dpaa2_qdma_logtype;
 
+uint32_t dpaa2_coherent_no_alloc_cache;
+uint32_t dpaa2_coherent_alloc_cache;
+
 /* QDMA device */
 static struct qdma_device qdma_dev;
 
@@ -345,14 +348,29 @@ rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
 
 	qdma_vqs[i].in_use = 1;
 	qdma_vqs[i].lcore_id = lcore_id;
-
+	memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
 	rte_spinlock_unlock(&qdma_dev.lock);
 
 	return i;
 }
 
+/*create vq for route-by-port*/
+int
+rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
+			struct rte_qdma_rbp *rbp)
+{
+	int i;
+
+	i = rte_qdma_vq_create(lcore_id, flags);
+
+	memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));
+
+	return i;
+}
+
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
+			struct rte_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags)
 {
@@ -368,10 +386,36 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
 
 	/* source and destination descriptor */
-	DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
-	sdd++;
-	DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd->read_cmd.portid = rbp->sportid;
+		sdd->rbpcmd_simple.pfid = rbp->spfid;
+		sdd->rbpcmd_simple.vfid = rbp->svfid;
+
+		if (rbp->srbp) {
+			sdd->read_cmd.rbp = rbp->srbp;
+			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+		} else {
+			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+		}
+		sdd++;
+		/* destination */
+		sdd->write_cmd.portid = rbp->dportid;
+		sdd->rbpcmd_simple.pfid = rbp->dpfid;
+		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+
+		if (rbp->drbp) {
+			sdd->write_cmd.rbp = rbp->drbp;
+			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+		} else {
+			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		}
 
+	} else {
+		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+		sdd++;
+		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+	}
 	fle++;
 	/* source frame list to source buffer */
 	if (flags & RTE_QDMA_JOB_SRC_PHY) {
@@ -396,31 +440,57 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 	DPAA2_SET_FLE_FIN(fle);
 }
 
-int
-rte_qdma_vq_enqueue_multi(uint16_t vq_id,
-			  struct rte_qdma_job **job,
-			  uint16_t nb_jobs)
+static inline uint16_t dpdmai_dev_set_fd(struct qbman_fd *fd,
+					struct rte_qdma_job *job,
+					struct rte_qdma_rbp *rbp,
+					uint16_t vq_id)
 {
-	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
-	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
 	struct qdma_io_meta *io_meta;
-	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
-	struct dpaa2_queue *txq;
 	struct qbman_fle *fle;
+	int ret = 0;
+	/*
+	 * Get an FLE/SDD from FLE pool.
+	 * Note: IO metadata is before the FLE and SDD memory.
+	 */
+	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
+	if (ret) {
+		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		return ret;
+	}
+
+	/* Set the metadata */
+	io_meta->cnxt = (size_t)job;
+	io_meta->id = vq_id;
+
+	fle = (struct qbman_fle *)(io_meta + 1);
+
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+
+	/* Populate FLE */
+	memset(fle, 0, QDMA_FLE_POOL_SIZE);
+	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
+				job->len, job->flags);
+
+	return 0;
+}
+
+static int
+dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
+			uint16_t txq_id,
+			uint16_t vq_id,
+			struct rte_qdma_rbp *rbp,
+			struct rte_qdma_job **job,
+			uint16_t nb_jobs)
+{
+	struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
+	struct dpaa2_queue *txq;
 	struct qbman_eq_desc eqdesc;
 	struct qbman_swp *swp;
 	int ret;
 	uint32_t num_to_send = 0;
 	uint16_t num_tx = 0;
-	uint16_t num_txed = 0;
-
-	/* Return error in case of wrong lcore_id */
-	if (rte_lcore_id() != qdma_vq->lcore_id) {
-		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
-				vq_id);
-		return -1;
-	}
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -431,7 +501,7 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	txq = &(dpdmai_dev->tx_queue[qdma_pq->queue_id]);
+	txq = &(dpdmai_dev->tx_queue[txq_id]);
 
 	/* Prepare enqueue descriptor */
 	qbman_eq_desc_clear(&eqdesc);
@@ -439,6 +509,8 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,
 	qbman_eq_desc_set_no_orp(&eqdesc, 0);
 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
 
+	memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));
+
 	while (nb_jobs > 0) {
 		uint32_t loop;
 
@@ -446,73 +518,100 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,
 			dpaa2_eqcr_size : nb_jobs;
 
 		for (loop = 0; loop < num_to_send; loop++) {
-			/*
-			 * Get an FLE/SDD from FLE pool.
-			 * Note: IO metadata is before the FLE and SDD memory.
-			 */
-			ret = rte_mempool_get(qdma_dev.fle_pool,
-					(void **)(&io_meta));
-			if (ret) {
-				DPAA2_QDMA_DP_WARN("Me alloc failed for FLE");
-				return ret;
+			ret = dpdmai_dev_set_fd(&fd[loop],
+						job[num_tx], rbp, vq_id);
+			if (ret < 0) {
+				/* Set nb_jobs to loop, so outer while loop
+				 * breaks out.
+				 */
+				nb_jobs = loop;
+				break;
 			}
 
-			/* Set the metadata */
-			io_meta->cnxt = (size_t)job[num_tx];
-			io_meta->id = vq_id;
-
-			fle = (struct qbman_fle *)(io_meta + 1);
-
-			/* populate Frame descriptor */
-			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
-			DPAA2_SET_FD_ADDR(&fd_arr[loop],
-					DPAA2_VADDR_TO_IOVA(fle));
-			DPAA2_SET_FD_COMPOUND_FMT(&fd_arr[loop]);
-			DPAA2_SET_FD_FRC(&fd_arr[loop], QDMA_SER_CTX);
-
-			/* Populate FLE */
-			memset(fle, 0, QDMA_FLE_POOL_SIZE);
-			dpaa2_qdma_populate_fle(fle, job[num_tx]->src,
-						job[num_tx]->dest,
-						job[num_tx]->len,
-						job[num_tx]->flags);
-
 			num_tx++;
 		}
 
 		/* Enqueue the packet to the QBMAN */
 		uint32_t enqueue_loop = 0;
-		while (enqueue_loop < num_to_send) {
+		while (enqueue_loop < loop) {
 			enqueue_loop += qbman_swp_enqueue_multiple(swp,
 						&eqdesc,
-						&fd_arr[enqueue_loop],
+						&fd[enqueue_loop],
 						NULL,
-						num_to_send - enqueue_loop);
+						loop - enqueue_loop);
 		}
-
-		num_txed += num_to_send;
-		nb_jobs -= num_to_send;
+		nb_jobs -= loop;
 	}
-	qdma_vq->num_enqueues += num_txed;
-	return num_txed;
+	return num_tx;
 }
 
 int
-rte_qdma_vq_enqueue(uint16_t vq_id,
-		    struct rte_qdma_job *job)
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+			  struct rte_qdma_job **job,
+			  uint16_t nb_jobs)
 {
+	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
 	int ret;
 
-	ret = rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
+	DPAA2_QDMA_FUNC_TRACE();
+
+	/* Return error in case of wrong lcore_id */
+	if (rte_lcore_id() != qdma_vq->lcore_id) {
+		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
+				vq_id);
+		return -EINVAL;
+	}
+
+	ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
+				 qdma_pq->queue_id,
+				 vq_id,
+				 &qdma_vq->rbp,
+				 job,
+				 nb_jobs);
 	if (ret < 0) {
 		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
 		return ret;
 	}
 
-	return 1;
+	qdma_vq->num_enqueues += ret;
+
+	return ret;
+}
+
+int
+rte_qdma_vq_enqueue(uint16_t vq_id,
+		    struct rte_qdma_job *job)
+{
+	return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
+}
+
+static inline uint16_t dpdmai_dev_get_job(const struct qbman_fd *fd,
+					struct rte_qdma_job **job)
+{
+	struct qbman_fle *fle;
+	struct qdma_io_meta *io_meta;
+	uint16_t vqid;
+	/*
+	 * Fetch metadata from FLE. job and vq_id were set
+	 * in metadata in the enqueue operation.
+	 */
+	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	io_meta = (struct qdma_io_meta *)(fle) - 1;
+
+	*job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
+	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
+			 (DPAA2_GET_FD_FRC(fd) & 0xFF);
+
+	vqid = io_meta->id;
+
+	/* Free FLE to the pool */
+	rte_mempool_put(qdma_dev.fle_pool, io_meta);
+
+	return vqid;
 }
 
-/* Function to receive a QDMA job for a given device and queue*/
 static int
 dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,
 		   uint16_t rxq_id,
@@ -520,16 +619,18 @@ dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,
 		   struct rte_qdma_job **job,
 		   uint16_t nb_jobs)
 {
-	struct qdma_io_meta *io_meta;
 	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage;
 	struct qbman_pull_desc pulldesc;
-	const struct qbman_fd *fd;
 	struct qbman_swp *swp;
-	struct qbman_fle *fle;
 	uint32_t fqid;
-	uint8_t status;
-	int ret;
+	uint8_t status, pending;
+	uint8_t num_rx = 0;
+	const struct qbman_fd *fd;
+	uint16_t vqid;
+	int ret, next_pull = nb_jobs, num_pulled = 0;
+
+	DPAA2_QDMA_FUNC_TRACE();
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
@@ -539,77 +640,75 @@ dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
+
 	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
-	dq_storage = rxq->q_storage->dq_storage[0];
 	fqid = rxq->fqid;
 
-	/* Prepare dequeue descriptor */
-	qbman_pull_desc_clear(&pulldesc);
-	qbman_pull_desc_set_fq(&pulldesc, fqid);
-	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-	if (nb_jobs > dpaa2_dqrr_size)
-		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
-	else
-		qbman_pull_desc_set_numframes(&pulldesc, nb_jobs);
-
-	while (1) {
-		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
-			continue;
+	do {
+		dq_storage = rxq->q_storage->dq_storage[0];
+		/* Prepare dequeue descriptor */
+		qbman_pull_desc_clear(&pulldesc);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
+		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+		if (next_pull > dpaa2_dqrr_size) {
+			qbman_pull_desc_set_numframes(&pulldesc,
+					dpaa2_dqrr_size);
+			next_pull -= dpaa2_dqrr_size;
+		} else {
+			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
+			next_pull = 0;
 		}
-		break;
-	}
 
-	rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-	/* Check if the previous issued command is completed. */
-	while (!qbman_check_command_complete(dq_storage))
-		;
+		while (1) {
+			if (qbman_swp_pull(swp, &pulldesc)) {
+				DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
+				/* Portal was busy, try again */
+				continue;
+			}
+			break;
+		}
 
-	int num_pulled = 0;
-	int pending = 1;
-	do {
-		/* Loop until the dq_storage is updated with
-		 * new token by QBMAN
-		 */
-		while (!qbman_check_new_result(dq_storage))
+		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
+		/* Check if the previous issued command is completed. */
+		while (!qbman_check_command_complete(dq_storage))
 			;
 
-		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-		/* Check whether Last Pull command is Expired and
-		 * setting Condition for Loop termination
-		 */
-		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-			pending = 0;
-			/* Check for valid frame. */
-			status = qbman_result_DQ_flags(dq_storage);
-			if (unlikely((status &
-				QBMAN_DQ_STAT_VALIDFRAME) == 0))
-				continue;
-		}
-		fd = qbman_result_DQ_fd(dq_storage);
+		num_pulled = 0;
+		pending = 1;
 
-		/*
-		 * Fetch metadata from FLE. job and vq_id were set
-		 * in metadata in the enqueue operation.
-		 */
-		fle = (struct qbman_fle *)
-				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-		io_meta = (struct qdma_io_meta *)(fle) - 1;
-		if (vq_id)
-			vq_id[num_pulled] = io_meta->id;
+		do {
+			/* Loop until dq_storage is updated
+			 * with new token by QBMAN
+			 */
+			while (!qbman_check_new_result(dq_storage))
+				;
+			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+
+			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+				pending = 0;
+				/* Check for valid frame. */
+				status = qbman_result_DQ_flags(dq_storage);
+				if (unlikely((status &
+					QBMAN_DQ_STAT_VALIDFRAME) == 0))
+					continue;
+			}
+			fd = qbman_result_DQ_fd(dq_storage);
 
-		job[num_pulled] = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
-		job[num_pulled]->status = DPAA2_GET_FD_ERR(fd);
+			vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
+			if (vq_id)
+				vq_id[num_rx] = vqid;
 
-		/* Free FLE to the pool */
-		rte_mempool_put(qdma_dev.fle_pool, io_meta);
+			dq_storage++;
+			num_rx++;
+			num_pulled++;
 
-		dq_storage++;
-		num_pulled++;
-	} while (pending && (num_pulled <= dpaa2_dqrr_size));
+		} while (pending);
+	/* Last VDQ provided all packets and more packets are requested */
+	} while (next_pull && num_pulled == dpaa2_dqrr_size);
 
-	return num_pulled;
+	return num_rx;
 }
 
 int
@@ -664,9 +763,9 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,
 				temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
 				rte_ring_enqueue(temp_qdma_vq->status_ring,
 					(void *)(job[i]));
-				ring_count = rte_ring_count(
-					qdma_vq->status_ring);
 			}
+			ring_count = rte_ring_count(
+					qdma_vq->status_ring);
 		}
 
 		if (ring_count) {
@@ -743,6 +842,35 @@ rte_qdma_vq_destroy(uint16_t vq_id)
 	return 0;
 }
 
+int
+rte_qdma_vq_destroy_rbp(uint16_t vq_id)
+{
+	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+	DPAA2_QDMA_FUNC_TRACE();
+
+	/* In case there are pending jobs on any VQ, return -EBUSY */
+	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
+		return -EBUSY;
+
+	rte_spinlock_lock(&qdma_dev.lock);
+
+	if (qdma_vq->exclusive_hw_queue) {
+		free_hw_queue(qdma_vq->hw_queue);
+	} else {
+		if (qdma_vqs->status_ring)
+			rte_ring_free(qdma_vqs->status_ring);
+
+		put_hw_queue(qdma_vq->hw_queue);
+	}
+
+	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
+
+	rte_spinlock_lock(&qdma_dev.lock);
+
+	return 0;
+}
+
 void
 rte_qdma_stop(void)
 {
@@ -939,6 +1067,21 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
 		DPAA2_QDMA_ERR("Adding H/W queue to list failed");
 		goto init_err;
 	}
+
+	if (!dpaa2_coherent_no_alloc_cache) {
+		if (dpaa2_svr_family == SVR_LX2160A) {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
+		} else {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_COHERENT_ALLOCATE_CACHE;
+		}
+	}
+
 	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
 
 	return 0;
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
index 0cbe90255..f15dda694 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -22,28 +22,24 @@ struct qdma_io_meta;
 
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
-
+#define DPAA2_RBP_MEM_RW            0x0
 /**
  * Source descriptor command read transaction type for RBP=0:
  * coherent copy of cacheable memory
  */
-#define DPAA2_SET_SDD_RD_COHERENT(sdd) ((sdd)->cmd = (0xb << 28))
+#define DPAA2_COHERENT_NO_ALLOCATE_CACHE	0xb
+#define DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE	0x7
 /**
  * Destination descriptor command write transaction type for RBP=0:
  * coherent copy of cacheable memory
  */
-#define DPAA2_SET_SDD_WR_COHERENT(sdd) ((sdd)->cmd = (0x6 << 28))
+#define DPAA2_COHERENT_ALLOCATE_CACHE		0x6
+#define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
 #define MAX_HW_QUEUE_PER_CORE		64
 
-/**
- * In case of Virtual Queue mode, this specifies the number of
- * dequeue the 'qdma_vq_dequeue/multi' API does from the H/W Queue
- * in case there is no job present on the Virtual Queue ring.
- */
-#define QDMA_DEQUEUE_BUDGET		64
-
+#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
 /**
  * Represents a QDMA device.
  * A single QDMA device exists which is combination of multiple DPDMAI rawdev's.
@@ -90,6 +86,8 @@ struct qdma_virt_queue {
 	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct qdma_hw_queue *hw_queue;
+	/** Route by port */
+	struct rte_qdma_rbp rbp;
 	/** Associated lcore id */
 	uint32_t lcore_id;
 	/** States if this vq is in use or not */
@@ -118,7 +116,7 @@ struct qdma_io_meta {
 	 */
 	uint64_t cnxt;
 	/** VQ ID is stored as a part of metadata of the enqueue command */
-	 uint64_t id;
+	uint64_t id;
 };
 
 /** Source/Destination Descriptor */
@@ -127,9 +125,48 @@ struct qdma_sdd {
 	/** Stride configuration */
 	uint32_t stride;
 	/** Route-by-port command */
-	uint32_t rbpcmd;
-	uint32_t cmd;
-} __attribute__((__packed__));
+	union {
+		uint32_t rbpcmd;
+		struct rbpcmd_st {
+			uint32_t vfid:6;
+			uint32_t rsv4:2;
+			uint32_t pfid:1;
+			uint32_t rsv3:7;
+			uint32_t attr:3;
+			uint32_t rsv2:1;
+			uint32_t at:2;
+			uint32_t vfa:1;
+			uint32_t ca:1;
+			uint32_t tc:3;
+			uint32_t rsv1:5;
+		} rbpcmd_simple;
+	};
+	union {
+		uint32_t cmd;
+		struct rcmd_simple {
+			uint32_t portid:4;
+			uint32_t rsv1:14;
+			uint32_t rbp:1;
+			uint32_t ssen:1;
+			uint32_t rthrotl:4;
+			uint32_t sqos:3;
+			uint32_t ns:1;
+			uint32_t rdtype:4;
+		} read_cmd;
+		struct wcmd_simple {
+			uint32_t portid:4;
+			uint32_t rsv3:10;
+			uint32_t rsv2:2;
+			uint32_t lwc:2;
+			uint32_t rbp:1;
+			uint32_t dsen:1;
+			uint32_t rsv1:4;
+			uint32_t dqos:3;
+			uint32_t ns:1;
+			uint32_t wrttype:4;
+		} write_cmd;
+	};
+} __attribute__ ((__packed__));
 
 /** Represents a DPDMAI raw device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
index e1ccc19e8..bbc66a286 100644
--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -13,7 +13,7 @@
  */
 
 /** Maximum qdma burst size */
-#define RTE_QDMA_BURST_NB_MAX 32
+#define RTE_QDMA_BURST_NB_MAX 256
 
 /** Determines the mode of operation */
 enum {
@@ -73,6 +73,40 @@ struct rte_qdma_config {
 	int fle_pool_count;
 };
 
+struct rte_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:4;
+};
+
 /** Provides QDMA device statistics */
 struct rte_qdma_vq_stats {
 	/** States if this vq has exclusively associated hw queue */
@@ -105,8 +139,10 @@ struct rte_qdma_job {
 	/**
 	 * Status of the transaction.
 	 * This is filled in the dequeue operation by the driver.
+	 * upper 8bits acc_err for route by port.
+	 * lower 8bits fd error
 	 */
-	uint8_t status;
+	uint16_t status;
 };
 
 /**
@@ -177,6 +213,11 @@ rte_qdma_start(void);
 int
 rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);
 
+/*create vq for route-by-port*/
+int
+rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
+			struct rte_qdma_rbp *rbp);
+
 /**
  * Enqueue multiple jobs to a Virtual Queue.
  * If the enqueue is successful, the H/W will perform DMA operations
@@ -275,6 +316,21 @@ rte_qdma_vq_stats(uint16_t vq_id,
 int
 rte_qdma_vq_destroy(uint16_t vq_id);
 
+/**
+ * Destroy the RBP specific Virtual Queue specified by vq_id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param vq_id
+ *   RBP based Virtual Queue ID which needs to be deinitialized.
+ *
+ * @returns
+ *   - 0: Success.
+ *   - <0: Error code.
+ */
+
+int __rte_experimental
+rte_qdma_vq_destroy_rbp(uint16_t vq_id);
 /**
  * Stop QDMA device.
  */
-- 
2.17.1
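
A note on the widened status field of rte_qdma_job: it is now 16 bits wide,
combining the frame descriptor error with the route-by-port/FRC status (see
dpdmai_dev_get_job() above). A hedged sketch of checking completions after
dequeue, simply treating any non-zero status as a failure:

#include <stdio.h>
#include <rte_pmd_dpaa2_qdma.h>

/* Sketch: drain up to one burst of completions from a virtual queue. */
static void
example_check_completions(uint16_t vq_id)
{
	struct rte_qdma_job *done[RTE_QDMA_BURST_NB_MAX];
	int i, nb;

	nb = rte_qdma_vq_dequeue_multi(vq_id, done, RTE_QDMA_BURST_NB_MAX);
	for (i = 0; i < nb; i++) {
		/* status packs the FD error and the FRC/RBP status;
		 * non-zero means the transfer did not complete cleanly.
		 */
		if (done[i]->status != 0)
			printf("job %d failed, status 0x%04x\n",
			       i, done[i]->status);
	}
}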

Thread overview: 26+ messages
2019-03-26 12:18 [PATCH 1/5] raw/dpaa2_qdma: remove experimental tag from APIs Hemant Agrawal
2019-03-26 12:18 ` [PATCH 2/5] raw/dpaa2_qdma: fix to support multiprocess execution Hemant Agrawal
2019-03-26 12:18 ` [PATCH 3/5] raw/dpaa2_qdma: add burst mode support Hemant Agrawal
2019-03-26 12:18 ` [PATCH 4/5] raw/dpaa2_qdma: add rbp " Hemant Agrawal
2019-03-29 13:53   ` Thomas Monjalon
2019-03-26 12:18 ` [PATCH 5/5] raw/dpaa2x: remove rte logs from data path Hemant Agrawal
2019-03-29 13:53 ` [PATCH 1/5] raw/dpaa2_qdma: remove experimental tag from APIs Thomas Monjalon
2019-04-01 14:14   ` Hemant Agrawal
2019-04-04 11:04 ` [PATCH v2 1/7] config: increase the num of rawdev to be 64 Hemant Agrawal
2019-04-04 11:04   ` [PATCH v2 2/7] raw/dpaa2_qdma: remove experimental tag from APIs Hemant Agrawal
2019-04-04 11:04   ` [PATCH v2 3/7] raw/dpaa2_qdma: fix to support multiprocess execution Hemant Agrawal
2019-04-04 11:04   ` [PATCH v2 4/7] raw/dpaa2_qdma: add burst mode support Hemant Agrawal
2019-04-04 11:04   ` Hemant Agrawal [this message]
2019-04-04 11:04   ` [PATCH v2 6/7] raw/dpaa2x: remove rte logs from data path Hemant Agrawal
2019-04-04 11:04   ` [PATCH v2 7/7] raw/dpaa2_qdma: add support for non prefetch mode Hemant Agrawal
2019-04-04 11:07   ` [PATCH v2 1/7] config: increase the num of rawdev to be 64 Richardson, Bruce
2019-04-04 11:52     ` Hemant Agrawal
2019-04-04 11:50   ` [PATCH v3 " Hemant Agrawal
2019-04-04 11:50     ` [PATCH v3 2/7] raw/dpaa2_qdma: remove experimental tag from APIs Hemant Agrawal
2019-04-04 11:50     ` [PATCH v3 3/7] raw/dpaa2_qdma: fix to support multiprocess execution Hemant Agrawal
2019-04-04 11:50     ` [PATCH v3 4/7] raw/dpaa2_qdma: add burst mode support Hemant Agrawal
2019-04-04 11:50     ` [PATCH v3 5/7] raw/dpaa2_qdma: add rbp " Hemant Agrawal
2019-04-04 11:50     ` [PATCH v3 6/7] raw/dpaa2x: remove rte logs from data path Hemant Agrawal
2019-04-04 11:50     ` [PATCH v3 7/7] raw/dpaa2_qdma: add support for non prefetch mode Hemant Agrawal
2019-04-04 13:04     ` [PATCH v3 1/7] config: increase the num of rawdev to be 64 Bruce Richardson
2019-04-04 23:07       ` [dpdk-dev] " Thomas Monjalon
