From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, nipun.gupta@nxp.com, hemant.agrawal@nxp.com
Cc: thomas.monjalon@6wind.com, Gagandeep Singh <g.singh@nxp.com>
Subject: [dpdk-dev] [PATCH 1/7] raw/dpaa2_qdma: change DPAA2 QDMA APIs to rawdev ops
Date: Mon,  7 Sep 2020 17:25:58 +0800
Message-ID: <1599470764-30569-2-git-send-email-g.singh@nxp.com>
In-Reply-To: <1599470764-30569-1-git-send-email-g.singh@nxp.com>

The dpaa2_qdma driver was partially using direct PMD APIs.
This patch changes that and adapts the driver to go through
the standard rawdev APIs instead.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
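Note for reviewers: below is a minimal, illustrative sketch of how an
application drives the control path through the rawdev layer after this
change, using the convenience macros and config structures this patch
adds to rte_pmd_dpaa2_qdma.h. The configuration values and the way
qdma_dev_id is obtained (e.g. via rte_rawdev_get_dev_id()) are
assumptions for illustration only, not part of this patch:

#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_pmd_dpaa2_qdma.h>

static int
setup_qdma(uint16_t qdma_dev_id)
{
	/* Illustrative sizing; applications tune these to their load */
	struct rte_qdma_config qdma_config = {
		.max_hw_queues_per_core = 1,
		.fle_pool_count = 4096,
		.max_vqs = 8,
		.mode = RTE_QDMA_MODE_HW,
		.format = RTE_QDMA_LONG_FORMAT,
	};
	/* rte_qdma_info aliases rte_rawdev_info; its dev_private field
	 * is what the driver's dev_configure op receives as config */
	struct rte_qdma_info dev_conf = {
		.dev_private = &qdma_config,
	};
	struct rte_qdma_queue_config q_config = {
		.lcore_id = rte_lcore_id(),
		.flags = 0,	/* or RTE_QDMA_VQ_EXCLUSIVE_PQ */
		.rbp = NULL,	/* non-NULL only for route-by-port VQs */
	};

	if (rte_qdma_configure(qdma_dev_id, &dev_conf) < 0)
		return -1;
	if (rte_qdma_start(qdma_dev_id) < 0)
		return -1;

	/* queue_id is unused by this driver; the return value (>= 0)
	 * is the virtual queue id, replacing rte_qdma_vq_create() */
	return rte_qdma_queue_setup(qdma_dev_id, 0, &q_config);
}
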
 drivers/raw/dpaa2_qdma/dpaa2_qdma.c         | 331 ++++++++++++++--------------
 drivers/raw/dpaa2_qdma/dpaa2_qdma.h         |   3 +-
 drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h | 207 ++---------------
 3 files changed, 187 insertions(+), 354 deletions(-)
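
The data path likewise moves behind rte_rawdev_enqueue_buffers() and
rte_rawdev_dequeue_buffers(), with the virtual queue id and job list
carried in the new struct rte_qdma_enqdeq context. A sketch continuing
the one above; passing NULL for the unused rte_rawdev_buf array is an
assumption based on the ops in this patch, which ignore that argument:

static int
qdma_copy_sync(uint16_t qdma_dev_id, uint16_t vq_id,
	       uint64_t src_iova, uint64_t dst_iova, uint32_t len)
{
	struct rte_qdma_job job = {
		.src = src_iova,
		.dest = dst_iova,
		.len = len,
		.flags = 0,
	};
	struct rte_qdma_job *jobs[1] = { &job };
	struct rte_qdma_enqdeq ctx = {
		.vq_id = vq_id,
		.job = jobs,
	};
	int ret;

	/* The job list travels through the context argument, not the
	 * rte_rawdev_buf array */
	ret = rte_qdma_enqueue_buffers(qdma_dev_id, NULL, 1, &ctx);
	if (ret < 1)
		return -1;

	/* Both calls must run on the lcore the VQ was bound to at
	 * queue setup time; the driver rejects other lcores */
	do {
		ret = rte_qdma_dequeue_buffers(qdma_dev_id, NULL, 1, &ctx);
	} while (ret == 0);

	return ret == 1 ? 0 : -1;
}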

diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index 0b9c4e3..a2ee6cc 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
  */
 
 #include <string.h>
@@ -30,7 +30,7 @@ uint32_t dpaa2_coherent_no_alloc_cache;
 uint32_t dpaa2_coherent_alloc_cache;
 
 /* QDMA device */
-static struct qdma_device qdma_dev;
+static struct qdma_device q_dev;
 
 /* QDMA H/W queues list */
 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
@@ -51,9 +51,11 @@ typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
 
 dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;
 
-typedef uint16_t (dpdmai_dev_get_job_t)(const struct qbman_fd *fd,
+typedef uint16_t (dpdmai_dev_get_job_t)(struct qdma_device *qdma_dev,
+					const struct qbman_fd *fd,
 					struct rte_qdma_job **job);
-typedef int (dpdmai_dev_set_fd_t)(struct qbman_fd *fd,
+typedef int (dpdmai_dev_set_fd_t)(struct qdma_device *qdma_dev,
+				  struct qbman_fd *fd,
 				  struct rte_qdma_job *job,
 				  struct rte_qdma_rbp *rbp,
 				  uint16_t vq_id);
@@ -201,10 +203,12 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 	DPAA2_SET_FLE_FIN(fle);
 }
 
-static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,
-					struct rte_qdma_job *job,
-					struct rte_qdma_rbp *rbp,
-					uint16_t vq_id)
+static inline int dpdmai_dev_set_fd_us(
+				struct qdma_device *qdma_dev __rte_unused,
+				struct qbman_fd *fd,
+				struct rte_qdma_job *job,
+				struct rte_qdma_rbp *rbp,
+				uint16_t vq_id)
 {
 	struct rte_qdma_job **ppjob;
 	size_t iova;
@@ -230,7 +234,8 @@ static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,
 					   job->len, fd);
 	return ret;
 }
-static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
+static inline int dpdmai_dev_set_fd_lf(struct qdma_device *qdma_dev,
+					struct qbman_fd *fd,
 					struct rte_qdma_job *job,
 					struct rte_qdma_rbp *rbp,
 					uint16_t vq_id)
@@ -242,7 +247,7 @@ static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
 	 * Get an FLE/SDD from FLE pool.
 	 * Note: IO metadata is before the FLE and SDD memory.
 	 */
-	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&ppjob));
+	ret = rte_mempool_get(qdma_dev->fle_pool, (void **)(&ppjob));
 	if (ret) {
 		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
 		return ret;
@@ -266,8 +271,10 @@ static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
 	return 0;
 }
 
-static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,
-					struct rte_qdma_job **job)
+static inline uint16_t dpdmai_dev_get_job_us(
+				struct qdma_device *qdma_dev __rte_unused,
+				const struct qbman_fd *fd,
+				struct rte_qdma_job **job)
 {
 	uint16_t vqid;
 	size_t iova;
@@ -288,8 +295,9 @@ static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,
 	return vqid;
 }
 
-static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,
-					struct rte_qdma_job **job)
+static inline uint16_t dpdmai_dev_get_job_lf(struct qdma_device *qdma_dev,
+					     const struct qbman_fd *fd,
+					     struct rte_qdma_job **job)
 {
 	struct rte_qdma_job **ppjob;
 	uint16_t vqid;
@@ -307,7 +315,7 @@ static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,
 	vqid = (*job)->vq_id;
 
 	/* Free FLE to the pool */
-	rte_mempool_put(qdma_dev.fle_pool, (void *)ppjob);
+	rte_mempool_put(qdma_dev->fle_pool, (void *)ppjob);
 
 	return vqid;
 }
@@ -341,7 +349,7 @@ free_hw_queue(struct qdma_hw_queue *queue)
 
 
 static struct qdma_hw_queue *
-get_hw_queue(uint32_t lcore_id)
+get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)
 {
 	struct qdma_per_core_info *core_info;
 	struct qdma_hw_queue *queue, *temp;
@@ -357,7 +365,7 @@ get_hw_queue(uint32_t lcore_id)
 	 * Allocate a HW queue if there are less queues
 	 * than maximum per core queues configured
 	 */
-	if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
+	if (num_hw_queues < qdma_dev->max_hw_queues_per_core) {
 		queue = alloc_hw_queue(lcore_id);
 		if (queue) {
 			core_info->hw_queues[num_hw_queues] = queue;
@@ -416,41 +424,41 @@ put_hw_queue(struct qdma_hw_queue *queue)
 	}
 }
 
-int
-rte_qdma_init(void)
+static int
+dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,
+		    __rte_unused const char *attr_name,
+		    uint64_t *attr_value)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;
+
 	DPAA2_QDMA_FUNC_TRACE();
 
-	rte_spinlock_init(&qdma_dev.lock);
+	qdma_attr->num_hw_queues = qdma_dev->num_hw_queues;
 
 	return 0;
 }
 
-void
-rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
-}
-
-int
-rte_qdma_reset(void)
+static int
+dpaa2_qdma_reset(struct rte_rawdev *rawdev)
 {
 	struct qdma_hw_queue *queue;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev.state == 1) {
+	if (qdma_dev->state == 1) {
 		DPAA2_QDMA_ERR(
 			"Device is in running state. Stop before reset.");
 		return -EBUSY;
 	}
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
-	for (i = 0; i < qdma_dev.max_vqs; i++) {
+	for (i = 0; i < qdma_dev->max_vqs; i++) {
 		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
 		    qdma_vqs[i].num_dequeues))
 			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
@@ -462,7 +470,7 @@ rte_qdma_reset(void)
 		queue->num_users = 0;
 
 	/* Reset and free virtual queues */
-	for (i = 0; i < qdma_dev.max_vqs; i++) {
+	for (i = 0; i < qdma_dev->max_vqs; i++) {
 		if (qdma_vqs[i].status_ring)
 			rte_ring_free(qdma_vqs[i].status_ring);
 	}
@@ -475,43 +483,39 @@ rte_qdma_reset(void)
 		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
 
 	/* Free the FLE pool */
-	if (qdma_dev.fle_pool)
-		rte_mempool_free(qdma_dev.fle_pool);
+	if (qdma_dev->fle_pool)
+		rte_mempool_free(qdma_dev->fle_pool);
 
 	/* Reset QDMA device structure */
-	qdma_dev.mode = RTE_QDMA_MODE_HW;
-	qdma_dev.max_hw_queues_per_core = 0;
-	qdma_dev.fle_pool = NULL;
-	qdma_dev.fle_pool_count = 0;
-	qdma_dev.max_vqs = 0;
+	qdma_dev->mode = RTE_QDMA_MODE_HW;
+	qdma_dev->max_hw_queues_per_core = 0;
+	qdma_dev->fle_pool = NULL;
+	qdma_dev->fle_pool_count = 0;
+	qdma_dev->max_vqs = 0;
 
 	return 0;
 }
 
-int
-rte_qdma_configure(struct rte_qdma_config *qdma_config)
+static int
+dpaa2_qdma_configure(const struct rte_rawdev *rawdev,
+			 rte_rawdev_obj_t config)
 {
-	int ret;
 	char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
+	struct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev.state == 1) {
+	if (qdma_dev->state == 1) {
 		DPAA2_QDMA_ERR(
 			"Device is in running state. Stop before config.");
 		return -1;
 	}
 
-	/* Reset the QDMA device */
-	ret = rte_qdma_reset();
-	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		return ret;
-	}
-
 	/* Set mode */
-	qdma_dev.mode = qdma_config->mode;
+	qdma_dev->mode = qdma_config->mode;
 
 	/* Set max HW queue per core */
 	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
@@ -519,7 +523,7 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)
 			       MAX_HW_QUEUE_PER_CORE);
 		return -EINVAL;
 	}
-	qdma_dev.max_hw_queues_per_core =
+	qdma_dev->max_hw_queues_per_core =
 		qdma_config->max_hw_queues_per_core;
 
 	/* Allocate Virtual Queues */
@@ -530,24 +534,24 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)
 		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
 		return -ENOMEM;
 	}
-	qdma_dev.max_vqs = qdma_config->max_vqs;
+	qdma_dev->max_vqs = qdma_config->max_vqs;
 
 	/* Allocate FLE pool; just append PID so that in case of
 	 * multi-process, the pools don't collide.
 	 */
 	snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
 		 getpid());
-	qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
+	qdma_dev->fle_pool = rte_mempool_create(fle_pool_name,
 			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
 			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
 			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev.fle_pool) {
+	if (!qdma_dev->fle_pool) {
 		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
 		rte_free(qdma_vqs);
 		qdma_vqs = NULL;
 		return -ENOMEM;
 	}
-	qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
+	qdma_dev->fle_pool_count = qdma_config->fle_pool_count;
 
 	if (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {
 		dpdmai_dev_get_job = dpdmai_dev_get_job_us;
@@ -559,57 +563,67 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)
 	return 0;
 }
 
-int
-rte_qdma_start(void)
+static int
+dpaa2_qdma_start(struct rte_rawdev *rawdev)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev.state = 1;
+	qdma_dev->state = 1;
 
 	return 0;
 }
 
-int
-rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
+static int
+dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
+			  __rte_unused uint16_t queue_id,
+			  rte_rawdev_obj_t queue_conf)
 {
 	char ring_name[32];
 	int i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct rte_qdma_queue_config *q_config =
+		(struct rte_qdma_queue_config *)queue_conf;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	rte_spinlock_lock(&qdma_dev.lock);
+	rte_spinlock_lock(&qdma_dev->lock);
 
 	/* Get a free Virtual Queue */
-	for (i = 0; i < qdma_dev.max_vqs; i++) {
+	for (i = 0; i < qdma_dev->max_vqs; i++) {
 		if (qdma_vqs[i].in_use == 0)
 			break;
 	}
 
 	/* Return in case no VQ is free */
-	if (i == qdma_dev.max_vqs) {
-		rte_spinlock_unlock(&qdma_dev.lock);
+	if (i == qdma_dev->max_vqs) {
+		rte_spinlock_unlock(&qdma_dev->lock);
 		DPAA2_QDMA_ERR("Unable to get lock on QDMA device");
 		return -ENODEV;
 	}
 
-	if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
-			(flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
+	if (qdma_dev->mode == RTE_QDMA_MODE_HW ||
+			(q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
 		/* Allocate HW queue for a VQ */
-		qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
+		qdma_vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);
 		qdma_vqs[i].exclusive_hw_queue = 1;
 	} else {
 		/* Allocate a Ring for Virtual Queue in VQ mode */
 		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
 		qdma_vqs[i].status_ring = rte_ring_create(ring_name,
-			qdma_dev.fle_pool_count, rte_socket_id(), 0);
+			qdma_dev->fle_pool_count, rte_socket_id(), 0);
 		if (!qdma_vqs[i].status_ring) {
 			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			rte_spinlock_unlock(&qdma_dev.lock);
+			rte_spinlock_unlock(&qdma_dev->lock);
 			return rte_errno;
 		}
 
 		/* Get a HW queue (shared) for a VQ */
-		qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
+		qdma_vqs[i].hw_queue = get_hw_queue(qdma_dev,
+						    q_config->lcore_id);
 		qdma_vqs[i].exclusive_hw_queue = 0;
 	}
 
@@ -618,28 +632,18 @@ rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
 		if (qdma_vqs[i].status_ring)
 			rte_ring_free(qdma_vqs[i].status_ring);
 		qdma_vqs[i].status_ring = NULL;
-		rte_spinlock_unlock(&qdma_dev.lock);
+		rte_spinlock_unlock(&qdma_dev->lock);
 		return -ENODEV;
 	}
 
 	qdma_vqs[i].in_use = 1;
-	qdma_vqs[i].lcore_id = lcore_id;
+	qdma_vqs[i].lcore_id = q_config->lcore_id;
 	memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
-	rte_spinlock_unlock(&qdma_dev.lock);
-
-	return i;
-}
-
-/*create vq for route-by-port*/
-int
-rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
-			struct rte_qdma_rbp *rbp)
-{
-	int i;
-
-	i = rte_qdma_vq_create(lcore_id, flags);
+	rte_spinlock_unlock(&qdma_dev->lock);
 
-	memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));
+	if (q_config->rbp != NULL)
+		memcpy(&qdma_vqs[i].rbp, q_config->rbp,
+		       sizeof(struct rte_qdma_rbp));
 
 	return i;
 }
@@ -688,7 +692,7 @@ dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
 			dpaa2_eqcr_size : nb_jobs;
 
 		for (loop = 0; loop < num_to_send; loop++) {
-			ret = dpdmai_dev_set_fd(&fd[loop],
+			ret = dpdmai_dev_set_fd(dpdmai_dev->qdma_dev, &fd[loop],
 						job[num_tx], rbp, vq_id);
 			if (ret < 0) {
 				/* Set nb_jobs to loop, so outer while loop
@@ -723,12 +727,14 @@ dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
 	return num_tx;
 }
 
-int
-rte_qdma_vq_enqueue_multi(uint16_t vq_id,
-			  struct rte_qdma_job **job,
-			  uint16_t nb_jobs)
+static int
+dpaa2_qdma_enqueue(__rte_unused struct rte_rawdev *rawdev,
+		  __rte_unused struct rte_rawdev_buf **buffers,
+		  unsigned int nb_jobs,
+		  rte_rawdev_obj_t context)
 {
-	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+	struct rte_qdma_enqdeq *e_context = (struct rte_qdma_enqdeq *)context;
+	struct qdma_virt_queue *qdma_vq = &qdma_vqs[e_context->vq_id];
 	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
 	int ret;
@@ -736,15 +742,15 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,
 	/* Return error in case of wrong lcore_id */
 	if (rte_lcore_id() != qdma_vq->lcore_id) {
 		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
-				vq_id);
+				e_context->vq_id);
 		return -EINVAL;
 	}
 
 	ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
 				 qdma_pq->queue_id,
-				 vq_id,
+				 e_context->vq_id,
 				 &qdma_vq->rbp,
-				 job,
+				 e_context->job,
 				 nb_jobs);
 	if (ret < 0) {
 		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
@@ -756,13 +762,6 @@ rte_qdma_vq_enqueue_multi(uint16_t vq_id,
 	return ret;
 }
 
-int
-rte_qdma_vq_enqueue(uint16_t vq_id,
-		    struct rte_qdma_job *job)
-{
-	return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
-}
-
 /* Function to receive a QDMA job for a given device and queue */
 static int
 dpdmai_dev_dequeue_multijob_prefetch(
@@ -877,7 +876,8 @@ dpdmai_dev_dequeue_multijob_prefetch(
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 
-		vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
+		vqid = dpdmai_dev_get_job(dpdmai_dev->qdma_dev, fd,
+					  &job[num_rx]);
 		if (vq_id)
 			vq_id[num_rx] = vqid;
 
@@ -993,7 +993,8 @@ dpdmai_dev_dequeue_multijob_no_prefetch(
 			}
 			fd = qbman_result_DQ_fd(dq_storage);
 
-			vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
+			vqid = dpdmai_dev_get_job(dpdmai_dev->qdma_dev, fd,
+						  &job[num_rx]);
 			if (vq_id)
 				vq_id[num_rx] = vqid;
 
@@ -1008,21 +1009,24 @@ dpdmai_dev_dequeue_multijob_no_prefetch(
 	return num_rx;
 }
 
-int
-rte_qdma_vq_dequeue_multi(uint16_t vq_id,
-			  struct rte_qdma_job **job,
-			  uint16_t nb_jobs)
+static int
+dpaa2_qdma_dequeue(__rte_unused struct rte_rawdev *rawdev,
+		   __rte_unused struct rte_rawdev_buf **buffers,
+		   unsigned int nb_jobs,
+		   rte_rawdev_obj_t cntxt)
 {
-	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+	struct rte_qdma_enqdeq *context = (struct rte_qdma_enqdeq *)cntxt;
+	struct qdma_virt_queue *qdma_vq = &qdma_vqs[context->vq_id];
 	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
 	struct qdma_virt_queue *temp_qdma_vq;
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
-	int ring_count, ret = 0, i;
+	int ret = 0, i;
+	unsigned int ring_count;
 
 	/* Return error in case of wrong lcore_id */
 	if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
 		DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
-				vq_id);
+				context->vq_id);
 		return -1;
 	}
 
@@ -1036,7 +1040,7 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,
 	if (qdma_vq->exclusive_hw_queue) {
 		/* In case of exclusive queue directly fetch from HW queue */
 		ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
-					 NULL, job, nb_jobs);
+					 NULL, context->job, nb_jobs);
 		if (ret < 0) {
 			DPAA2_QDMA_ERR(
 				"Dequeue from DPDMAI device failed: %d", ret);
@@ -1055,11 +1059,11 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,
 			/* TODO - How to have right budget */
 			ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
 					qdma_pq->queue_id,
-					temp_vq_id, job, nb_jobs);
+					temp_vq_id, context->job, nb_jobs);
 			for (i = 0; i < ret; i++) {
 				temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
 				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(job[i]));
+					(void *)(context->job[i]));
 			}
 			ring_count = rte_ring_count(
 					qdma_vq->status_ring);
@@ -1070,7 +1074,8 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,
 			 * to provide to the user
 			 */
 			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-					(void **)job, ring_count, NULL);
+						    (void **)context->job,
+						    ring_count, NULL);
 			if (ret)
 				qdma_vq->num_dequeues += ret;
 		}
@@ -1079,19 +1084,6 @@ rte_qdma_vq_dequeue_multi(uint16_t vq_id,
 	return ret;
 }
 
-struct rte_qdma_job *
-rte_qdma_vq_dequeue(uint16_t vq_id)
-{
-	int ret;
-	struct rte_qdma_job *job = NULL;
-
-	ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
-	if (ret < 0)
-		DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);
-
-	return job;
-}
-
 void
 rte_qdma_vq_stats(uint16_t vq_id,
 		  struct rte_qdma_vq_stats *vq_status)
@@ -1108,9 +1100,13 @@ rte_qdma_vq_stats(uint16_t vq_id,
 	}
 }
 
-int
-rte_qdma_vq_destroy(uint16_t vq_id)
+static int
+dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,
+			 uint16_t vq_id)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
 	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
 
 	DPAA2_QDMA_FUNC_TRACE();
@@ -1119,7 +1115,7 @@ rte_qdma_vq_destroy(uint16_t vq_id)
 	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
 		return -EBUSY;
 
-	rte_spinlock_lock(&qdma_dev.lock);
+	rte_spinlock_lock(&qdma_dev->lock);
 
 	if (qdma_vq->exclusive_hw_queue)
 		free_hw_queue(qdma_vq->hw_queue);
@@ -1132,57 +1128,44 @@ rte_qdma_vq_destroy(uint16_t vq_id)
 
 	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
 
-	rte_spinlock_unlock(&qdma_dev.lock);
+	rte_spinlock_unlock(&qdma_dev->lock);
 
 	return 0;
 }
 
-int
-rte_qdma_vq_destroy_rbp(uint16_t vq_id)
+static void
+dpaa2_qdma_stop(struct rte_rawdev *rawdev)
 {
-	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case there are pending jobs on any VQ, return -EBUSY */
-	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
-		return -EBUSY;
-
-	rte_spinlock_lock(&qdma_dev.lock);
-
-	if (qdma_vq->exclusive_hw_queue) {
-		free_hw_queue(qdma_vq->hw_queue);
-	} else {
-		if (qdma_vqs->status_ring)
-			rte_ring_free(qdma_vqs->status_ring);
-
-		put_hw_queue(qdma_vq->hw_queue);
-	}
-
-	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
-
-	rte_spinlock_unlock(&qdma_dev.lock);
-
-	return 0;
+	qdma_dev->state = 0;
 }
 
-void
-rte_qdma_stop(void)
+static int
+dpaa2_qdma_close(struct rte_rawdev *rawdev)
 {
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev.state = 0;
-}
-
-void
-rte_qdma_destroy(void)
-{
-	DPAA2_QDMA_FUNC_TRACE();
+	dpaa2_qdma_reset(rawdev);
 
-	rte_qdma_reset();
+	return 0;
 }
 
-static const struct rte_rawdev_ops dpaa2_qdma_ops;
+static struct rte_rawdev_ops dpaa2_qdma_ops = {
+	.dev_configure            = dpaa2_qdma_configure,
+	.dev_start                = dpaa2_qdma_start,
+	.dev_stop                 = dpaa2_qdma_stop,
+	.dev_reset                = dpaa2_qdma_reset,
+	.dev_close                = dpaa2_qdma_close,
+	.queue_setup		  = dpaa2_qdma_queue_setup,
+	.queue_release		  = dpaa2_qdma_queue_release,
+	.attr_get		  = dpaa2_qdma_attr_get,
+	.enqueue_bufs		  = dpaa2_qdma_enqueue,
+	.dequeue_bufs		  = dpaa2_qdma_dequeue,
+};
 
 static int
 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
@@ -1204,7 +1187,7 @@ add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
 		queue->queue_id = i;
 
 		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
-		qdma_dev.num_hw_queues++;
+		dpdmai_dev->qdma_dev->num_hw_queues++;
 	}
 
 	return 0;
@@ -1313,6 +1296,7 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
 	/* Open DPDMAI device */
 	dpdmai_dev->dpdmai_id = dpdmai_id;
 	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+	dpdmai_dev->qdma_dev = &q_dev;
 	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
 			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
 	if (ret) {
@@ -1427,6 +1411,8 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
 
 	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
 
+	rte_spinlock_init(&dpdmai_dev->qdma_dev->lock);
+
 	return 0;
 init_err:
 	dpaa2_dpdmai_dev_uninit(rawdev);
@@ -1462,6 +1448,13 @@ rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 		return ret;
 	}
 
+	/* Reset the QDMA device */
+	ret = dpaa2_qdma_reset(rawdev);
+	if (ret) {
+		DPAA2_QDMA_ERR("Resetting QDMA failed");
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
index 0176380..3c112d2 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
  */
 
 #ifndef __DPAA2_QDMA_H__
@@ -173,6 +173,7 @@ struct dpaa2_dpdmai_dev {
 	struct dpaa2_queue rx_queue[DPAA2_DPDMAI_MAX_QUEUES];
 	/** TX queues */
 	struct dpaa2_queue tx_queue[DPAA2_DPDMAI_MAX_QUEUES];
+	struct qdma_device *qdma_dev;
 };
 
 #endif /* __DPAA2_QDMA_H__ */
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
index 4e1268c..71894d3 100644
--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -1,10 +1,12 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
  */
 
 #ifndef __RTE_PMD_DPAA2_QDMA_H__
 #define __RTE_PMD_DPAA2_QDMA_H__
 
+#include <rte_rawdev.h>
+
 /**
  * @file
  *
@@ -154,150 +156,29 @@ struct rte_qdma_job {
 	uint16_t vq_id;
 };
 
-/**
- * Initialize the QDMA device.
- *
- * @returns
- *   - 0: Success.
- *   - <0: Error code.
- */
-int
-rte_qdma_init(void);
-
-/**
- * Get the QDMA attributes.
- *
- * @param qdma_attr
- *   QDMA attributes providing total number of hw queues etc.
- */
-void
-rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr);
-
-/**
- * Reset the QDMA device. This API will completely reset the QDMA
- * device, bringing it to original state as if only rte_qdma_init() API
- * has been called.
- *
- * @returns
- *   - 0: Success.
- *   - <0: Error code.
- */
-int
-rte_qdma_reset(void);
-
-/**
- * Configure the QDMA device.
- *
- * @returns
- *   - 0: Success.
- *   - <0: Error code.
- */
-int
-rte_qdma_configure(struct rte_qdma_config *qdma_config);
-
-/**
- * Start the QDMA device.
- *
- * @returns
- *   - 0: Success.
- *   - <0: Error code.
- */
-int
-rte_qdma_start(void);
-
-/**
- * Create a Virtual Queue on a particular lcore id.
- * This API can be called from any thread/core. User can create/destroy
- * VQ's at runtime.
- *
- * @param lcore_id
- *   LCORE ID on which this particular queue would be associated with.
- * @param flags
- *  RTE_QDMA_VQ_ flags. See macro definitions.
- *
- * @returns
- *   - >= 0: Virtual queue ID.
- *   - <0: Error code.
- */
-int
-rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);
-
-/*create vq for route-by-port*/
-int
-rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
-			struct rte_qdma_rbp *rbp);
-
-/**
- * Enqueue multiple jobs to a Virtual Queue.
- * If the enqueue is successful, the H/W will perform DMA operations
- * on the basis of the QDMA jobs provided.
- *
- * @param vq_id
- *   Virtual Queue ID.
- * @param job
- *   List of QDMA Jobs containing relevant information related to DMA.
- * @param nb_jobs
- *   Number of QDMA jobs provided by the user.
- *
- * @returns
- *   - >=0: Number of jobs successfully submitted
- *   - <0: Error code.
- */
-int
-rte_qdma_vq_enqueue_multi(uint16_t vq_id,
-			  struct rte_qdma_job **job,
-			  uint16_t nb_jobs);
-
-/**
- * Enqueue a single job to a Virtual Queue.
- * If the enqueue is successful, the H/W will perform DMA operations
- * on the basis of the QDMA job provided.
- *
- * @param vq_id
- *   Virtual Queue ID.
- * @param job
- *   A QDMA Job containing relevant information related to DMA.
- *
- * @returns
- *   - >=0: Number of jobs successfully submitted
- *   - <0: Error code.
- */
-int
-rte_qdma_vq_enqueue(uint16_t vq_id,
-		    struct rte_qdma_job *job);
+struct rte_qdma_enqdeq {
+	uint16_t vq_id;
+	struct rte_qdma_job **job;
+};
 
-/**
- * Dequeue multiple completed jobs from a Virtual Queue.
- * Provides the list of completed jobs capped by nb_jobs.
- *
- * @param vq_id
- *   Virtual Queue ID.
- * @param job
- *   List of QDMA Jobs returned from the API.
- * @param nb_jobs
- *   Number of QDMA jobs requested for dequeue by the user.
- *
- * @returns
- *   - >=0: Number of jobs successfully received
- *   - <0: Error code.
- */
-int
-rte_qdma_vq_dequeue_multi(uint16_t vq_id,
-			  struct rte_qdma_job **job,
-			  uint16_t nb_jobs);
+struct rte_qdma_queue_config {
+	uint32_t lcore_id;
+	uint32_t flags;
+	struct rte_qdma_rbp *rbp;
+};
 
-/**
- * Dequeue a single completed jobs from a Virtual Queue.
- *
- * @param vq_id
- *   Virtual Queue ID.
- *
- * @returns
- *   - A completed job or NULL if no job is there.
- */
-struct rte_qdma_job *
-rte_qdma_vq_dequeue(uint16_t vq_id);
+#define rte_qdma_info rte_rawdev_info
+#define rte_qdma_start(id) rte_rawdev_start(id)
+#define rte_qdma_reset(id) rte_rawdev_reset(id)
+#define rte_qdma_configure(id, cf) rte_rawdev_configure(id, cf)
+#define rte_qdma_dequeue_buffers(id, buf, num, ctxt) \
+	rte_rawdev_dequeue_buffers(id, buf, num, ctxt)
+#define rte_qdma_enqueue_buffers(id, buf, num, ctxt) \
+	rte_rawdev_enqueue_buffers(id, buf, num, ctxt)
+#define rte_qdma_queue_setup(id, qid, cfg) \
+	rte_rawdev_queue_setup(id, qid, cfg)
 
+/* TODO: introduce a per-queue stats API in rawdev */
 /**
  * Get a Virtual Queue statistics.
  *
@@ -310,46 +191,4 @@ void
 rte_qdma_vq_stats(uint16_t vq_id,
 		  struct rte_qdma_vq_stats *vq_stats);
 
-/**
- * Destroy the Virtual Queue specified by vq_id.
- * This API can be called from any thread/core. User can create/destroy
- * VQ's at runtime.
- *
- * @param vq_id
- *   Virtual Queue ID which needs to be uninitialized.
- *
- * @returns
- *   - 0: Success.
- *   - <0: Error code.
- */
-int
-rte_qdma_vq_destroy(uint16_t vq_id);
-
-/**
- * Destroy the RBP specific Virtual Queue specified by vq_id.
- * This API can be called from any thread/core. User can create/destroy
- * VQ's at runtime.
- *
- * @param vq_id
- *   RBP based Virtual Queue ID which needs to be uninitialized.
- *
- * @returns
- *   - 0: Success.
- *   - <0: Error code.
- */
-
-int
-rte_qdma_vq_destroy_rbp(uint16_t vq_id);
-/**
- * Stop QDMA device.
- */
-void
-rte_qdma_stop(void);
-
-/**
- * Destroy the QDMA device.
- */
-void
-rte_qdma_destroy(void);
-
 #endif /* __RTE_PMD_DPAA2_QDMA_H__*/
-- 
2.7.4


Thread overview: 23+ messages
2020-09-07  9:25 [dpdk-dev] [PATCH 0/7] raw/dpaa2_qdma: driver enhancement Gagandeep Singh
2020-09-07  9:25 ` Gagandeep Singh [this message]
2020-09-25 10:53   ` [dpdk-dev] [PATCH 1/7] raw/dpaa2_qdma: change DPAA2 QDMA APIs to rawdev ops Hemant Agrawal
2020-10-06 21:36   ` Thomas Monjalon
2020-10-14 10:27     ` Gagandeep Singh
2020-10-14 11:37       ` Bruce Richardson
2020-10-14 12:43         ` Ferruh Yigit
2020-09-07  9:25 ` [dpdk-dev] [PATCH 2/7] raw/dpaa2_qdma: memset to only required memory Gagandeep Singh
2020-09-07  9:26 ` [dpdk-dev] [PATCH 3/7] raw/dpaa2_qdma: refactor the code Gagandeep Singh
2020-09-07  9:26 ` [dpdk-dev] [PATCH 4/7] raw/dpaa2_qdma: optimize IOVA conversion Gagandeep Singh
2020-09-07  9:26 ` [dpdk-dev] [PATCH 5/7] raw/dpaa2_qdma: support scatter gather in enqueue Gagandeep Singh
2020-09-07  9:26 ` [dpdk-dev] [PATCH 6/7] raw/dpaa2_qdma: support FLE pool per queue Gagandeep Singh
2020-09-07  9:26 ` [dpdk-dev] [PATCH 7/7] raw/dpaa2_qdma: support enqueue without response wait Gagandeep Singh
2020-09-25 10:54 ` [dpdk-dev] [PATCH 0/7] raw/dpaa2_qdma: driver enhancement Hemant Agrawal
2020-10-15  9:47 ` [dpdk-dev] [PATCH v2 " Gagandeep Singh
2020-10-15  9:47   ` [dpdk-dev] [PATCH v2 1/7] raw/dpaa2_qdma: change DPAA2 QDMA APIs to rawdev ops Gagandeep Singh
2020-10-19 11:45     ` Thomas Monjalon
2020-10-15  9:47   ` [dpdk-dev] [PATCH v2 2/7] raw/dpaa2_qdma: memset to only required memory Gagandeep Singh
2020-10-15  9:47   ` [dpdk-dev] [PATCH v2 3/7] raw/dpaa2_qdma: refactor the code Gagandeep Singh
2020-10-15  9:47   ` [dpdk-dev] [PATCH v2 4/7] raw/dpaa2_qdma: optimize IOVA conversion Gagandeep Singh
2020-10-15  9:47   ` [dpdk-dev] [PATCH v2 5/7] raw/dpaa2_qdma: support scatter gather in enqueue Gagandeep Singh
2020-10-15  9:47   ` [dpdk-dev] [PATCH v2 6/7] raw/dpaa2_qdma: support FLE pool per queue Gagandeep Singh
2020-10-15  9:47   ` [dpdk-dev] [PATCH v2 7/7] raw/dpaa2_qdma: support enqueue without response wait Gagandeep Singh
