From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: "dev@dpdk.org" <dev@dpdk.org>
Cc: "thomas@monjalon.net" <thomas@monjalon.net>,
	Shreyansh Jain <shreyansh.jain@nxp.com>
Subject: [PATCH v2 7/7] raw/dpaa2_qdma: add support for non prefetch mode
Date: Thu, 4 Apr 2019 11:04:28 +0000	[thread overview]
Message-ID: <20190404110215.14410-7-hemant.agrawal@nxp.com> (raw)
In-Reply-To: <20190404110215.14410-1-hemant.agrawal@nxp.com>

This patch adds support for a non-prefetch mode in the Rx functions.
The mode is selected once at device init through the "no_prefetch"
devargs option: when set, the driver binds its Rx burst path to a
plain volatile-dequeue implementation instead of the default
prefetch-based one.
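
For illustration only, the mode could be requested through a devargs
string such as the one below; the fslmc bus prefix and the dpdmai.1
device name are assumptions here, adjust them for your platform:

    fslmc:dpdmai.1,no_prefetch=1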

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/raw/dpaa2_qdma/Makefile     |   1 +
 drivers/raw/dpaa2_qdma/dpaa2_qdma.c | 215 +++++++++++++++++++++++++++-
 drivers/raw/dpaa2_qdma/meson.build  |   2 +-
 3 files changed, 212 insertions(+), 6 deletions(-)
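
Patch note (illustration, not part of the change): the driver picks
the Rx burst implementation through a function pointer that is bound
once at init, so the hot path carries no per-burst branch. Below is a
minimal, self-contained C sketch of that dispatch pattern; the type
and function names are simplified stand-ins, not the driver's real
ones.

    #include <stdint.h>
    #include <stdio.h>

    struct job { uint64_t cookie; };

    typedef int (dequeue_multijob_t)(uint16_t rxq_id, struct job **jobs,
                                     uint16_t nb_jobs);

    static int
    dequeue_prefetch(uint16_t rxq_id, struct job **jobs, uint16_t nb_jobs)
    {
            /* Prefetch path: issue the next hardware pull before parsing
             * the current results, hiding pull latency behind processing.
             */
            (void)rxq_id; (void)jobs;
            return nb_jobs;
    }

    static int
    dequeue_no_prefetch(uint16_t rxq_id, struct job **jobs, uint16_t nb_jobs)
    {
            /* Plain path: one volatile pull per burst; wait for results. */
            (void)rxq_id; (void)jobs;
            return nb_jobs;
    }

    /* Bound once at device init; the hot path never branches on mode. */
    static dequeue_multijob_t *dequeue_multijob;

    static void
    dev_init(int no_prefetch_devarg)
    {
            dequeue_multijob = no_prefetch_devarg ? dequeue_no_prefetch
                                                  : dequeue_prefetch;
    }

    int
    main(void)
    {
            struct job *jobs[8] = { NULL };

            dev_init(1);        /* as if "no_prefetch=1" had been parsed */
            printf("dequeued %d jobs\n", dequeue_multijob(0, jobs, 8));
            return 0;
    }

In the actual change, the binding happens in dpaa2_dpdmai_dev_init()
once the devargs have been parsed, as the diff below shows.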

diff --git a/drivers/raw/dpaa2_qdma/Makefile b/drivers/raw/dpaa2_qdma/Makefile
index ee95662f1..450c76e76 100644
--- a/drivers/raw/dpaa2_qdma/Makefile
+++ b/drivers/raw/dpaa2_qdma/Makefile
@@ -21,6 +21,7 @@ LDLIBS += -lrte_eal
 LDLIBS += -lrte_mempool
 LDLIBS += -lrte_mempool_dpaa2
 LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_kvargs
 LDLIBS += -lrte_ring
 LDLIBS += -lrte_common_dpaax
 
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index 38f329a50..a41c1e385 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -14,6 +14,7 @@
 #include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
+#include <rte_kvargs.h>
 
 #include <mc/fsl_dpdmai.h>
 #include <portal/dpaa2_hw_pvt.h>
@@ -23,6 +24,8 @@
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
+#define DPAA2_QDMA_NO_PREFETCH "no_prefetch"
+
 /* Dynamic log type identifier */
 int dpaa2_qdma_logtype;
 
@@ -43,6 +46,14 @@ static struct qdma_virt_queue *qdma_vqs;
 /* QDMA per core data */
 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
 
+typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
+					    uint16_t rxq_id,
+					    uint16_t *vq_id,
+					    struct rte_qdma_job **job,
+					    uint16_t nb_jobs);
+
+dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;
+
 static struct qdma_hw_queue *
 alloc_hw_queue(uint32_t lcore_id)
 {
@@ -608,12 +619,156 @@ static inline uint16_t dpdmai_dev_get_job(const struct qbman_fd *fd,
 	return vqid;
 }
 
+/* Function to receive a QDMA job for a given device and queue */
 static int
-dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,
-		   uint16_t rxq_id,
-		   uint16_t *vq_id,
-		   struct rte_qdma_job **job,
-		   uint16_t nb_jobs)
+dpdmai_dev_dequeue_multijob_prefetch(
+			struct dpaa2_dpdmai_dev *dpdmai_dev,
+			uint16_t rxq_id,
+			uint16_t *vq_id,
+			struct rte_qdma_job **job,
+			uint16_t nb_jobs)
+{
+	struct dpaa2_queue *rxq;
+	struct qbman_result *dq_storage, *dq_storage1 = NULL;
+	struct qbman_pull_desc pulldesc;
+	struct qbman_swp *swp;
+	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
+	uint8_t status, pending;
+	uint8_t num_rx = 0;
+	const struct qbman_fd *fd;
+	uint16_t vqid;
+	int ret, pull_size;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+	fqid = rxq->fqid;
+	q_storage = rxq->q_storage;
+
+	if (unlikely(!q_storage->active_dqs)) {
+		q_storage->toggle = 0;
+		dq_storage = q_storage->dq_storage[q_storage->toggle];
+		q_storage->last_num_pkts = pull_size;
+		qbman_pull_desc_clear(&pulldesc);
+		qbman_pull_desc_set_numframes(&pulldesc,
+					      q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
+		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+			while (!qbman_check_command_complete(
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
+				;
+			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+		}
+		while (1) {
+			if (qbman_swp_pull(swp, &pulldesc)) {
+				DPAA2_QDMA_DP_WARN(
+					"VDQ command not issued. QBMAN busy\n");
+				/* Portal was busy, try again */
+				continue;
+			}
+			break;
+		}
+		q_storage->active_dqs = dq_storage;
+		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
+				   dq_storage);
+	}
+
+	dq_storage = q_storage->active_dqs;
+	rte_prefetch0((void *)(size_t)(dq_storage));
+	rte_prefetch0((void *)(size_t)(dq_storage + 1));
+
+	/* Prepare the next pull descriptor. This leaves room for the
+	 * prefetching done on the DQRR entries.
+	 */
+	q_storage->toggle ^= 1;
+	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+	qbman_pull_desc_clear(&pulldesc);
+	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
+	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+
+	/* Check if the previously issued command has completed.
+	 * Note: the SWP appears to be shared between the Ethernet
+	 * and SEC drivers.
+	 */
+	while (!qbman_check_command_complete(dq_storage))
+		;
+	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
+		clear_swp_active_dqs(q_storage->active_dpio_id);
+
+	pending = 1;
+
+	do {
+		/* Loop until QBMAN updates dq_storage with a
+		 * new token.
+		 */
+		while (!qbman_check_new_result(dq_storage))
+			;
+		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+		/* Check whether the last pull command has expired;
+		 * if so, set the loop-termination condition.
+		 */
+		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+			pending = 0;
+			/* Check for valid frame. */
+			status = qbman_result_DQ_flags(dq_storage);
+			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
+				continue;
+		}
+		fd = qbman_result_DQ_fd(dq_storage);
+
+		vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
+		if (vq_id)
+			vq_id[num_rx] = vqid;
+
+		dq_storage++;
+		num_rx++;
+	} while (pending);
+
+	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+		while (!qbman_check_command_complete(
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+			;
+		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+	}
+	/* Issue a volatile dequeue command for the next pull. */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_QDMA_DP_WARN("VDQ command not issued. "
+					  "QBMAN busy (2)\n");
+			continue;
+		}
+		break;
+	}
+
+	q_storage->active_dqs = dq_storage1;
+	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+
+	return num_rx;
+}
+
+static int
+dpdmai_dev_dequeue_multijob_no_prefetch(
+		struct dpaa2_dpdmai_dev *dpdmai_dev,
+		uint16_t rxq_id,
+		uint16_t *vq_id,
+		struct rte_qdma_job **job,
+		uint16_t nb_jobs)
 {
 	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage;
@@ -958,6 +1113,43 @@ dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
 	return 0;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	if (strcmp(value, "1"))
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
+}
+
 static int
 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
 {
@@ -1060,6 +1252,17 @@ dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
 		goto init_err;
 	}
 
+	if (dpaa2_get_devargs(rawdev->device->devargs,
+		DPAA2_QDMA_NO_PREFETCH)) {
+		/* No-prefetch Rx mode requested via devargs. */
+		dpdmai_dev_dequeue_multijob =
+				dpdmai_dev_dequeue_multijob_no_prefetch;
+		DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
+	} else {
+		dpdmai_dev_dequeue_multijob =
+			dpdmai_dev_dequeue_multijob_prefetch;
+	}
+
 	if (!dpaa2_coherent_no_alloc_cache) {
 		if (dpaa2_svr_family == SVR_LX2160A) {
 			dpaa2_coherent_no_alloc_cache =
@@ -1139,6 +1342,8 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
+	"no_prefetch=<int>");
 
 RTE_INIT(dpaa2_qdma_init_log)
 {
diff --git a/drivers/raw/dpaa2_qdma/meson.build b/drivers/raw/dpaa2_qdma/meson.build
index 2a4b69c16..1577946fa 100644
--- a/drivers/raw/dpaa2_qdma/meson.build
+++ b/drivers/raw/dpaa2_qdma/meson.build
@@ -4,7 +4,7 @@
 version = 2
 
 build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
-deps += ['rawdev', 'mempool_dpaa2', 'ring']
+deps += ['rawdev', 'mempool_dpaa2', 'ring', 'kvargs']
 sources = files('dpaa2_qdma.c')
 
 allow_experimental_apis = true
-- 
2.17.1
