All of lore.kernel.org
 help / color / mirror / Atom feed
From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
To: gage.eads@intel.com, jerin.jacobkollanukkaran@cavium.com,
	harry.van.haaren@intel.com, nikhil.rao@intel.com,
	hemant.agrawal@nxp.com, liang.j.ma@intel.com
Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Subject: [PATCH 01/13] examples/eventdev: add Rx adapter support
Date: Fri,  8 Dec 2017 02:06:53 +0530	[thread overview]
Message-ID: <20171207203705.25020-2-pbhagavatula@caviumnetworks.com> (raw)
In-Reply-To: <20171207203705.25020-1-pbhagavatula@caviumnetworks.com>

Use the event Rx adapter for packet Rx instead of explicit producer logic.
Use the service run iteration function for granular control instead of
using a dedicated service lcore.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 examples/eventdev_pipeline_sw_pmd/main.c | 149 +++++++++++++++++--------------
 1 file changed, 82 insertions(+), 67 deletions(-)

diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 5f431d87d..bb87c9544 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -46,6 +46,7 @@
 #include <rte_cycles.h>
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_service.h>
 
 #define MAX_NUM_STAGES 8
@@ -78,6 +79,7 @@ struct fastpath_data {
 	uint32_t tx_lock;
 	uint32_t sched_lock;
 	uint32_t evdev_service_id;
+	uint32_t rxadptr_service_id;
 	bool rx_single;
 	bool tx_single;
 	bool sched_single;
@@ -105,6 +107,7 @@ struct config_data {
 	unsigned int worker_cq_depth;
 	int16_t next_qid[MAX_NUM_STAGES+2];
 	int16_t qid[MAX_NUM_STAGES];
+	uint8_t rx_adapter_id;
 };
 
 static struct config_data cdata = {
@@ -193,53 +196,12 @@ consumer(void)
 	return 0;
 }
 
-static int
-producer(void)
-{
-	static uint8_t eth_port;
-	struct rte_mbuf *mbufs[BATCH_SIZE+2];
-	struct rte_event ev[BATCH_SIZE+2];
-	uint32_t i, num_ports = prod_data.num_nic_ports;
-	int32_t qid = prod_data.qid;
-	uint8_t dev_id = prod_data.dev_id;
-	uint8_t port_id = prod_data.port_id;
-	uint32_t prio_idx = 0;
-
-	const uint16_t nb_rx = rte_eth_rx_burst(eth_port, 0, mbufs, BATCH_SIZE);
-	if (++eth_port == num_ports)
-		eth_port = 0;
-	if (nb_rx == 0) {
-		rte_pause();
-		return 0;
-	}
-
-	for (i = 0; i < nb_rx; i++) {
-		ev[i].flow_id = mbufs[i]->hash.rss;
-		ev[i].op = RTE_EVENT_OP_NEW;
-		ev[i].sched_type = cdata.queue_type;
-		ev[i].queue_id = qid;
-		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
-		ev[i].sub_event_type = 0;
-		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
-		ev[i].mbuf = mbufs[i];
-		RTE_SET_USED(prio_idx);
-	}
-
-	const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
-	if (nb_tx != nb_rx) {
-		for (i = nb_tx; i < nb_rx; i++)
-			rte_pktmbuf_free(mbufs[i]);
-	}
-
-	return 0;
-}
-
 static inline void
 schedule_devices(unsigned int lcore_id)
 {
 	if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
 	    rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
-		producer();
+		rte_service_run_iter_on_app_lcore(fdata->rxadptr_service_id, 1);
 		rte_atomic32_clear((rte_atomic32_t *)&(fdata->rx_lock));
 	}
 
@@ -553,6 +515,79 @@ parse_app_args(int argc, char **argv)
 	}
 }
 
+static inline void
+init_rx_adapter(uint16_t nb_ports)
+{
+	int i;
+	int ret;
+	uint8_t evdev_id = 0;
+	uint8_t port_needed = 0;
+	struct rte_event_dev_info dev_info;
+
+	ret = rte_event_dev_info_get(evdev_id, &dev_info);
+
+	struct rte_event_port_conf rx_p_conf = {
+		.dequeue_depth = 8,
+		.enqueue_depth = 8,
+		.new_event_threshold = 1200,
+	};
+
+	if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+		rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+	if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+		rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+	ret = rte_event_eth_rx_adapter_create(cdata.rx_adapter_id, evdev_id,
+			&rx_p_conf);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "failed to create rx adapter[%d]",
+				cdata.rx_adapter_id);
+
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+		.ev.sched_type = cdata.queue_type,
+		.ev.queue_id = cdata.qid[0],
+	};
+
+	for (i = 0; i < nb_ports; i++) {
+		uint32_t cap;
+
+		ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+					"failed to get event rx adapter "
+					"capabilities");
+		/* Producer needs port. */
+		port_needed |= !(cap &
+				RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+
+		ret = rte_event_eth_rx_adapter_queue_add(cdata.rx_adapter_id, i,
+				-1, &queue_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+					"Failed to add queues to Rx adapter");
+	}
+
+	if (port_needed)
+		prod_data.port_id = cons_data.port_id + 1;
+	prod_data.dev_id = evdev_id;
+	prod_data.qid = cdata.qid[0];
+
+	ret = rte_event_eth_rx_adapter_service_id_get(cdata.rx_adapter_id,
+				&fdata->rxadptr_service_id);
+	if (ret != -ESRCH && ret != 0) {
+		rte_exit(EXIT_FAILURE,
+			"Error getting the service ID for Rx adapter\n");
+	}
+	rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+	rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id, 0);
+
+	ret = rte_event_eth_rx_adapter_start(cdata.rx_adapter_id);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+				cdata.rx_adapter_id);
+
+}
+
 /*
  * Initializes a given port using global settings and with the RX buffers
  * coming from the mbuf_pool passed as a parameter.
@@ -663,15 +698,14 @@ struct port_link {
 };
 
 static int
-setup_eventdev(struct prod_data *prod_data,
-		struct cons_data *cons_data,
+setup_eventdev(struct cons_data *cons_data,
 		struct worker_data *worker_data)
 {
 	const uint8_t dev_id = 0;
 	/* +1 stages is for a SINGLE_LINK TX stage */
 	const uint8_t nb_queues = cdata.num_stages + 1;
-	/* + 2 is one port for producer and one for consumer */
-	const uint8_t nb_ports = cdata.num_workers + 2;
+	/* + 1 for consumer */
+	const uint8_t nb_ports = cdata.num_workers + 1;
 	struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
@@ -818,26 +852,6 @@ setup_eventdev(struct prod_data *prod_data,
 				__LINE__, i);
 		return -1;
 	}
-	/* port for producer, no links */
-	struct rte_event_port_conf rx_p_conf = {
-			.dequeue_depth = 8,
-			.enqueue_depth = 8,
-			.new_event_threshold = 1200,
-	};
-
-	if (rx_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
-		rx_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
-	if (rx_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
-		rx_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
-
-	if (rte_event_port_setup(dev_id, i + 1, &rx_p_conf) < 0) {
-		printf("Error setting up port %d\n", i);
-		return -1;
-	}
-
-	*prod_data = (struct prod_data){.dev_id = dev_id,
-					.port_id = i + 1,
-					.qid = cdata.qid[0] };
 	*cons_data = (struct cons_data){.dev_id = dev_id,
 					.port_id = i };
 
@@ -940,12 +954,13 @@ main(int argc, char **argv)
 	if (worker_data == NULL)
 		rte_panic("rte_calloc failed\n");
 
-	int dev_id = setup_eventdev(&prod_data, &cons_data, worker_data);
+	int dev_id = setup_eventdev(&cons_data, worker_data);
 	if (dev_id < 0)
 		rte_exit(EXIT_FAILURE, "Error setting up eventdev\n");
 
 	prod_data.num_nic_ports = num_ports;
 	init_ports(num_ports);
+	init_rx_adapter(num_ports);
 
 	int worker_idx = 0;
 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-- 
2.14.1

  reply	other threads:[~2017-12-07 20:38 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-12-07 20:36 [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
2017-12-07 20:36 ` Pavan Nikhilesh [this message]
2017-12-11 16:15   ` [PATCH 01/13] examples/eventdev: add Rx adapter support Eads, Gage
2017-12-12  8:17     ` Pavan Nikhilesh Bhagavatula
2017-12-12 15:59       ` Eads, Gage
2017-12-07 20:36 ` [PATCH 02/13] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
2017-12-11 16:15   ` Eads, Gage
2017-12-12  8:19     ` Pavan Nikhilesh Bhagavatula
2017-12-07 20:36 ` [PATCH 03/13] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
2017-12-07 20:36 ` [PATCH 04/13] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
2017-12-07 20:36 ` [PATCH 05/13] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
2017-12-19 11:23   ` Van Haaren, Harry
2017-12-07 20:36 ` [PATCH 06/13] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
2017-12-19 13:26   ` Van Haaren, Harry
2017-12-19 19:01     ` Pavan Nikhilesh
2017-12-07 20:36 ` [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
2017-12-19 12:00   ` Van Haaren, Harry
2017-12-19 18:55     ` Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 08/13] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 09/13] examples/eventdev: add all type queue option Pavan Nikhilesh
2017-12-19 13:18   ` Van Haaren, Harry
2017-12-19 19:05     ` Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 10/13] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
2017-12-11 16:45   ` Eads, Gage
2017-12-07 20:37 ` [PATCH 11/13] examples/eventdev: add atq " Pavan Nikhilesh
2017-12-19 13:34   ` Van Haaren, Harry
2017-12-07 20:37 ` [PATCH 12/13] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 13/13] doc: update example eventdev_pipeline Pavan Nikhilesh
2017-12-11 11:29   ` Laatz, Kevin
2018-01-10 11:09 ` [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 02/15] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 03/15] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 04/15] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 05/15] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 06/15] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 07/15] examples/eventdev: modify work cycles Pavan Nikhilesh
2018-01-15 10:14     ` Van Haaren, Harry
2018-01-10 11:10   ` [PATCH v2 08/15] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 09/15] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 10/15] examples/eventdev: add all type queue option Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 11/15] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 12/15] examples/eventdev: add atq " Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 13/15] examples/eventdev: add mempool size configuration Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 14/15] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 15/15] doc: update example eventdev pipeline Pavan Nikhilesh
2018-01-16 11:34     ` Kovacevic, Marko
2018-01-16 10:35   ` [PATCH v2 01/15] examples/eventdev: add Rx adapter support Van Haaren, Harry
2018-01-16 16:12     ` Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20171207203705.25020-2-pbhagavatula@caviumnetworks.com \
    --to=pbhagavatula@caviumnetworks.com \
    --cc=dev@dpdk.org \
    --cc=gage.eads@intel.com \
    --cc=harry.van.haaren@intel.com \
    --cc=hemant.agrawal@nxp.com \
    --cc=jerin.jacobkollanukkaran@cavium.com \
    --cc=liang.j.ma@intel.com \
    --cc=nikhil.rao@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.