All of lore.kernel.org
 help / color / mirror / Atom feed
From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
To: gage.eads@intel.com, jerin.jacobkollanukkaran@cavium.com,
	harry.van.haaren@intel.com, nikhil.rao@intel.com,
	hemant.agrawal@nxp.com, liang.j.ma@intel.com
Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Subject: [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline
Date: Fri,  8 Dec 2017 02:06:59 +0530	[thread overview]
Message-ID: <20171207203705.25020-8-pbhagavatula@caviumnetworks.com> (raw)
In-Reply-To: <20171207203705.25020-1-pbhagavatula@caviumnetworks.com>

Add worker pipeline when Tx is multi thread safe.
Probe Ethernet dev capabilities and select it if it is supported.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 examples/eventdev_pipeline_sw_pmd/Makefile         |   1 +
 examples/eventdev_pipeline_sw_pmd/main.c           |  18 +-
 .../eventdev_pipeline_sw_pmd/pipeline_common.h     |   2 +
 .../eventdev_pipeline_sw_pmd/pipeline_worker_tx.c  | 433 +++++++++++++++++++++
 4 files changed, 452 insertions(+), 2 deletions(-)
 create mode 100644 examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c

diff --git a/examples/eventdev_pipeline_sw_pmd/Makefile b/examples/eventdev_pipeline_sw_pmd/Makefile
index 5e30556fb..59ee9840a 100644
--- a/examples/eventdev_pipeline_sw_pmd/Makefile
+++ b/examples/eventdev_pipeline_sw_pmd/Makefile
@@ -43,6 +43,7 @@ APP = eventdev_pipeline_sw_pmd
 # all source are stored in SRCS-y
 SRCS-y := main.c
 SRCS-y += pipeline_worker_generic.c
+SRCS-y += pipeline_worker_tx.c
 
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index 153467893..3be981c15 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -382,9 +382,20 @@ init_ports(unsigned int num_ports)
 static void
 do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
 {
-	RTE_SET_USED(nb_ethdev);
+	int i;
+	uint8_t mt_unsafe = 0;
 	uint8_t burst = 0;
 
+	for (i = 0; i < nb_ethdev; i++) {
+		struct rte_eth_dev_info dev_info;
+		memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
+
+		rte_eth_dev_info_get(i, &dev_info);
+		/* Check if it is safe to ask a worker to tx. */
+		mt_unsafe |= !(dev_info.tx_offload_capa &
+				DEV_TX_OFFLOAD_MT_LOCKFREE);
+	}
+
 	struct rte_event_dev_info eventdev_info;
 	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
 
@@ -392,7 +403,10 @@ do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
 	burst = eventdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE ? 1 :
 		0;
 
-	set_worker_generic_setup_data(&fdata->cap, burst);
+	if (mt_unsafe)
+		set_worker_generic_setup_data(&fdata->cap, burst);
+	else
+		set_worker_tx_setup_data(&fdata->cap, burst);
 }
 
 static void
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
index a5837c99b..0b27d1eb0 100644
--- a/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_common.h
@@ -108,6 +108,7 @@ struct config_data {
 	int dump_dev_signal;
 	unsigned int num_stages;
 	unsigned int worker_cq_depth;
+	unsigned int rx_stride;
 	int16_t next_qid[MAX_NUM_STAGES+2];
 	int16_t qid[MAX_NUM_STAGES];
 	uint8_t rx_adapter_id;
@@ -178,3 +179,4 @@ schedule_devices(unsigned int lcore_id)
 }
 
 void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
+void set_worker_tx_setup_data(struct setup_data *caps, bool burst);
diff --git a/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
new file mode 100644
index 000000000..31b7d8936
--- /dev/null
+++ b/examples/eventdev_pipeline_sw_pmd/pipeline_worker_tx.c
@@ -0,0 +1,433 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright 2016 Cavium, Inc.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pipeline_common.h"
+
+/* Turn the event into a FORWARD operation from this worker: mark it as
+ * CPU-generated and set the scheduling type requested for the next stage.
+ */
+static __rte_always_inline void
+worker_fwd_event(struct rte_event *ev, uint8_t sched)
+{
+	ev->event_type = RTE_EVENT_TYPE_CPU;
+	ev->op = RTE_EVENT_OP_FORWARD;
+	ev->sched_type = sched;
+}
+
+/* Enqueue a single event, busy-waiting until the event device accepts it. */
+static __rte_always_inline void
+worker_event_enqueue(const uint8_t dev, const uint8_t port,
+		struct rte_event *ev)
+{
+	while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
+		rte_pause();
+}
+
+/* Transmit one mbuf on its originating port, busy-waiting until the PMD
+ * accepts it.  Always uses Tx queue 0 from every worker concurrently;
+ * this relies on the DEV_TX_OFFLOAD_MT_LOCKFREE capability probed in
+ * do_capability_setup() before this pipeline is selected.
+ */
+static __rte_always_inline void
+worker_tx_pkt(struct rte_mbuf *mbuf)
+{
+	while (rte_eth_tx_burst(mbuf->port, 0, &mbuf, 1) != 1)
+		rte_pause();
+}
+
+/* Multi stage Pipeline Workers */
+
+/* Worker loop for the thread-safe-Tx pipeline: dequeue one event at a
+ * time, apply the dummy work() on it and either forward it to the next
+ * stage queue or, once the final atomic stage is reached, transmit the
+ * mbuf directly from this worker (no separate consumer core).
+ */
+static int
+worker_do_tx(void *arg)
+{
+	struct rte_event ev;
+
+	struct worker_data *data = (struct worker_data *)arg;
+	const uint8_t dev = data->dev_id;
+	const uint8_t port = data->port_id;
+	const uint8_t lst_qid = cdata.num_stages - 1;
+	size_t fwd = 0, received = 0, tx = 0;
+
+
+	while (!fdata->done) {
+
+		if (!rte_event_dequeue_burst(dev, port, &ev, 1, 0)) {
+			rte_pause();
+			continue;
+		}
+
+		received++;
+		/* Stage index within this eth port's queue group.
+		 * NOTE(review): setup_eventdev_w() lays queues out with a
+		 * stride of num_stages + 1 per port, but the stage is
+		 * recovered here with a modulo of num_stages — confirm the
+		 * two agree for multi-port configurations.
+		 */
+		const uint8_t cq_id = ev.queue_id % cdata.num_stages;
+
+		if (cq_id >= lst_qid) {
+			/* Final stage reached: an ATOMIC event can be
+			 * transmitted straight away, anything else is
+			 * re-enqueued with ATOMIC scheduling first.
+			 */
+			if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+				worker_tx_pkt(ev.mbuf);
+				tx++;
+				continue;
+			}
+
+			worker_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+			/* NOTE(review): cq_id < num_stages always holds, so
+			 * cq_id >= lst_qid implies cq_id == lst_qid and this
+			 * ternary always selects next_qid — confirm intent.
+			 */
+			ev.queue_id = (cq_id == lst_qid) ?
+				cdata.next_qid[ev.queue_id] : ev.queue_id;
+		} else {
+			/* Intermediate stage: forward to the next queue. */
+			ev.queue_id = cdata.next_qid[ev.queue_id];
+			worker_fwd_event(&ev, cdata.queue_type);
+		}
+		work(ev.mbuf);
+
+		worker_event_enqueue(dev, port, &ev);
+		fwd++;
+	}
+
+	if (!cdata.quiet)
+		printf("  worker %u thread done. RX=%zu FWD=%zu TX=%zu\n",
+				rte_lcore_id(), received, fwd, tx);
+
+	return 0;
+}
+
+/* Configure the event device for the thread-safe-Tx pipeline: one event
+ * port per worker, each linked to every queue, with per-port stage queues
+ * and a trailing atomic queue used to serialize Tx.  Returns the event
+ * device id on success, -1 on failure.  prod/cons data are unused here as
+ * producers are Rx adapters and workers transmit directly.
+ */
+static int
+setup_eventdev_w(struct prod_data *prod_data,
+		struct cons_data *cons_data,
+		struct worker_data *worker_data)
+{
+	RTE_SET_USED(prod_data);
+	RTE_SET_USED(cons_data);
+	uint8_t i;
+	const uint8_t dev_id = 0;
+	const uint8_t nb_ports = cdata.num_workers;
+	uint8_t nb_slots = 0;
+	/* NOTE(review): the loop below sizes slots as num_stages + 1 per eth
+	 * port (extra atomic Tx queue), but nb_queues only reserves
+	 * num_stages per port — confirm the queue count is sufficient.
+	 */
+	uint8_t nb_queues = rte_eth_dev_count() * cdata.num_stages;
+
+	struct rte_event_dev_config config = {
+			.nb_event_queues = nb_queues,
+			.nb_event_ports = nb_ports,
+			.nb_events_limit  = 4096,
+			.nb_event_queue_flows = 1024,
+			.nb_event_port_dequeue_depth = 128,
+			.nb_event_port_enqueue_depth = 128,
+	};
+	struct rte_event_port_conf wkr_p_conf = {
+			.dequeue_depth = cdata.worker_cq_depth,
+			.enqueue_depth = 64,
+			.new_event_threshold = 4096,
+	};
+	struct rte_event_queue_conf wkr_q_conf = {
+			.schedule_type = cdata.queue_type,
+			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+			.nb_atomic_flows = 1024,
+			.nb_atomic_order_sequences = 1024,
+	};
+
+	int ret, ndev = rte_event_dev_count();
+
+	if (ndev < 1) {
+		printf("%d: No Eventdev Devices Found\n", __LINE__);
+		return -1;
+	}
+
+
+	struct rte_event_dev_info dev_info;
+	/* NOTE(review): return value ignored here; ret is overwritten by
+	 * rte_event_dev_configure() below.
+	 */
+	ret = rte_event_dev_info_get(dev_id, &dev_info);
+	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
+
+	/* Clamp the requested depths to what the device supports. */
+	if (dev_info.max_event_port_dequeue_depth <
+			config.nb_event_port_dequeue_depth)
+		config.nb_event_port_dequeue_depth =
+				dev_info.max_event_port_dequeue_depth;
+	if (dev_info.max_event_port_enqueue_depth <
+			config.nb_event_port_enqueue_depth)
+		config.nb_event_port_enqueue_depth =
+				dev_info.max_event_port_enqueue_depth;
+
+	ret = rte_event_dev_configure(dev_id, &config);
+	if (ret < 0) {
+		printf("%d: Error configuring device\n", __LINE__);
+		return -1;
+	}
+
+	printf("  Stages:\n");
+	for (i = 0; i < nb_queues; i++) {
+
+		uint8_t slot;
+
+		/* Queues are grouped per eth port: num_stages worker queues
+		 * followed by one atomic queue used for serialized Tx.
+		 */
+		nb_slots = cdata.num_stages + 1;
+		slot = i % nb_slots;
+		wkr_q_conf.schedule_type = slot == cdata.num_stages ?
+			RTE_SCHED_TYPE_ATOMIC : cdata.queue_type;
+
+		if (rte_event_queue_setup(dev_id, i, &wkr_q_conf) < 0) {
+			printf("%d: error creating qid %d\n", __LINE__, i);
+			return -1;
+		}
+		/* NOTE(review): qid[]/next_qid[] are sized by MAX_NUM_STAGES;
+		 * with multiple eth ports i can exceed that — verify bounds.
+		 */
+		cdata.qid[i] = i;
+		cdata.next_qid[i] = i+1;
+		if (cdata.enable_queue_priorities) {
+			const uint32_t prio_delta =
+				(RTE_EVENT_DEV_PRIORITY_LOWEST) /
+				nb_slots;
+
+			/* higher priority for queues closer to tx */
+			/* NOTE(review): priority is written after
+			 * rte_event_queue_setup() for this qid, so it only
+			 * takes effect from the next queue on — confirm.
+			 */
+			wkr_q_conf.priority =
+				RTE_EVENT_DEV_PRIORITY_LOWEST - prio_delta *
+				(i % nb_slots);
+		}
+
+		const char *type_str = "Atomic";
+		switch (wkr_q_conf.schedule_type) {
+		case RTE_SCHED_TYPE_ORDERED:
+			type_str = "Ordered";
+			break;
+		case RTE_SCHED_TYPE_PARALLEL:
+			type_str = "Parallel";
+			break;
+		}
+		printf("\tStage %d, Type %s\tPriority = %d\n", i, type_str,
+				wkr_q_conf.priority);
+	}
+
+	printf("\n");
+	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
+		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
+	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
+		wkr_p_conf.enqueue_depth = config.nb_event_port_enqueue_depth;
+
+	/* set up one port per worker, linking to all stage queues */
+	for (i = 0; i < cdata.num_workers; i++) {
+		struct worker_data *w = &worker_data[i];
+		w->dev_id = dev_id;
+		if (rte_event_port_setup(dev_id, i, &wkr_p_conf) < 0) {
+			printf("Error setting up port %d\n", i);
+			return -1;
+		}
+
+		if (rte_event_port_link(dev_id, i, NULL, NULL, 0)
+				!= nb_queues) {
+			printf("%d: error creating link for port %d\n",
+					__LINE__, i);
+			return -1;
+		}
+		w->port_id = i;
+	}
+
+	/* Distance between the stage-0 queues of consecutive eth ports;
+	 * consumed by init_rx_adapter().  Zero if no queues were created.
+	 */
+	cdata.rx_stride = nb_slots;
+	ret = rte_event_dev_service_id_get(dev_id,
+				&fdata->evdev_service_id);
+	/* -ESRCH means the device needs no service core; not an error. */
+	if (ret != -ESRCH && ret != 0) {
+		printf("Error getting the service ID for sw eventdev\n");
+		return -1;
+	}
+	rte_service_runstate_set(fdata->evdev_service_id, 1);
+	rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
+	if (rte_event_dev_start(dev_id) < 0) {
+		printf("Error starting eventdev\n");
+		return -1;
+	}
+
+	return dev_id;
+}
+
+
+/* Collected software Rx adapter services (those without an internal
+ * event port) to be driven by the registered "rx_service" callback.
+ */
+struct rx_adptr_services {
+	uint16_t nb_rx_adptrs;
+	uint32_t *rx_adpt_arr;
+};
+
+/* Service callback: run one iteration of every collected Rx adapter
+ * service on the calling lcore.
+ */
+static int32_t
+service_rx_adapter(void *arg)
+{
+	int i;
+	struct rx_adptr_services *adptr_services = arg;
+
+	for (i = 0; i < adptr_services->nb_rx_adptrs; i++)
+		rte_service_run_iter_on_app_lcore(
+				adptr_services->rx_adpt_arr[i], 1);
+	return 0;
+}
+
+/* Create one Rx adapter per ethernet port and feed it into the first
+ * stage queue of that port (spaced by cdata.rx_stride when set).
+ * Adapters without an internal event port need a software service; those
+ * service ids are collected in adptr_services and driven by a single
+ * registered "rx_service" component.  Exits the application on failure.
+ */
+static void
+init_rx_adapter(uint16_t nb_ports)
+{
+	int i;
+	int ret;
+	uint8_t evdev_id = 0;
+	struct rx_adptr_services *adptr_services = NULL;
+	struct rte_event_dev_info dev_info;
+
+	ret = rte_event_dev_info_get(evdev_id, &dev_info);
+	if (ret)
+		rte_exit(EXIT_FAILURE, "failed to get event device info");
+
+	adptr_services = rte_zmalloc(NULL, sizeof(struct rx_adptr_services), 0);
+	if (adptr_services == NULL)
+		rte_exit(EXIT_FAILURE,
+				"failed to allocate rx adapter service data");
+
+	struct rte_event_port_conf rx_p_conf = {
+		.dequeue_depth = 8,
+		.enqueue_depth = 8,
+		.new_event_threshold = 1200,
+	};
+
+	/* Clamp the adapter port config to the device limits. */
+	if (rx_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
+		rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+	if (rx_p_conf.enqueue_depth > dev_info.max_event_port_enqueue_depth)
+		rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf = {
+		.ev.sched_type = cdata.queue_type,
+	};
+
+	for (i = 0; i < nb_ports; i++) {
+		uint32_t cap;
+		uint32_t service_id;
+		uint32_t *adpt_arr;
+
+		ret = rte_event_eth_rx_adapter_create(i, evdev_id, &rx_p_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+					"failed to create rx adapter[%d]",
+					cdata.rx_adapter_id);
+
+		ret = rte_event_eth_rx_adapter_caps_get(evdev_id, i, &cap);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+					"failed to get event rx adapter "
+					"capabilities");
+
+		/* Inject into the first stage queue of this eth port. */
+		queue_conf.ev.queue_id = cdata.rx_stride ?
+			(i * cdata.rx_stride)
+			: (uint8_t)cdata.qid[0];
+
+		/* -1: add all Rx queues of ethdev i to the adapter. */
+		ret = rte_event_eth_rx_adapter_queue_add(i, i, -1, &queue_conf);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+					"Failed to add queues to Rx adapter");
+
+
+		/* Producer needs to be scheduled. */
+		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+			ret = rte_event_eth_rx_adapter_service_id_get(i,
+					&service_id);
+			/* -ESRCH: adapter needs no service; not an error. */
+			if (ret != -ESRCH && ret != 0) {
+				rte_exit(EXIT_FAILURE,
+				"Error getting the service ID for rx adptr\n");
+			}
+
+			rte_service_runstate_set(service_id, 1);
+			rte_service_set_runstate_mapped_check(service_id, 0);
+
+			/* Grow the service-id array; keep the old pointer
+			 * until the realloc is known to have succeeded so a
+			 * failure does not leak or clobber it.
+			 */
+			adpt_arr = rte_realloc(adptr_services->rx_adpt_arr,
+					(adptr_services->nb_rx_adptrs + 1) *
+					sizeof(uint32_t), 0);
+			if (adpt_arr == NULL)
+				rte_exit(EXIT_FAILURE,
+					"failed to grow rx adapter service "
+					"array");
+			adptr_services->rx_adpt_arr = adpt_arr;
+			adptr_services->rx_adpt_arr[
+				adptr_services->nb_rx_adptrs] = service_id;
+			adptr_services->nb_rx_adptrs++;
+		}
+
+		ret = rte_event_eth_rx_adapter_start(i);
+		if (ret)
+			rte_exit(EXIT_FAILURE, "Rx adapter[%d] start failed",
+					cdata.rx_adapter_id);
+	}
+
+	prod_data.dev_id = evdev_id;
+	prod_data.qid = 0;
+
+	/* Decide whether the scheduler loop is needed *before*
+	 * adptr_services may be freed below (previously this was checked
+	 * after rte_free() — a use after free).
+	 */
+	if (!adptr_services->nb_rx_adptrs && fdata->cap.consumer_loop == NULL &&
+			(dev_info.event_dev_cap &
+			 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))
+		fdata->cap.schedule_loop = NULL;
+
+	if (adptr_services->nb_rx_adptrs) {
+		struct rte_service_spec service;
+
+		memset(&service, 0, sizeof(struct rte_service_spec));
+		snprintf(service.name, sizeof(service.name), "rx_service");
+		service.callback = service_rx_adapter;
+		service.callback_userdata = (void *)adptr_services;
+
+		int32_t ret = rte_service_component_register(&service,
+				&fdata->rxadptr_service_id);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+				"Rx adapter[%d] service register failed",
+				cdata.rx_adapter_id);
+
+		rte_service_runstate_set(fdata->rxadptr_service_id, 1);
+		rte_service_component_runstate_set(fdata->rxadptr_service_id,
+				1);
+		rte_service_set_runstate_mapped_check(fdata->rxadptr_service_id,
+				0);
+	} else {
+		rte_free(adptr_services);
+	}
+}
+
+/* Validate the command-line core masks for this pipeline: worker cores
+ * are mandatory; an Rx core is needed only if some port's Rx adapter
+ * lacks an internal event port; a scheduler core is needed only if the
+ * event device cannot schedule on its own (no DISTRIBUTED_SCHED cap).
+ */
+static void
+opt_check(void)
+{
+	int i;
+	int ret;
+	uint32_t cap = 0;
+	uint8_t rx_needed = 0;
+	struct rte_event_dev_info eventdev_info;
+
+	memset(&eventdev_info, 0, sizeof(struct rte_event_dev_info));
+	rte_event_dev_info_get(0, &eventdev_info);
+
+	for (i = 0; i < rte_eth_dev_count(); i++) {
+		ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
+		if (ret)
+			rte_exit(EXIT_FAILURE,
+					"failed to get event rx adapter "
+					"capabilities");
+		/* Any port without an internal adapter port needs an Rx
+		 * service lcore.
+		 */
+		rx_needed |=
+			!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
+	}
+
+	if (cdata.worker_lcore_mask == 0 ||
+			(rx_needed && cdata.rx_lcore_mask == 0) ||
+			(cdata.sched_lcore_mask == 0 &&
+			 !(eventdev_info.event_dev_cap &
+				 RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED))) {
+		/* NOTE(review): the message also prints the tx mask even
+		 * though this pipeline transmits from workers — harmless,
+		 * but possibly confusing.
+		 */
+		printf("Core part of pipeline was not assigned any cores. "
+			"This will stall the pipeline, please check core masks "
+			"(use -h for details on setting core masks):\n"
+			"\trx: %"PRIu64"\n\ttx: %"PRIu64"\n\tsched: %"PRIu64
+			"\n\tworkers: %"PRIu64"\n",
+			cdata.rx_lcore_mask, cdata.tx_lcore_mask,
+			cdata.sched_lcore_mask,
+			cdata.worker_lcore_mask);
+		rte_exit(-1, "Fix core masks\n");
+	}
+}
+
+/* Install the thread-safe-Tx pipeline callbacks: workers transmit
+ * directly, so no consumer loop is set.  The burst capability flag is
+ * currently unused by this pipeline.
+ */
+void
+set_worker_tx_setup_data(struct setup_data *caps, bool burst)
+{
+	RTE_SET_USED(burst);
+	caps->worker_loop = worker_do_tx;
+
+	caps->opt_check = opt_check;
+	caps->consumer_loop = NULL;
+	caps->schedule_loop = schedule_devices;
+	caps->eventdev_setup = setup_eventdev_w;
+	caps->rx_adapter_setup = init_rx_adapter;
+}
-- 
2.14.1

  parent reply	other threads:[~2017-12-07 20:39 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-12-07 20:36 [PATCH 00/13] examples/eventdev: add capability based pipeline support Pavan Nikhilesh
2017-12-07 20:36 ` [PATCH 01/13] examples/eventdev: add Rx adapter support Pavan Nikhilesh
2017-12-11 16:15   ` Eads, Gage
2017-12-12  8:17     ` Pavan Nikhilesh Bhagavatula
2017-12-12 15:59       ` Eads, Gage
2017-12-07 20:36 ` [PATCH 02/13] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
2017-12-11 16:15   ` Eads, Gage
2017-12-12  8:19     ` Pavan Nikhilesh Bhagavatula
2017-12-07 20:36 ` [PATCH 03/13] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
2017-12-07 20:36 ` [PATCH 04/13] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
2017-12-07 20:36 ` [PATCH 05/13] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
2017-12-19 11:23   ` Van Haaren, Harry
2017-12-07 20:36 ` [PATCH 06/13] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
2017-12-19 13:26   ` Van Haaren, Harry
2017-12-19 19:01     ` Pavan Nikhilesh
2017-12-07 20:36 ` Pavan Nikhilesh [this message]
2017-12-19 12:00   ` [PATCH 07/13] examples/eventdev: add thread safe Tx worker pipeline Van Haaren, Harry
2017-12-19 18:55     ` Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 08/13] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 09/13] examples/eventdev: add all type queue option Pavan Nikhilesh
2017-12-19 13:18   ` Van Haaren, Harry
2017-12-19 19:05     ` Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 10/13] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
2017-12-11 16:45   ` Eads, Gage
2017-12-07 20:37 ` [PATCH 11/13] examples/eventdev: add atq " Pavan Nikhilesh
2017-12-19 13:34   ` Van Haaren, Harry
2017-12-07 20:37 ` [PATCH 12/13] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
2017-12-07 20:37 ` [PATCH 13/13] doc: update example eventdev_pipeline Pavan Nikhilesh
2017-12-11 11:29   ` Laatz, Kevin
2018-01-10 11:09 ` [PATCH v2 01/15] examples/eventdev: add Rx adapter support Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 02/15] examples/eventdev: move common data into pipeline common Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 03/15] examples/eventdev: add framework for caps based pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 04/15] examples/eventdev: add generic worker pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 05/15] examples/eventdev: add ops to check cmdline args Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 06/15] examples/eventdev: add non burst mode generic worker Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 07/15] examples/eventdev: modify work cycles Pavan Nikhilesh
2018-01-15 10:14     ` Van Haaren, Harry
2018-01-10 11:10   ` [PATCH v2 08/15] examples/eventdev: add thread safe Tx worker pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 09/15] examples/eventdev: add burst for thread safe pipeline Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 10/15] examples/eventdev: add all type queue option Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 11/15] examples/eventdev: add single stage pipeline worker Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 12/15] examples/eventdev: add atq " Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 13/15] examples/eventdev: add mempool size configuration Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 14/15] examples/eventdev_pipeline_sw_pmd: rename example Pavan Nikhilesh
2018-01-10 11:10   ` [PATCH v2 15/15] doc: update example eventdev pipeline Pavan Nikhilesh
2018-01-16 11:34     ` Kovacevic, Marko
2018-01-16 10:35   ` [PATCH v2 01/15] examples/eventdev: add Rx adapter support Van Haaren, Harry
2018-01-16 16:12     ` Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20171207203705.25020-8-pbhagavatula@caviumnetworks.com \
    --to=pbhagavatula@caviumnetworks.com \
    --cc=dev@dpdk.org \
    --cc=gage.eads@intel.com \
    --cc=harry.van.haaren@intel.com \
    --cc=hemant.agrawal@nxp.com \
    --cc=jerin.jacobkollanukkaran@cavium.com \
    --cc=liang.j.ma@intel.com \
    --cc=nikhil.rao@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.