From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v5 20/35] event/cnxk: add SSO selftest and dump
Date: Tue, 4 May 2021 05:57:10 +0530
Message-ID: <20210504002726.525-21-pbhagavatula@marvell.com>
In-Reply-To: <20210504002726.525-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add a selftest to verify the sanity of the SSO and a function to
dump its internal state.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
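For reference, the new hooks are reachable through the standard eventdev
API. A minimal, illustrative sketch, assuming the "event_cn9k" device name
used by this series (substitute "event_cn10k" on CN10K):

	#include <stdio.h>
	#include <rte_eventdev.h>

	static int
	run_sso_selftest_and_dump(void)
	{
		int dev_id = rte_event_dev_get_dev_id("event_cn9k");

		if (dev_id < 0)
			return -1;

		/* Invokes the dev_selftest op (cnxk_sso_selftest) added here. */
		if (rte_event_dev_selftest(dev_id) != 0)
			return -1;

		/* Invokes the dump op (cnxk_sso_dump). */
		return rte_event_dev_dump(dev_id, stdout);
	}

The same selftests can also be triggered from the dpdk-test binary via the
eventdev_selftest_cn9k and eventdev_selftest_cn10k commands registered in
app/test/test_eventdev.c below.
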
 app/test/test_eventdev.c                    |   14 +
 drivers/event/cnxk/cn10k_eventdev.c         |    8 +
 drivers/event/cnxk/cn9k_eventdev.c          |   10 +-
 drivers/event/cnxk/cnxk_eventdev.c          |    8 +
 drivers/event/cnxk/cnxk_eventdev.h          |    5 +
 drivers/event/cnxk/cnxk_eventdev_selftest.c | 1564 +++++++++++++++++++
 drivers/event/cnxk/meson.build              |    1 +
 7 files changed, 1609 insertions(+), 1 deletion(-)
 create mode 100644 drivers/event/cnxk/cnxk_eventdev_selftest.c

diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index bcfaa53cb..843d9766b 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -1036,6 +1036,18 @@ test_eventdev_selftest_dlb2(void)
 	return test_eventdev_selftest_impl("dlb2_event", "");
 }
 
+static int
+test_eventdev_selftest_cn9k(void)
+{
+	return test_eventdev_selftest_impl("event_cn9k", "");
+}
+
+static int
+test_eventdev_selftest_cn10k(void)
+{
+	return test_eventdev_selftest_impl("event_cn10k", "");
+}
+
 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
@@ -1044,3 +1056,5 @@ REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
 		test_eventdev_selftest_octeontx2);
 REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
 REGISTER_TEST_COMMAND(eventdev_selftest_dlb2, test_eventdev_selftest_dlb2);
+REGISTER_TEST_COMMAND(eventdev_selftest_cn9k, test_eventdev_selftest_cn9k);
+REGISTER_TEST_COMMAND(eventdev_selftest_cn10k, test_eventdev_selftest_cn10k);
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 5b7cd672c..a0c6d32cc 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -401,6 +401,12 @@ cn10k_sso_close(struct rte_eventdev *event_dev)
 	return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
 }
 
+static int
+cn10k_sso_selftest(void)
+{
+	return cnxk_sso_selftest(RTE_STR(event_cn10k));
+}
+
 static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -414,9 +420,11 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.port_unlink = cn10k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,
 
+	.dump = cnxk_sso_dump,
 	.dev_start = cn10k_sso_start,
 	.dev_stop = cn10k_sso_stop,
 	.dev_close = cn10k_sso_close,
+	.dev_selftest = cn10k_sso_selftest,
 };
 
 static int
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index f13f50f42..48991e522 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -222,7 +222,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
 	}
 }
 
-static void
+void
 cn9k_sso_set_rsrc(void *arg)
 {
 	struct cnxk_sso_evdev *dev = arg;
@@ -475,6 +475,12 @@ cn9k_sso_close(struct rte_eventdev *event_dev)
 	return cnxk_sso_close(event_dev, cn9k_sso_hws_unlink);
 }
 
+static int
+cn9k_sso_selftest(void)
+{
+	return cnxk_sso_selftest(RTE_STR(event_cn9k));
+}
+
 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -488,9 +494,11 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.port_unlink = cn9k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,
 
+	.dump = cnxk_sso_dump,
 	.dev_start = cn9k_sso_start,
 	.dev_stop = cn9k_sso_stop,
 	.dev_close = cn9k_sso_close,
+	.dev_selftest = cn9k_sso_selftest,
 };
 
 static int
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index a3900315a..0f084176c 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -326,6 +326,14 @@ cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
 	return 0;
 }
 
+void
+cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+	roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
+}
+
 static void
 cnxk_handle_event(void *arg, struct rte_event event)
 {
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 3011af153..9af04bc3d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -211,5 +211,10 @@ void cnxk_sso_stop(struct rte_eventdev *event_dev,
 		   cnxk_sso_hws_reset_t reset_fn,
 		   cnxk_sso_hws_flush_t flush_fn);
 int cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn);
+int cnxk_sso_selftest(const char *dev_name);
+void cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f);
+
+/* CN9K */
+void cn9k_sso_set_rsrc(void *arg);
 
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_selftest.c b/drivers/event/cnxk/cnxk_eventdev_selftest.c
new file mode 100644
index 000000000..69c15b1d0
--- /dev/null
+++ b/drivers/event/cnxk/cnxk_eventdev_selftest.c
@@ -0,0 +1,1564 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_test.h>
+
+#include "cnxk_eventdev.h"
+
+#define NUM_PACKETS (1024)
+#define MAX_EVENTS  (1024)
+#define MAX_STAGES  (255)
+
+#define CNXK_TEST_RUN(setup, teardown, test)                                   \
+	cnxk_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+	uint32_t flow_id;
+	uint8_t event_type;
+	uint8_t sub_event_type;
+	uint8_t sched_type;
+	uint8_t queue;
+	uint8_t port;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static inline void
+seqn_list_init(void)
+{
+	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+	memset(seqn_list, 0, sizeof(seqn_list));
+	seqn_list_index = 0;
+}
+
+static inline int
+seqn_list_update(int val)
+{
+	if (seqn_list_index >= NUM_PACKETS)
+		return -1;
+
+	seqn_list[seqn_list_index++] = val;
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+	return 0;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+	int i;
+
+	for (i = 0; i < limit; i++) {
+		if (seqn_list[i] != i) {
+			plt_err("Seqn mismatch %d %d", seqn_list[i], i);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+struct test_core_param {
+	uint32_t *total_events;
+	uint64_t dequeue_tmo_ticks;
+	uint8_t port;
+	uint8_t sched_type;
+};
+
+static int
+testsuite_setup(const char *eventdev_name)
+{
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+	if (evdev < 0) {
+		plt_err("%d: Eventdev %s not found", __LINE__, eventdev_name);
+		return -1;
+	}
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_event_dev_close(evdev);
+	total = 0;
+	passed = 0;
+	failed = 0;
+	unsupported = 0;
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+				struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = info->max_event_ports;
+	dev_conf->nb_event_queues = info->max_event_queues;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+		info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+		info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit = info->max_num_events;
+}
+
+enum {
+	TEST_EVENTDEV_SETUP_DEFAULT,
+	TEST_EVENTDEV_SETUP_PRIORITY,
+	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static inline int
+_eventdev_setup(int mode)
+{
+	const char *pool_name = "evdev_cnxk_test_pool";
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	int i, ret;
+
+	/* Create and destroy pool for each test case to make it standalone */
+	eventdev_test_mempool = rte_pktmbuf_pool_create(
+		pool_name, MAX_EVENTS, 0, 0, 512, rte_socket_id());
+	if (!eventdev_test_mempool) {
+		plt_err("ERROR creating mempool");
+		return -1;
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+	ret = rte_event_dev_configure(evdev, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+
+	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+		if (queue_count > 8)
+			queue_count = 8;
+
+		/* Configure event queues (0 to n) with
+		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+		 * RTE_EVENT_DEV_PRIORITY_LOWEST
+		 */
+		uint8_t step =
+			(RTE_EVENT_DEV_PRIORITY_LOWEST + 1) / queue_count;
+		for (i = 0; i < (int)queue_count; i++) {
+			struct rte_event_queue_conf queue_conf;
+
+			ret = rte_event_queue_default_conf_get(evdev, i,
+							       &queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+						i);
+			queue_conf.priority = i * step;
+			ret = rte_event_queue_setup(evdev, i, &queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+						i);
+		}
+
+	} else {
+		/* Configure event queues with default priority */
+		for (i = 0; i < (int)queue_count; i++) {
+			ret = rte_event_queue_setup(evdev, i, NULL);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+						i);
+		}
+	}
+	/* Configure event ports */
+	uint32_t port_count;
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &port_count),
+		"Port count get failed");
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_setup(evdev, i, NULL);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+				i);
+	}
+
+	ret = rte_event_dev_start(evdev);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+	return 0;
+}
+
+static inline int
+eventdev_setup(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static inline int
+eventdev_setup_priority(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
+}
+
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
+static inline void
+eventdev_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+	rte_mempool_free(eventdev_test_mempool);
+}
+
+static inline void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+				 uint32_t flow_id, uint8_t event_type,
+				 uint8_t sub_event_type, uint8_t sched_type,
+				 uint8_t queue, uint8_t port)
+{
+	struct event_attr *attr;
+
+	/* Store the event attributes in mbuf for future reference */
+	attr = rte_pktmbuf_mtod(m, struct event_attr *);
+	attr->flow_id = flow_id;
+	attr->event_type = event_type;
+	attr->sub_event_type = sub_event_type;
+	attr->sched_type = sched_type;
+	attr->queue = queue;
+	attr->port = port;
+
+	ev->flow_id = flow_id;
+	ev->sub_event_type = sub_event_type;
+	ev->event_type = event_type;
+	/* Inject the new event */
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = sched_type;
+	ev->queue_id = queue;
+	ev->mbuf = m;
+}
+
+static inline int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+	      uint8_t sched_type, uint8_t queue, uint8_t port,
+	      unsigned int events)
+{
+	struct rte_mbuf *m;
+	unsigned int i;
+
+	for (i = 0; i < events; i++) {
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+		*rte_event_pmd_selftest_seqn(m) = i;
+		update_event_and_validation_attr(m, &ev, flow_id, event_type,
+						 sub_event_type, sched_type,
+						 queue, port);
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+	return 0;
+}
+
+static inline int
+check_excess_events(uint8_t port)
+{
+	uint16_t valid_event;
+	struct rte_event ev;
+	int i;
+
+	/* Check for excess events, try for a few times and exit */
+	for (i = 0; i < 32; i++) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+		RTE_TEST_ASSERT_SUCCESS(valid_event,
+					"Unexpected valid event=%d",
+					*rte_event_pmd_selftest_seqn(ev.mbuf));
+	}
+	return 0;
+}
+
+static inline int
+generate_random_events(const unsigned int total_events)
+{
+	struct rte_event_dev_info info;
+	uint32_t queue_count;
+	unsigned int i;
+	int ret;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	for (i = 0; i < total_events; i++) {
+		ret = inject_events(
+			rte_rand() % info.max_event_queue_flows /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			rte_rand() % queue_count /* queue */, 0 /* port */,
+			1 /* events */);
+		if (ret)
+			return -1;
+	}
+	return ret;
+}
+
+static inline int
+validate_event(struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+			      "flow_id mismatch enq=%d deq =%d", attr->flow_id,
+			      ev->flow_id);
+	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+			      "event_type mismatch enq=%d deq =%d",
+			      attr->event_type, ev->event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+			      "sub_event_type mismatch enq=%d deq =%d",
+			      attr->sub_event_type, ev->sub_event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+			      "sched_type mismatch enq=%d deq =%d",
+			      attr->sched_type, ev->sched_type);
+	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+			      "queue mismatch enq=%d deq =%d", attr->queue,
+			      ev->queue_id);
+	return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+				 struct rte_event *ev);
+
+static inline int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+	uint16_t valid_event;
+	struct rte_event ev;
+	int ret;
+
+	while (1) {
+		if (++forward_progress_cnt > UINT16_MAX) {
+			plt_err("Detected deadlock");
+			return -1;
+		}
+
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		forward_progress_cnt = 0;
+		ret = validate_event(&ev);
+		if (ret)
+			return -1;
+
+		if (fn != NULL) {
+			ret = fn(index, port, &ev);
+			RTE_TEST_ASSERT_SUCCESS(
+				ret, "Failed to validate test specific event");
+		}
+
+		++index;
+
+		rte_pktmbuf_free(ev.mbuf);
+		if (++events >= total_events)
+			break;
+	}
+
+	return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+	RTE_SET_USED(port);
+	RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
+			      "index=%d != seqn=%d", index,
+			      *rte_event_pmd_selftest_seqn(ev->mbuf));
+	return 0;
+}
+
+static inline int
+test_simple_enqdeq(uint8_t sched_type)
+{
+	int ret;
+
+	ret = inject_events(0 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
+			    0 /* sub_event_type */, sched_type, 0 /* queue */,
+			    0 /* port */, MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_ordered(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, verify the enqueued event attributes using a single
+ * event port (port 0).
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+	int ret;
+
+	ret = generate_random_events(MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+/*
+ * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
+ * operation
+ *
+ * For example, Inject 32 events over 0..7 queues
+ * enqueue events 0, 8, 16, 24 in queue 0
+ * enqueue events 1, 9, 17, 25 in queue 1
+ * ..
+ * ..
+ * enqueue events 7, 15, 23, 31 in queue 7
+ *
+ * On dequeue, validate that events come in 0,8,16,24,1,9,17,25..,7,15,23,31
+ * order from queue 0 (highest priority) to queue 7 (lowest priority)
+ */
+static int
+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+	uint32_t queue_count;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+	if (queue_count > 8)
+		queue_count = 8;
+	uint32_t range = MAX_EVENTS / queue_count;
+	uint32_t expected_val = (index % range) * queue_count;
+
+	expected_val += ev->queue_id;
+	RTE_SET_USED(port);
+	RTE_TEST_ASSERT_EQUAL(
+		*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
+		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+		*rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val,
+		range, queue_count, MAX_EVENTS);
+	return 0;
+}
+
+static int
+test_multi_queue_priority(void)
+{
+	int i, max_evts_roundoff;
+	/* See validate_queue_priority() comments for priority validate logic */
+	uint32_t queue_count;
+	struct rte_mbuf *m;
+	uint8_t queue;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+	if (queue_count > 8)
+		queue_count = 8;
+	max_evts_roundoff = MAX_EVENTS / queue_count;
+	max_evts_roundoff *= queue_count;
+
+	for (i = 0; i < max_evts_roundoff; i++) {
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+		*rte_event_pmd_selftest_seqn(m) = i;
+		queue = i % queue_count;
+		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
+						 0, RTE_SCHED_TYPE_PARALLEL,
+						 queue, 0);
+		rte_event_enqueue_burst(evdev, 0, &ev, 1);
+	}
+
+	return consume_events(0, max_evts_roundoff, validate_queue_priority);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint32_t *total_events = param->total_events;
+	uint8_t port = param->port;
+	uint16_t valid_event;
+	struct rte_event ev;
+	int ret;
+
+	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		ret = validate_event(&ev);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+		rte_pktmbuf_free(ev.mbuf);
+		__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+	}
+
+	return 0;
+}
+
+static inline int
+wait_workers_to_join(const uint32_t *count)
+{
+	uint64_t cycles, print_cycles;
+
+	cycles = rte_get_timer_cycles();
+	print_cycles = cycles;
+	while (__atomic_load_n(count, __ATOMIC_RELAXED)) {
+		uint64_t new_cycles = rte_get_timer_cycles();
+
+		if (new_cycles - print_cycles > rte_get_timer_hz()) {
+			plt_info("Events %d",
+				 __atomic_load_n(count, __ATOMIC_RELAXED));
+			print_cycles = new_cycles;
+		}
+		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+			plt_err("No schedules for 10 seconds, deadlock (%d)",
+				__atomic_load_n(count, __ATOMIC_RELAXED));
+			rte_event_dev_dump(evdev, stdout);
+			cycles = new_cycles;
+			return -1;
+		}
+	}
+	rte_eal_mp_wait_lcore();
+
+	return 0;
+}
+
+static inline int
+launch_workers_and_wait(int (*main_thread)(void *),
+			int (*worker_thread)(void *), uint32_t total_events,
+			uint8_t nb_workers, uint8_t sched_type)
+{
+	uint32_t atomic_total_events;
+	struct test_core_param *param;
+	uint64_t dequeue_tmo_ticks;
+	uint8_t port = 0;
+	int w_lcore;
+	int ret;
+
+	if (!nb_workers)
+		return 0;
+
+	__atomic_store_n(&atomic_total_events, total_events, __ATOMIC_RELAXED);
+	seqn_list_init();
+
+	param = malloc(sizeof(struct test_core_param) * nb_workers);
+	if (!param)
+		return -1;
+
+	ret = rte_event_dequeue_timeout_ticks(
+		evdev, rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
+	if (ret) {
+		free(param);
+		return -1;
+	}
+
+	param[0].total_events = &atomic_total_events;
+	param[0].sched_type = sched_type;
+	param[0].port = 0;
+	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+	rte_wmb();
+
+	w_lcore = rte_get_next_lcore(
+		/* start core */ -1,
+		/* skip main */ 1,
+		/* wrap */ 0);
+	rte_eal_remote_launch(main_thread, &param[0], w_lcore);
+
+	for (port = 1; port < nb_workers; port++) {
+		param[port].total_events = &atomic_total_events;
+		param[port].sched_type = sched_type;
+		param[port].port = port;
+		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+		rte_atomic_thread_fence(__ATOMIC_RELEASE);
+		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+		rte_eal_remote_launch(worker_thread, &param[port], w_lcore);
+	}
+
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+	ret = wait_workers_to_join(&atomic_total_events);
+	free(param);
+
+	return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &nr_ports),
+		"Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		plt_err("Not enough ports=%d or workers=%d", nr_ports,
+			rte_lcore_count() - 1);
+		return 0;
+	}
+
+	return launch_workers_and_wait(worker_multi_port_fn,
+				       worker_multi_port_fn, total_events,
+				       nr_ports, 0xff /* invalid */);
+}
+
+static void
+flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+	unsigned int *count = arg;
+
+	RTE_SET_USED(dev_id);
+	if (event.event_type == RTE_EVENT_TYPE_CPU)
+		*count = *count + 1;
+}
+
+static int
+test_dev_stop_flush(void)
+{
+	unsigned int total_events = MAX_EVENTS, count = 0;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+	if (ret)
+		return -2;
+	rte_event_dev_stop(evdev);
+	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+	if (ret)
+		return -3;
+	RTE_TEST_ASSERT_EQUAL(total_events, count,
+			      "count mismatch total_events=%d count=%d",
+			      total_events, count);
+
+	return 0;
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+				   struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+			      "queue mismatch enq=%d deq =%d", port,
+			      ev->queue_id);
+
+	return 0;
+}
+
+/*
+ * Link queue x to port x and check correctness of link by checking
+ * queue_id == x on dequeue on the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+	int i, nr_links, ret;
+	uint32_t queue_count;
+	uint32_t port_count;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &port_count),
+		"Port count get failed");
+
+	/* Unlink all links that were created in eventdev_setup */
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_unlink(evdev, i, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+				i);
+	}
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+
+	nr_links = RTE_MIN(port_count, queue_count);
+	const unsigned int total_events = MAX_EVENTS / nr_links;
+
+	/* Link queue x to port x and inject events to queue x through port x */
+	for (i = 0; i < nr_links; i++) {
+		uint8_t queue = (uint8_t)i;
+
+		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+		ret = inject_events(0x100 /*flow_id */,
+				    RTE_EVENT_TYPE_CPU /* event_type */,
+				    rte_rand() % 256 /* sub_event_type */,
+				    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+				    queue /* queue */, i /* port */,
+				    total_events /* events */);
+		if (ret)
+			return -1;
+	}
+
+	/* Verify the events generated from correct queue */
+	for (i = 0; i < nr_links; i++) {
+		ret = consume_events(i /* port */, total_events,
+				     validate_queue_to_port_single_link);
+		if (ret)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+				  struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+			      "queue mismatch enq=%d deq =%d", port,
+			      ev->queue_id);
+
+	return 0;
+}
+
+/*
+ * Link all even-numbered queues to port 0 and all odd-numbered queues to
+ * port 1, then verify the links on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+	int ret, port0_events = 0, port1_events = 0;
+	uint32_t nr_queues = 0;
+	uint32_t nr_ports = 0;
+	uint8_t queue, port;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &nr_queues),
+		"Queue count get failed");
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &nr_ports),
+		"Port count get failed");
+
+	if (nr_ports < 2) {
+		plt_err("Not enough ports to test ports=%d", nr_ports);
+		return 0;
+	}
+
+	/* Unlink all links that were created in eventdev_setup */
+	for (port = 0; port < nr_ports; port++) {
+		ret = rte_event_port_unlink(evdev, port, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+				port);
+	}
+
+	unsigned int total_events = MAX_EVENTS / nr_queues;
+	if (!total_events) {
+		nr_queues = MAX_EVENTS;
+		total_events = MAX_EVENTS / nr_queues;
+	}
+
+	/* Link even-numbered queues to port 0 and odd-numbered ones to port 1 */
+	for (queue = 0; queue < nr_queues; queue++) {
+		port = queue & 0x1;
+		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+				queue, port);
+
+		ret = inject_events(0x100 /*flow_id */,
+				    RTE_EVENT_TYPE_CPU /* event_type */,
+				    rte_rand() % 256 /* sub_event_type */,
+				    rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+				    queue /* queue */, port /* port */,
+				    total_events /* events */);
+		if (ret)
+			return -1;
+
+		if (port == 0)
+			port0_events += total_events;
+		else
+			port1_events += total_events;
+	}
+
+	ret = consume_events(0 /* port */, port0_events,
+			     validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+	ret = consume_events(1 /* port */, port1_events,
+			     validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
+static int
+worker_flow_based_pipeline(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+	uint32_t *total_events = param->total_events;
+	uint8_t new_sched_type = param->sched_type;
+	uint8_t port = param->port;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+						      dequeue_tmo_ticks);
+		if (!valid_event)
+			continue;
+
+		/* Events from stage 0 */
+		if (ev.sub_event_type == 0) {
+			/* Move to atomic flow to maintain the ordering */
+			ev.flow_id = 0x2;
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.sub_event_type = 1; /* stage 1 */
+			ev.sched_type = new_sched_type;
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
+			uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+			if (seqn_list_update(seqn) == 0) {
+				rte_pktmbuf_free(ev.mbuf);
+				__atomic_sub_fetch(total_events, 1,
+						   __ATOMIC_RELAXED);
+			} else {
+				plt_err("Failed to update seqn_list");
+				return -1;
+			}
+		} else {
+			plt_err("Invalid ev.sub_event_type = %d",
+				ev.sub_event_type);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
+test_multiport_flow_sched_type_test(uint8_t in_sched_type,
+				    uint8_t out_sched_type)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &nr_ports),
+		"Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		plt_err("Not enough ports=%d or workers=%d", nr_ports,
+			rte_lcore_count() - 1);
+		return 0;
+	}
+
+	/* Inject total_events events with sequence numbers starting from 0 */
+	ret = inject_events(
+		0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
+		0 /* sub_event_type (stage 0) */, in_sched_type, 0 /* queue */,
+		0 /* port */, total_events /* events */);
+	if (ret)
+		return -1;
+
+	rte_mb();
+	ret = launch_workers_and_wait(worker_flow_based_pipeline,
+				      worker_flow_based_pipeline, total_events,
+				      nr_ports, out_sched_type);
+	if (ret)
+		return -1;
+
+	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+	    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		/* Check whether the event order is maintained */
+		return seqn_list_check(total_events);
+	}
+
+	return 0;
+}
+
+/* Multi port ordered to atomic transaction */
+static int
+test_multi_port_flow_ordered_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+						   RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_ordered_to_ordered(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+						   RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_ordered_to_parallel(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+						   RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_atomic_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+						   RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_atomic_to_ordered(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+						   RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_atomic_to_parallel(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+						   RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_parallel_to_atomic(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+						   RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_parallel_to_ordered(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+						   RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_parallel_to_parallel(void)
+{
+	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+						   RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_group_based_pipeline(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+	uint32_t *total_events = param->total_events;
+	uint8_t new_sched_type = param->sched_type;
+	uint8_t port = param->port;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+						      dequeue_tmo_ticks);
+		if (!valid_event)
+			continue;
+
+		/* Events from stage 0(group 0) */
+		if (ev.queue_id == 0) {
+			/* Move to atomic flow to maintain the ordering */
+			ev.flow_id = 0x2;
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.sched_type = new_sched_type;
+			ev.queue_id = 1; /* Stage 1*/
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
+			uint32_t seqn = *rte_event_pmd_selftest_seqn(ev.mbuf);
+
+			if (seqn_list_update(seqn) == 0) {
+				rte_pktmbuf_free(ev.mbuf);
+				__atomic_sub_fetch(total_events, 1,
+						   __ATOMIC_RELAXED);
+			} else {
+				plt_err("Failed to update seqn_list");
+				return -1;
+			}
+		} else {
+			plt_err("Invalid ev.queue_id = %d", ev.queue_id);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int
+test_multiport_queue_sched_type_test(uint8_t in_sched_type,
+				     uint8_t out_sched_type)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t queue_count;
+	uint32_t nr_ports;
+	int ret;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &nr_ports),
+		"Port count get failed");
+
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+	if (queue_count < 2 || !nr_ports) {
+		plt_err("Not enough queues=%d ports=%d or workers=%d",
+			queue_count, nr_ports, rte_lcore_count() - 1);
+		return 0;
+	}
+
+	/* Inject total_events events with sequence numbers starting from 0 */
+	ret = inject_events(
+		0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
+		0 /* sub_event_type (stage 0) */, in_sched_type, 0 /* queue */,
+		0 /* port */, total_events /* events */);
+	if (ret)
+		return -1;
+
+	ret = launch_workers_and_wait(worker_group_based_pipeline,
+				      worker_group_based_pipeline, total_events,
+				      nr_ports, out_sched_type);
+	if (ret)
+		return -1;
+
+	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+	    out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		/* Check whether the event order is maintained */
+		return seqn_list_check(total_events);
+	}
+
+	return 0;
+}
+
+static int
+test_multi_port_queue_ordered_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+						    RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_ordered_to_ordered(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+						    RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_ordered_to_parallel(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+						    RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_atomic_to_atomic(void)
+{
+	/* Ingress event order test */
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+						    RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_atomic_to_ordered(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+						    RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_atomic_to_parallel(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+						    RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_parallel_to_atomic(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+						    RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_parallel_to_ordered(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+						    RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_parallel_to_parallel(void)
+{
+	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+						    RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint32_t *total_events = param->total_events;
+	uint8_t port = param->port;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.sub_event_type == MAX_STAGES) { /* last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.sub_event_type++;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+
+	return 0;
+}
+
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+	uint32_t nr_ports;
+	int ret;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &nr_ports),
+		"Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		plt_err("Not enough ports=%d or workers=%d", nr_ports,
+			rte_lcore_count() - 1);
+		return 0;
+	}
+
+	/* Inject MAX_EVENTS events with sequence numbers starting from 0 */
+	ret = inject_events(
+		0x1 /*flow_id */, RTE_EVENT_TYPE_CPU /* event_type */,
+		0 /* sub_event_type (stage 0) */,
+		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+		0 /* queue */, 0 /* port */, MAX_EVENTS /* events */);
+	if (ret)
+		return -1;
+
+	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+				       0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint8_t port = param->port;
+	uint32_t queue_count;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+	uint8_t nr_queues = queue_count;
+	uint32_t *total_events = param->total_events;
+
+	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.queue_id == nr_queues - 1) { /* last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.queue_id++;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+
+	return 0;
+}
+
+/* Queue based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint8_t port = param->port;
+	uint32_t queue_count;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				       &queue_count),
+		"Queue count get failed");
+	uint8_t nr_queues = queue_count;
+	uint32_t *total_events = param->total_events;
+
+	while (__atomic_load_n(total_events, __ATOMIC_RELAXED) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		if (ev.queue_id == nr_queues - 1) { /* Last stage */
+			rte_pktmbuf_free(ev.mbuf);
+			__atomic_sub_fetch(total_events, 1, __ATOMIC_RELAXED);
+		} else {
+			ev.event_type = RTE_EVENT_TYPE_CPU;
+			ev.queue_id++;
+			ev.sub_event_type = rte_rand() % 256;
+			ev.sched_type =
+				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+			ev.op = RTE_EVENT_OP_FORWARD;
+			rte_event_enqueue_burst(evdev, port, &ev, 1);
+		}
+	}
+
+	return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+	return launch_multi_port_max_stages_random_sched_type(
+		worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_ordered_flow_producer(void *arg)
+{
+	struct test_core_param *param = arg;
+	uint8_t port = param->port;
+	struct rte_mbuf *m;
+	int counter = 0;
+
+	while (counter < NUM_PACKETS) {
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		if (m == NULL)
+			continue;
+
+		*rte_event_pmd_selftest_seqn(m) = counter++;
+
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		ev.flow_id = 0x1; /* Generate a fat flow */
+		ev.sub_event_type = 0;
+		/* Inject the new event */
+		ev.op = RTE_EVENT_OP_NEW;
+		ev.event_type = RTE_EVENT_TYPE_CPU;
+		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+		ev.queue_id = 0;
+		ev.mbuf = m;
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+
+	return 0;
+}
+
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+	uint32_t nr_ports;
+
+	RTE_TEST_ASSERT_SUCCESS(
+		rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				       &nr_ports),
+		"Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (rte_lcore_count() < 3 || nr_ports < 2) {
+		plt_err("### Not enough cores for test.");
+		return 0;
+	}
+
+	launch_workers_and_wait(worker_ordered_flow_producer, fn, NUM_PACKETS,
+				nr_ports, RTE_SCHED_TYPE_ATOMIC);
+	/* Check whether the event order is maintained */
+	return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+	return test_producer_consumer_ingress_order_test(
+		worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+	return test_producer_consumer_ingress_order_test(
+		worker_group_based_pipeline);
+}
+
+static void
+cnxk_test_run(int (*setup)(void), void (*tdown)(void), int (*test)(void),
+	      const char *name)
+{
+	if (setup() < 0) {
+		printf("Error setting up test %s", name);
+		unsupported++;
+	} else {
+		if (test() < 0) {
+			failed++;
+			printf("+ TestCase [%2d] : %s failed\n", total, name);
+		} else {
+			passed++;
+			printf("+ TestCase [%2d] : %s succeeded\n", total,
+			       name);
+		}
+	}
+
+	total++;
+	tdown();
+}
+
+static int
+cnxk_sso_testsuite_run(const char *dev_name)
+{
+	int rc;
+
+	testsuite_setup(dev_name);
+
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_simple_enqdeq_ordered);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_simple_enqdeq_atomic);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_simple_enqdeq_parallel);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_queue_enq_single_port_deq);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown, test_dev_stop_flush);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_queue_enq_multi_port_deq);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_queue_to_port_single_link);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_queue_to_port_multi_link);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_ordered_to_atomic);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_ordered_to_ordered);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_ordered_to_parallel);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_atomic_to_atomic);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_atomic_to_ordered);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_atomic_to_parallel);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_parallel_to_atomic);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_parallel_to_ordered);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_parallel_to_parallel);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_ordered_to_atomic);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_ordered_to_ordered);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_ordered_to_parallel);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_atomic_to_atomic);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_atomic_to_ordered);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_atomic_to_parallel);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_parallel_to_atomic);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_parallel_to_ordered);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_parallel_to_parallel);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_flow_max_stages_random_sched_type);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_queue_max_stages_random_sched_type);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_multi_port_mixed_max_stages_random_sched_type);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_flow_producer_consumer_ingress_order_test);
+	CNXK_TEST_RUN(eventdev_setup, eventdev_teardown,
+		      test_queue_producer_consumer_ingress_order_test);
+	CNXK_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
+		      test_multi_queue_priority);
+	CNXK_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+		      test_multi_port_flow_ordered_to_atomic);
+	CNXK_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
+		      test_multi_port_queue_ordered_to_atomic);
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+	printf("Not supported : %d\n", unsupported);
+
+	rc = failed;
+	testsuite_teardown();
+
+	return rc;
+}
+
+int
+cnxk_sso_selftest(const char *dev_name)
+{
+	const struct rte_memzone *mz;
+	struct cnxk_sso_evdev *dev;
+	int rc = -1;
+
+	mz = rte_memzone_lookup(CNXK_SSO_MZ_NAME);
+	if (mz == NULL)
+		return rc;
+
+	dev = (void *)*((uint64_t *)mz->addr);
+	if (roc_model_runtime_is_cn9k()) {
+		/* Verify single ws mode. */
+		printf("Verifying CN9K Single workslot mode\n");
+		dev->dual_ws = 0;
+		cn9k_sso_set_rsrc(dev);
+		if (cnxk_sso_testsuite_run(dev_name))
+			return rc;
+		/* Verify dual ws mode. */
+		printf("Verifying CN9K Dual workslot mode\n");
+		dev->dual_ws = 1;
+		cn9k_sso_set_rsrc(dev);
+		if (cnxk_sso_testsuite_run(dev_name))
+			return rc;
+	}
+
+	if (roc_model_runtime_is_cn10k()) {
+		printf("Verifying CN10K workslot getwork mode none\n");
+		dev->gw_mode = CN10K_GW_MODE_NONE;
+		if (cnxk_sso_testsuite_run(dev_name))
+			return rc;
+		printf("Verifying CN10K workslot getwork mode prefetch\n");
+		dev->gw_mode = CN10K_GW_MODE_PREF;
+		if (cnxk_sso_testsuite_run(dev_name))
+			return rc;
+		printf("Verifying CN10K workslot getwork mode smart prefetch\n");
+		dev->gw_mode = CN10K_GW_MODE_PREF_WFE;
+		if (cnxk_sso_testsuite_run(dev_name))
+			return rc;
+	}
+
+	return 0;
+}
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 57b3f66ea..e37ea3478 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -13,6 +13,7 @@ sources = files('cn10k_worker.c',
                 'cn9k_worker.c',
                 'cn9k_eventdev.c',
                 'cnxk_eventdev.c',
+                'cnxk_eventdev_selftest.c'
                 )
 
 deps += ['bus_pci', 'common_cnxk']
-- 
2.17.1


2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 32/33] event/cnxk: add timer adapter start and stop pbhagavatula
2021-04-30 13:53     ` [dpdk-dev] [PATCH v3 33/33] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-05-01 12:03     ` [dpdk-dev] [PATCH v3 00/33] Marvell CNXK Event device Driver Jerin Jacob
2021-05-03 15:22     ` [dpdk-dev] [PATCH v4 00/34] " pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 01/34] common/cnxk: rename deprecated constant pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 02/34] event/cnxk: add build infra and device setup pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 03/34] event/cnxk: add device capabilities function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 04/34] event/cnxk: add platform specific device probe pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 05/34] event/cnxk: add common configuration validation pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 06/34] event/cnxk: add platform specific device config pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 07/34] event/cnxk: add event queue config functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 08/34] event/cnxk: allocate event inflight buffers pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 09/34] event/cnxk: add devargs for inflight buffer count pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 10/34] event/cnxk: add devargs to control SSO HWGRP QoS pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 11/34] event/cnxk: add port config functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 12/34] event/cnxk: add event port link and unlink pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 13/34] event/cnxk: add devargs to configure getwork mode pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 14/34] event/cnxk: add SSO HW device operations pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 15/34] event/cnxk: add SSO GWS fastpath enqueue functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 16/34] event/cnxk: add SSO GWS dequeue fastpath functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 17/34] event/cnxk: add device start function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 18/34] event/cnxk: add device stop and close functions pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 19/34] event/cnxk: add SSO selftest and dump pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 20/34] event/cnxk: add event port and queue xstats pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 21/34] event/cnxk: support event timer pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 22/34] event/cnxk: add timer adapter capabilities pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 23/34] event/cnxk: create and free timer adapter pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 24/34] event/cnxk: add devargs to disable NPA pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 25/34] event/cnxk: allow adapters to resize inflights pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 26/34] event/cnxk: add timer adapter info function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 27/34] event/cnxk: add devargs for chunk size and rings pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 28/34] event/cnxk: add TIM bucket operations pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 29/34] event/cnxk: add timer arm routine pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 30/34] event/cnxk: add timer arm timeout burst pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 31/34] event/cnxk: add timer cancel function pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 32/34] event/cnxk: add timer stats get and reset pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 33/34] event/cnxk: add timer adapter start and stop pbhagavatula
2021-05-03 15:22       ` [dpdk-dev] [PATCH v4 34/34] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-05-04  0:26       ` [dpdk-dev] [PATCH v5 00/35] Marvell CNXK Event device Driver pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 01/35] common/cnxk: rename deprecated constant pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 02/35] common/cnxk: update inline asm prefix pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 03/35] event/cnxk: add build infra and device setup pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 04/35] event/cnxk: add device capabilities function pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 05/35] event/cnxk: add platform specific device probe pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 06/35] event/cnxk: add common configuration validation pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 07/35] event/cnxk: add platform specific device config pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 08/35] event/cnxk: add event queue config functions pbhagavatula
2021-05-04  0:26         ` [dpdk-dev] [PATCH v5 09/35] event/cnxk: allocate event inflight buffers pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 10/35] event/cnxk: add devargs for inflight buffer count pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 11/35] event/cnxk: add devargs to control SSO HWGRP QoS pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 12/35] event/cnxk: add port config functions pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 13/35] event/cnxk: add event port link and unlink pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 14/35] event/cnxk: add devargs to configure getwork mode pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 15/35] event/cnxk: add SSO HW device operations pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 16/35] event/cnxk: add SSO GWS fastpath enqueue functions pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 17/35] event/cnxk: add SSO GWS dequeue fastpath functions pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 18/35] event/cnxk: add device start function pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 19/35] event/cnxk: add device stop and close functions pbhagavatula
2021-05-04  0:27         ` pbhagavatula [this message]
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 21/35] event/cnxk: add event port and queue xstats pbhagavatula
2021-05-04  9:51           ` Kinsella, Ray
2021-05-04 10:08             ` Jerin Jacob
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 22/35] event/cnxk: support event timer pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 23/35] event/cnxk: add timer adapter capabilities pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 24/35] event/cnxk: create and free timer adapter pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 25/35] event/cnxk: add devargs to disable NPA pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 26/35] event/cnxk: allow adapters to resize inflights pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 27/35] event/cnxk: add timer adapter info function pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 28/35] event/cnxk: add devargs for chunk size and rings pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 29/35] event/cnxk: add TIM bucket operations pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 30/35] event/cnxk: add timer arm routine pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 31/35] event/cnxk: add timer arm timeout burst pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 32/35] event/cnxk: add timer cancel function pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 33/35] event/cnxk: add timer stats get and reset pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 34/35] event/cnxk: add timer adapter start and stop pbhagavatula
2021-05-04  0:27         ` [dpdk-dev] [PATCH v5 35/35] event/cnxk: add devargs to control timer adapters pbhagavatula
2021-05-04  8:30         ` [dpdk-dev] [PATCH v5 00/35] Marvell CNXK Event device Driver Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox (a command-line sketch of this
  route follows after this list)

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210504002726.525-21-pbhagavatula@marvell.com \
    --to=pbhagavatula@marvell.com \
    --cc=dev@dpdk.org \
    --cc=jerinj@marvell.com \
    --cc=sthotton@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
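
For the mbox route above, one possible command-line sketch (this is an
assumption about tooling, not part of the archive's instructions: it
presumes curl, gunzip, and mutt are available, and that this mirror
exposes the usual public-inbox t.mbox.gz thread download; adjust the
inbox path and URL if they differ for your setup):

  # Fetch the whole thread as a gzipped mbox (URL pattern assumed)
  curl -O https://lore.kernel.org/dpdk-dev/20210504002726.525-21-pbhagavatula@marvell.com/t.mbox.gz
  gunzip t.mbox.gz

  # Open the thread in mutt and reply-to-all on the chosen message
  mutt -f t.mbox

Any mbox-capable mail client works equally well here; mutt is only a
stand-in example.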