* [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements
@ 2019-09-06 10:34 Hemant Agrawal
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
                   ` (5 more replies)
  0 siblings, 6 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-06 10:34 UTC (permalink / raw)
  To: dev; +Cc: jerinj


This patch series has minor fixes in dpaa2 eventdev support
 - default queue and cleanup logic
 - introducing selftest for dpaa2

Hemant Agrawal (5):
  event/dpaa2: fix def queue conf
  event/dpaa2: remove conditional compilation
  event/dpaa2: add destroy support
  event/dpaa2: add selftest cases
  test/event: enable dpaa2 self test

 app/test/test_eventdev.c                  |  7 ++++
 drivers/event/dpaa2/Makefile              |  3 +-
 drivers/event/dpaa2/dpaa2_eventdev.c      | 49 ++++++++++++++++++-----
 drivers/event/dpaa2/dpaa2_eventdev.h      |  2 +
 drivers/event/dpaa2/dpaa2_eventdev_logs.h |  6 +++
 drivers/event/dpaa2/meson.build           |  3 +-
 6 files changed, 56 insertions(+), 14 deletions(-)

-- 
2.17.1



* [dpdk-dev] [PATCH 1/5] event/dpaa2: fix def queue conf
  2019-09-06 10:34 [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
@ 2019-09-06 10:34 ` Hemant Agrawal
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 2/5] event/dpaa2: remove conditional compilation Hemant Agrawal
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-06 10:34 UTC (permalink / raw)
  To: dev; +Cc: jerinj, stable, Hemant Agrawal

The test vector expects only one scheduling type as the default.
The old code was reporting multiple scheduling types (OR-ed together)
instead of a single default.
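
As an editorial aside (not part of the patch): in the public eventdev API,
schedule_type holds a single RTE_SCHED_TYPE_* value rather than a bitmask,
so OR-ing ATOMIC and PARALLEL produced an out-of-range value. A minimal
sketch of how a caller sees the fixed default, assuming dev_id refers to a
configured dpaa2 eventdev:

#include <assert.h>
#include <rte_eventdev.h>

/* Editorial sketch: query the default queue config; after this fix
 * the driver reports exactly one scheduling type.
 */
static void
check_default_queue_conf(uint8_t dev_id)
{
	struct rte_event_queue_conf qconf;

	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
	assert(qconf.schedule_type == RTE_SCHED_TYPE_PARALLEL);
}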

Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
Cc: stable@dpdk.org
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 926b7edd8..b8cb437a0 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *
- *   Copyright 2017 NXP
- *
+ * Copyright 2017,2019 NXP
  */
 
 #include <assert.h>
@@ -470,8 +468,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	RTE_SET_USED(queue_conf);
 
 	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
-	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
-				      RTE_SCHED_TYPE_PARALLEL;
+	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 }
 
-- 
2.17.1



* [dpdk-dev] [PATCH 2/5] event/dpaa2: remove conditional compilation
  2019-09-06 10:34 [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
@ 2019-09-06 10:34 ` Hemant Agrawal
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 3/5] event/dpaa2: add destroy support Hemant Agrawal
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-06 10:34 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

This patch removes the conditional compilation of the cryptodev event
support, which was previously guarded by the RTE_LIBRTE_SECURITY flag.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile         | 2 --
 drivers/event/dpaa2/dpaa2_eventdev.c | 6 ------
 2 files changed, 8 deletions(-)

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index 470157f25..e0bb527b1 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -24,10 +24,8 @@ LDLIBS += -lrte_common_dpaax
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
 
-ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
 LDLIBS += -lrte_pmd_dpaa2_sec
 CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec
-endif
 
 # versioning export map
 EXPORT_MAP := rte_pmd_dpaa2_event_version.map
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index b8cb437a0..98b487603 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -33,9 +33,7 @@
 #include <dpaa2_hw_mempool.h>
 #include <dpaa2_hw_dpio.h>
 #include <dpaa2_ethdev.h>
-#ifdef RTE_LIBRTE_SECURITY
 #include <dpaa2_sec_event.h>
-#endif
 #include "dpaa2_eventdev.h"
 #include "dpaa2_eventdev_logs.h"
 #include <portal/dpaa2_hw_pvt.h>
@@ -794,7 +792,6 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
 	return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
 static int
 dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
 			    const struct rte_cryptodev *cdev,
@@ -937,7 +934,6 @@ dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
 
 	return 0;
 }
-#endif
 
 static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
@@ -960,13 +956,11 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
 	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
 	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
-#ifdef RTE_LIBRTE_SECURITY
 	.crypto_adapter_caps_get	= dpaa2_eventdev_crypto_caps_get,
 	.crypto_adapter_queue_pair_add	= dpaa2_eventdev_crypto_queue_add,
 	.crypto_adapter_queue_pair_del	= dpaa2_eventdev_crypto_queue_del,
 	.crypto_adapter_start		= dpaa2_eventdev_crypto_start,
 	.crypto_adapter_stop		= dpaa2_eventdev_crypto_stop,
-#endif
 };
 
 static int
-- 
2.17.1



* [dpdk-dev] [PATCH 3/5] event/dpaa2: add destroy support
  2019-09-06 10:34 [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 2/5] event/dpaa2: remove conditional compilation Hemant Agrawal
@ 2019-09-06 10:34 ` Hemant Agrawal
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 4/5] event/dpaa2: add selftest cases Hemant Agrawal
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-06 10:34 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 35 ++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 98b487603..9255de16f 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1059,6 +1059,39 @@ dpaa2_eventdev_create(const char *name)
 	return -EFAULT;
 }
 
+static int
+dpaa2_eventdev_destroy(const char *name)
+{
+	struct rte_eventdev *eventdev;
+	struct dpaa2_eventdev *priv;
+	int i;
+
+	eventdev = rte_event_pmd_get_named_dev(name);
+	if (eventdev == NULL) {
+		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
+		return -1;
+	}
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	priv = eventdev->data->dev_private;
+	for (i = 0; i < priv->max_event_queues; i++) {
+		if (priv->evq_info[i].dpcon)
+			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);
+
+		if (priv->evq_info[i].dpci)
+			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
+
+	}
+	priv->max_event_queues = 0;
+
+	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
+	return 0;
+}
+
+
 static int
 dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
 {
@@ -1077,6 +1110,8 @@ dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
 	name = rte_vdev_device_name(vdev);
 	DPAA2_EVENTDEV_INFO("Closing %s", name);
 
+	dpaa2_eventdev_destroy(name);
+
 	return rte_event_pmd_vdev_uninit(name);
 }
 
-- 
2.17.1



* [dpdk-dev] [PATCH 4/5] event/dpaa2: add selftest cases
  2019-09-06 10:34 [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                   ` (2 preceding siblings ...)
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 3/5] event/dpaa2: add destroy support Hemant Agrawal
@ 2019-09-06 10:34 ` Hemant Agrawal
  2019-09-06 19:29   ` Aaron Conole
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 5/5] test/event: enable dpaa2 self test Hemant Agrawal
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  5 siblings, 1 reply; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-06 10:34 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile              | 1 +
 drivers/event/dpaa2/dpaa2_eventdev.c      | 1 +
 drivers/event/dpaa2/dpaa2_eventdev.h      | 2 ++
 drivers/event/dpaa2/dpaa2_eventdev_logs.h | 6 ++++++
 drivers/event/dpaa2/meson.build           | 3 ++-
 5 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index e0bb527b1..634179383 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -40,5 +40,6 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
 #
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev_selftest.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 9255de16f..902a80f36 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -951,6 +951,7 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.port_unlink      = dpaa2_eventdev_port_unlink,
 	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
 	.dump             = dpaa2_eventdev_dump,
+	.dev_selftest     = test_eventdev_dpaa2,
 	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
 	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index bdac1aa56..abc038e49 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -98,4 +98,6 @@ struct dpaa2_eventdev {
 struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
 void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
 
+int test_eventdev_dpaa2(void);
+
 #endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 86f2e5393..bb5a0e26c 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -35,4 +35,10 @@ extern int dpaa2_logtype_event;
 #define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
 	DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
 
+#define dpaa2_evdev_info(fmt, ...) DPAA2_EVENTDEV_LOG(INFO, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_dbg(fmt, ...) DPAA2_EVENTDEV_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_err(fmt, ...) DPAA2_EVENTDEV_LOG(ERR, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev__func_trace dpaa2_evdev_dbg
+#define dpaa2_evdev_selftest dpaa2_evdev_info
+
 #endif /* _DPAA2_EVENTDEV_LOGS_H_ */
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
index f7da7fad5..cea87d77b 100644
--- a/drivers/event/dpaa2/meson.build
+++ b/drivers/event/dpaa2/meson.build
@@ -9,7 +9,8 @@ if not is_linux
 endif
 deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
 sources = files('dpaa2_hw_dpcon.c',
-		'dpaa2_eventdev.c')
+		'dpaa2_eventdev.c',
+		'dpaa2_eventdev_selftest.c)
 
 allow_experimental_apis = true
 includes += include_directories('../../crypto/dpaa2_sec/')
-- 
2.17.1



* [dpdk-dev] [PATCH 5/5] test/event: enable dpaa2 self test
  2019-09-06 10:34 [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                   ` (3 preceding siblings ...)
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 4/5] event/dpaa2: add selftest cases Hemant Agrawal
@ 2019-09-06 10:34 ` Hemant Agrawal
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-06 10:34 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_eventdev.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 783140dfe..427dbbf77 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -1020,9 +1020,16 @@ test_eventdev_selftest_octeontx2(void)
 	return test_eventdev_selftest_impl("otx2_eventdev", "");
 }
 
+static int
+test_eventdev_selftest_dpaa2(void)
+{
+	return test_eventdev_selftest_impl("event_dpaa2", "");
+}
+
 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
 		test_eventdev_selftest_octeontx);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
 		test_eventdev_selftest_octeontx2);
+REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
-- 
2.17.1



* Re: [dpdk-dev] [PATCH 4/5] event/dpaa2: add selftest cases
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 4/5] event/dpaa2: add selftest cases Hemant Agrawal
@ 2019-09-06 19:29   ` Aaron Conole
  0 siblings, 0 replies; 34+ messages in thread
From: Aaron Conole @ 2019-09-06 19:29 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev, jerinj

Hemant Agrawal <hemant.agrawal@nxp.com> writes:

> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  drivers/event/dpaa2/Makefile              | 1 +
>  drivers/event/dpaa2/dpaa2_eventdev.c      | 1 +
>  drivers/event/dpaa2/dpaa2_eventdev.h      | 2 ++
>  drivers/event/dpaa2/dpaa2_eventdev_logs.h | 6 ++++++
>  drivers/event/dpaa2/meson.build           | 3 ++-
>  5 files changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
> index e0bb527b1..634179383 100644
> --- a/drivers/event/dpaa2/Makefile
> +++ b/drivers/event/dpaa2/Makefile
> @@ -40,5 +40,6 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
>  #
>  SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
>  SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
> +SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev_selftest.c
>  
>  include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
> index 9255de16f..902a80f36 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> @@ -951,6 +951,7 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
>  	.port_unlink      = dpaa2_eventdev_port_unlink,
>  	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
>  	.dump             = dpaa2_eventdev_dump,
> +	.dev_selftest     = test_eventdev_dpaa2,
>  	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
>  	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
>  	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
> index bdac1aa56..abc038e49 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.h
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.h
> @@ -98,4 +98,6 @@ struct dpaa2_eventdev {
>  struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
>  void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
>  
> +int test_eventdev_dpaa2(void);
> +
>  #endif /* __DPAA2_EVENTDEV_H__ */
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
> index 86f2e5393..bb5a0e26c 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
> +++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
> @@ -35,4 +35,10 @@ extern int dpaa2_logtype_event;
>  #define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
>  	DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
>  
> +#define dpaa2_evdev_info(fmt, ...) DPAA2_EVENTDEV_LOG(INFO, fmt, ##__VA_ARGS__)
> +#define dpaa2_evdev_dbg(fmt, ...) DPAA2_EVENTDEV_LOG(DEBUG, fmt, ##__VA_ARGS__)
> +#define dpaa2_evdev_err(fmt, ...) DPAA2_EVENTDEV_LOG(ERR, fmt, ##__VA_ARGS__)
> +#define dpaa2_evdev__func_trace dpaa2_evdev_dbg
> +#define dpaa2_evdev_selftest dpaa2_evdev_info
> +
>  #endif /* _DPAA2_EVENTDEV_LOGS_H_ */
> diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
> index f7da7fad5..cea87d77b 100644
> --- a/drivers/event/dpaa2/meson.build
> +++ b/drivers/event/dpaa2/meson.build
> @@ -9,7 +9,8 @@ if not is_linux
>  endif
>  deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
>  sources = files('dpaa2_hw_dpcon.c',
> -		'dpaa2_eventdev.c')
> +		'dpaa2_eventdev.c',
> +		'dpaa2_eventdev_selftest.c)

Unterminated string.

>  allow_experimental_apis = true
>  includes += include_directories('../../crypto/dpaa2_sec/')


* [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements
  2019-09-06 10:34 [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                   ` (4 preceding siblings ...)
  2019-09-06 10:34 ` [dpdk-dev] [PATCH 5/5] test/event: enable dpaa2 self test Hemant Agrawal
@ 2019-09-07  6:42 ` Hemant Agrawal
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
                     ` (5 more replies)
  5 siblings, 6 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-07  6:42 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch series has minor fixes in dpaa2 eventdev support
 - default queue and cleanup logic
 - introducing selftest for dpaa2

v2: fix compilation issue with meson and add the missing selftest file

Hemant Agrawal (5):
  event/dpaa2: fix def queue conf
  event/dpaa2: remove conditional compilation
  event/dpaa2: add destroy support
  event/dpaa2: add selftest cases
  test/event: enable dpaa2 self test

 app/test/test_eventdev.c                      |   7 +
 drivers/event/dpaa2/Makefile                  |   3 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |  49 +-
 drivers/event/dpaa2/dpaa2_eventdev.h          |   2 +
 drivers/event/dpaa2/dpaa2_eventdev_logs.h     |   6 +
 drivers/event/dpaa2/dpaa2_eventdev_selftest.c | 874 ++++++++++++++++++
 drivers/event/dpaa2/meson.build               |   3 +-
 7 files changed, 930 insertions(+), 14 deletions(-)
 create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_selftest.c

-- 
2.17.1



* [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
@ 2019-09-07  6:42   ` Hemant Agrawal
  2019-09-13  6:24     ` Jerin Jacob
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 2/5] event/dpaa2: remove conditional compilation Hemant Agrawal
                     ` (4 subsequent siblings)
  5 siblings, 1 reply; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-07  6:42 UTC (permalink / raw)
  To: dev; +Cc: jerinj, stable, Hemant Agrawal

The test vector expects only one scheduling type as the default.
The old code was reporting multiple scheduling types (OR-ed together)
instead of a single default.

Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
Cc: stable@dpdk.org
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 926b7edd8..b8cb437a0 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *
- *   Copyright 2017 NXP
- *
+ * Copyright 2017,2019 NXP
  */
 
 #include <assert.h>
@@ -470,8 +468,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	RTE_SET_USED(queue_conf);
 
 	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
-	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
-				      RTE_SCHED_TYPE_PARALLEL;
+	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 }
 
-- 
2.17.1



* [dpdk-dev] [PATCH v2 2/5] event/dpaa2: remove conditional compilation
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
@ 2019-09-07  6:42   ` Hemant Agrawal
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 3/5] event/dpaa2: add destroy support Hemant Agrawal
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-07  6:42 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

This patch removes the conditional compilation of the cryptodev event
support, which was previously guarded by the RTE_LIBRTE_SECURITY flag.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile         | 2 --
 drivers/event/dpaa2/dpaa2_eventdev.c | 6 ------
 2 files changed, 8 deletions(-)

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index 470157f25..e0bb527b1 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -24,10 +24,8 @@ LDLIBS += -lrte_common_dpaax
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
 
-ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
 LDLIBS += -lrte_pmd_dpaa2_sec
 CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec
-endif
 
 # versioning export map
 EXPORT_MAP := rte_pmd_dpaa2_event_version.map
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index b8cb437a0..98b487603 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -33,9 +33,7 @@
 #include <dpaa2_hw_mempool.h>
 #include <dpaa2_hw_dpio.h>
 #include <dpaa2_ethdev.h>
-#ifdef RTE_LIBRTE_SECURITY
 #include <dpaa2_sec_event.h>
-#endif
 #include "dpaa2_eventdev.h"
 #include "dpaa2_eventdev_logs.h"
 #include <portal/dpaa2_hw_pvt.h>
@@ -794,7 +792,6 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
 	return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
 static int
 dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
 			    const struct rte_cryptodev *cdev,
@@ -937,7 +934,6 @@ dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
 
 	return 0;
 }
-#endif
 
 static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
@@ -960,13 +956,11 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
 	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
 	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
-#ifdef RTE_LIBRTE_SECURITY
 	.crypto_adapter_caps_get	= dpaa2_eventdev_crypto_caps_get,
 	.crypto_adapter_queue_pair_add	= dpaa2_eventdev_crypto_queue_add,
 	.crypto_adapter_queue_pair_del	= dpaa2_eventdev_crypto_queue_del,
 	.crypto_adapter_start		= dpaa2_eventdev_crypto_start,
 	.crypto_adapter_stop		= dpaa2_eventdev_crypto_stop,
-#endif
 };
 
 static int
-- 
2.17.1



* [dpdk-dev] [PATCH v2 3/5] event/dpaa2: add destroy support
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 2/5] event/dpaa2: remove conditional compilation Hemant Agrawal
@ 2019-09-07  6:42   ` Hemant Agrawal
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 4/5] event/dpaa2: add selftest cases Hemant Agrawal
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-07  6:42 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 35 ++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 98b487603..9255de16f 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1059,6 +1059,39 @@ dpaa2_eventdev_create(const char *name)
 	return -EFAULT;
 }
 
+static int
+dpaa2_eventdev_destroy(const char *name)
+{
+	struct rte_eventdev *eventdev;
+	struct dpaa2_eventdev *priv;
+	int i;
+
+	eventdev = rte_event_pmd_get_named_dev(name);
+	if (eventdev == NULL) {
+		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
+		return -1;
+	}
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	priv = eventdev->data->dev_private;
+	for (i = 0; i < priv->max_event_queues; i++) {
+		if (priv->evq_info[i].dpcon)
+			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);
+
+		if (priv->evq_info[i].dpci)
+			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
+
+	}
+	priv->max_event_queues = 0;
+
+	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
+	return 0;
+}
+
+
 static int
 dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
 {
@@ -1077,6 +1110,8 @@ dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
 	name = rte_vdev_device_name(vdev);
 	DPAA2_EVENTDEV_INFO("Closing %s", name);
 
+	dpaa2_eventdev_destroy(name);
+
 	return rte_event_pmd_vdev_uninit(name);
 }
 
-- 
2.17.1



* [dpdk-dev] [PATCH v2 4/5] event/dpaa2: add selftest cases
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                     ` (2 preceding siblings ...)
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 3/5] event/dpaa2: add destroy support Hemant Agrawal
@ 2019-09-07  6:42   ` Hemant Agrawal
  2019-09-09 13:10     ` Aaron Conole
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 5/5] test/event: enable dpaa2 self test Hemant Agrawal
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  5 siblings, 1 reply; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-07  6:42 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

This patch adds a dpaa2 eventdev self test covering basic sanity
for parallel and atomic queues.
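
As an editorial aside (not part of the patch): once the .dev_selftest op is
populated, the suite can be driven through the generic eventdev selftest
API. A minimal sketch, assuming the driver's usual "event_dpaa2" vdev name:

#include <errno.h>
#include <rte_eventdev.h>

/* Editorial sketch: dispatches to test_eventdev_dpaa2() via the
 * .dev_selftest op registered by this patch.
 */
static int
run_dpaa2_evdev_selftest(void)
{
	int dev_id = rte_event_dev_get_dev_id("event_dpaa2");

	if (dev_id < 0)
		return -ENODEV;
	return rte_event_dev_selftest(dev_id);
}

This is, in effect, what the eventdev_selftest_dpaa2 command added in
patch 5/5 exercises from the test application.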

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile                  |   1 +
 drivers/event/dpaa2/dpaa2_eventdev.c          |   1 +
 drivers/event/dpaa2/dpaa2_eventdev.h          |   2 +
 drivers/event/dpaa2/dpaa2_eventdev_logs.h     |   6 +
 drivers/event/dpaa2/dpaa2_eventdev_selftest.c | 874 ++++++++++++++++++
 drivers/event/dpaa2/meson.build               |   3 +-
 6 files changed, 886 insertions(+), 1 deletion(-)
 create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_selftest.c

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index e0bb527b1..634179383 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -40,5 +40,6 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
 #
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev_selftest.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 9255de16f..902a80f36 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -951,6 +951,7 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.port_unlink      = dpaa2_eventdev_port_unlink,
 	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
 	.dump             = dpaa2_eventdev_dump,
+	.dev_selftest     = test_eventdev_dpaa2,
 	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
 	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index bdac1aa56..abc038e49 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -98,4 +98,6 @@ struct dpaa2_eventdev {
 struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
 void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
 
+int test_eventdev_dpaa2(void);
+
 #endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 86f2e5393..bb5a0e26c 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -35,4 +35,10 @@ extern int dpaa2_logtype_event;
 #define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
 	DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
 
+#define dpaa2_evdev_info(fmt, ...) DPAA2_EVENTDEV_LOG(INFO, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_dbg(fmt, ...) DPAA2_EVENTDEV_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_err(fmt, ...) DPAA2_EVENTDEV_LOG(ERR, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev__func_trace dpaa2_evdev_dbg
+#define dpaa2_evdev_selftest dpaa2_evdev_info
+
 #endif /* _DPAA2_EVENTDEV_LOGS_H_ */
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
new file mode 100644
index 000000000..e02f0f545
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
@@ -0,0 +1,874 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+
+#define MAX_PORTS 4
+#define NUM_PACKETS (1 << 18)
+//todo #define MAX_EVENTS  1024
+#define MAX_EVENTS  8
+#define DPAA2_TEST_RUN(setup, teardown, test) \
+	dpaa2_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+	uint32_t flow_id;
+	uint8_t event_type;
+	uint8_t sub_event_type;
+	uint8_t sched_type;
+	uint8_t queue;
+	uint8_t port;
+	uint8_t seq;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static inline void
+seqn_list_init(void)
+{
+	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+	memset(seqn_list, 0, sizeof(seqn_list));
+	seqn_list_index = 0;
+}
+
+static inline int
+seqn_list_update(int val)
+{
+	if (seqn_list_index >= NUM_PACKETS)
+		return -1;
+
+	seqn_list[seqn_list_index++] = val;
+	rte_smp_wmb();
+	return 0;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+	int i;
+
+	for (i = 0; i < limit; i++) {
+		if (seqn_list[i] != i) {
+			dpaa2_evdev_dbg("Seqn mismatch %d %d", seqn_list[i], i);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+struct test_core_param {
+	rte_atomic32_t *total_events;
+	uint64_t dequeue_tmo_ticks;
+	uint8_t port;
+	uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+	const char *eventdev_name = "event_dpaa2";
+
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+	if (evdev < 0) {
+		dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
+				__LINE__, eventdev_name);
+		if (rte_vdev_init(eventdev_name, NULL) < 0) {
+			dpaa2_evdev_dbg("Error creating eventdev %s",
+					eventdev_name);
+			return -1;
+		}
+		evdev = rte_event_dev_get_dev_id(eventdev_name);
+		if (evdev < 0) {
+			dpaa2_evdev_dbg("Error finding newly created eventdev");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_event_dev_close(evdev);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+			struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = info->max_event_ports;
+	dev_conf->nb_event_queues = info->max_event_queues;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+enum {
+	TEST_EVENTDEV_SETUP_DEFAULT,
+	TEST_EVENTDEV_SETUP_PRIORITY,
+	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static inline int
+_eventdev_setup(int mode)
+{
+	int i, ret;
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	const char *pool_name = "evdev_dpaa2_test_pool";
+
+	/* Create and destroy a pool for each test case to make it standalone */
+	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+					MAX_EVENTS,
+					0 /*MBUF_CACHE_SIZE*/,
+					0,
+					512, /* Use very small mbufs */
+					rte_socket_id());
+	if (!eventdev_test_mempool) {
+		dpaa2_evdev_dbg("ERROR creating mempool");
+		return -1;
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+			"ERROR max_num_events=%d < max_events=%d",
+				info.max_num_events, MAX_EVENTS);
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+	ret = rte_event_dev_configure(evdev, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+	uint32_t queue_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+		if (queue_count > 8) {
+			dpaa2_evdev_dbg(
+				"test expects the unique priority per queue");
+			return -ENOTSUP;
+		}
+
+		/* Configure event queues(0 to n) with
+		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+		 * RTE_EVENT_DEV_PRIORITY_LOWEST
+		 */
+		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+				queue_count;
+		for (i = 0; i < (int)queue_count; i++) {
+			struct rte_event_queue_conf queue_conf;
+
+			ret = rte_event_queue_default_conf_get(evdev, i,
+						&queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+					i);
+			queue_conf.priority = i * step;
+			ret = rte_event_queue_setup(evdev, i, &queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+
+	} else {
+		/* Configure event queues with default priority */
+		for (i = 0; i < (int)queue_count; i++) {
+			ret = rte_event_queue_setup(evdev, i, NULL);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+	}
+	/* Configure event ports */
+	uint32_t port_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_setup(evdev, i, NULL);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+				i);
+	}
+
+	ret = rte_event_dev_start(evdev);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+	return 0;
+}
+
+static inline int
+eventdev_setup(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static inline int
+eventdev_setup_priority(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
+}
+
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
+static inline void
+eventdev_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+	rte_mempool_free(eventdev_test_mempool);
+}
+
+static inline void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+			uint32_t flow_id, uint8_t event_type,
+			uint8_t sub_event_type, uint8_t sched_type,
+			uint8_t queue, uint8_t port, uint8_t seq)
+{
+	struct event_attr *attr;
+
+	/* Store the event attributes in mbuf for future reference */
+	attr = rte_pktmbuf_mtod(m, struct event_attr *);
+	attr->flow_id = flow_id;
+	attr->event_type = event_type;
+	attr->sub_event_type = sub_event_type;
+	attr->sched_type = sched_type;
+	attr->queue = queue;
+	attr->port = port;
+	attr->seq = seq;
+
+	ev->flow_id = flow_id;
+	ev->sub_event_type = sub_event_type;
+	ev->event_type = event_type;
+	/* Inject the new event */
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = sched_type;
+	ev->queue_id = queue;
+	ev->mbuf = m;
+}
+
+static inline int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+		uint8_t sched_type, uint8_t queue, uint8_t port,
+		unsigned int events)
+{
+	struct rte_mbuf *m;
+	unsigned int i;
+
+	for (i = 0; i < events; i++) {
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+		update_event_and_validation_attr(m, &ev, flow_id, event_type,
+			sub_event_type, sched_type, queue, port, i);
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+	return 0;
+}
+
+static inline int
+check_excess_events(uint8_t port)
+{
+	int i;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	/* Check for excess events, try for a few times and exit */
+	for (i = 0; i < 32; i++) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+		RTE_TEST_ASSERT_SUCCESS(valid_event,
+				"Unexpected valid event=%d", ev.mbuf->seqn);
+	}
+	return 0;
+}
+
+static inline int
+generate_random_events(const unsigned int total_events)
+{
+	struct rte_event_dev_info info;
+	unsigned int i;
+	int ret;
+
+	uint32_t queue_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	for (i = 0; i < total_events; i++) {
+		ret = inject_events(
+			rte_rand() % info.max_event_queue_flows /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			rte_rand() % queue_count /* queue */,
+			0 /* port */,
+			1 /* events */);
+		if (ret)
+			return -1;
+	}
+	return ret;
+}
+
+
+static inline int
+validate_event(struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+			"flow_id mismatch enq=%d deq =%d",
+			attr->flow_id, ev->flow_id);
+	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+			"event_type mismatch enq=%d deq =%d",
+			attr->event_type, ev->event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+			"sub_event_type mismatch enq=%d deq =%d",
+			attr->sub_event_type, ev->sub_event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+			"sched_type mismatch enq=%d deq =%d",
+			attr->sched_type, ev->sched_type);
+	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+			"queue mismatch enq=%d deq =%d",
+			attr->queue, ev->queue_id);
+	return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+				 struct rte_event *ev);
+
+static inline int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+	int ret;
+	uint16_t valid_event;
+	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+	struct rte_event ev;
+
+	while (1) {
+		if (++forward_progress_cnt > UINT16_MAX) {
+			dpaa2_evdev_dbg("Detected deadlock");
+			return -1;
+		}
+
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		forward_progress_cnt = 0;
+		ret = validate_event(&ev);
+		if (ret)
+			return -1;
+
+		if (fn != NULL) {
+			ret = fn(index, port, &ev);
+			RTE_TEST_ASSERT_SUCCESS(ret,
+				"Failed to validate test specific event");
+		}
+
+		++index;
+
+		rte_pktmbuf_free(ev.mbuf);
+		if (++events >= total_events)
+			break;
+	}
+
+	return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+
+	RTE_SET_USED(port);
+	RTE_TEST_ASSERT_EQUAL(index, attr->seq,
+		"index=%d != seqn=%d", index, attr->seq);
+	return 0;
+}
+
+static inline int
+test_simple_enqdeq(uint8_t sched_type)
+{
+	int ret;
+
+	ret = inject_events(0 /*flow_id */,
+				RTE_EVENT_TYPE_CPU /* event_type */,
+				0 /* sub_event_type */,
+				sched_type,
+				0 /* queue */,
+				0 /* port */,
+				MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS,	validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, using single event port(port 0) verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+	int ret;
+
+	ret = generate_random_events(MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	rte_atomic32_t *total_events = param->total_events;
+	int ret;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		ret = validate_event(&ev);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+		rte_pktmbuf_free(ev.mbuf);
+		rte_atomic32_sub(total_events, 1);
+	}
+	return 0;
+}
+
+static inline int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+	uint64_t cycles, print_cycles;
+
+	RTE_SET_USED(count);
+
+	print_cycles = cycles = rte_get_timer_cycles();
+	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+		uint64_t new_cycles = rte_get_timer_cycles();
+
+		if (new_cycles - print_cycles > rte_get_timer_hz()) {
+			dpaa2_evdev_dbg("\r%s: events %d", __func__,
+				rte_atomic32_read(count));
+			print_cycles = new_cycles;
+		}
+		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+			dpaa2_evdev_dbg(
+				"%s: No schedules for seconds, deadlock (%d)",
+				__func__,
+				rte_atomic32_read(count));
+			rte_event_dev_dump(evdev, stdout);
+			cycles = new_cycles;
+			return -1;
+		}
+	}
+	rte_eal_mp_wait_lcore();
+	return 0;
+}
+
+
+static inline int
+launch_workers_and_wait(int (*master_worker)(void *),
+			int (*slave_workers)(void *), uint32_t total_events,
+			uint8_t nb_workers, uint8_t sched_type)
+{
+	uint8_t port = 0;
+	int w_lcore;
+	int ret;
+	struct test_core_param *param;
+	rte_atomic32_t atomic_total_events;
+	uint64_t dequeue_tmo_ticks;
+
+	if (!nb_workers)
+		return 0;
+
+	rte_atomic32_set(&atomic_total_events, total_events);
+	seqn_list_init();
+
+	param = malloc(sizeof(struct test_core_param) * nb_workers);
+	if (!param)
+		return -1;
+
+	ret = rte_event_dequeue_timeout_ticks(evdev,
+		rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
+	if (ret) {
+		free(param);
+		return -1;
+	}
+
+	param[0].total_events = &atomic_total_events;
+	param[0].sched_type = sched_type;
+	param[0].port = 0;
+	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+	rte_smp_wmb();
+
+	w_lcore = rte_get_next_lcore(
+			/* start core */ -1,
+			/* skip master */ 1,
+			/* wrap */ 0);
+	rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+	for (port = 1; port < nb_workers; port++) {
+		param[port].total_events = &atomic_total_events;
+		param[port].sched_type = sched_type;
+		param[port].port = port;
+		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+		rte_smp_wmb();
+		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+	}
+
+	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+	free(param);
+	return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		dpaa2_evdev_dbg("%s: Not enough ports=%d or workers=%d",
+			__func__, nr_ports, rte_lcore_count() - 1);
+		return 0;
+	}
+
+	return launch_workers_and_wait(worker_multi_port_fn,
+					worker_multi_port_fn, total_events,
+					nr_ports, 0xff /* invalid */);
+}
+
+static
+void flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+	unsigned int *count = arg;
+
+	RTE_SET_USED(dev_id);
+	if (event.event_type == RTE_EVENT_TYPE_CPU)
+		*count = *count + 1;
+
+}
+
+static int
+test_dev_stop_flush(void)
+{
+	unsigned int total_events = MAX_EVENTS, count = 0;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+	if (ret)
+		return -2;
+	rte_event_dev_stop(evdev);
+	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+	if (ret)
+		return -3;
+	RTE_TEST_ASSERT_EQUAL(total_events, count,
+				"count mismatch total_events=%d count=%d",
+				total_events, count);
+	return 0;
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link queue x to port x and check correctness of link by checking
+ * queue_id == x on dequeue on the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+	int i, nr_links, ret;
+
+	uint32_t port_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+
+	/* Unlink all connections that created in eventdev_setup */
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_unlink(evdev, i, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0,
+				"Failed to unlink all queues port=%d", i);
+	}
+
+	uint32_t queue_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	nr_links = RTE_MIN(port_count, queue_count);
+	const unsigned int total_events = MAX_EVENTS / nr_links;
+
+	/* Link queue x to port x and inject events to queue x through port x */
+	for (i = 0; i < nr_links; i++) {
+		uint8_t queue = (uint8_t)i;
+
+		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			i /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+	}
+
+	/* Verify the events generated from correct queue */
+	for (i = 0; i < nr_links; i++) {
+		ret = consume_events(i /* port */, total_events,
+				validate_queue_to_port_single_link);
+		if (ret)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link all even number of queues to port 0 and all odd number of queues to
+ * port 1 and verify the link connection on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+	int ret, port0_events = 0, port1_events = 0;
+	uint8_t queue, port;
+	uint32_t nr_queues = 0;
+	uint32_t nr_ports = 0;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &nr_queues), "Queue count get failed");
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				&nr_queues), "Queue count get failed");
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+
+	if (nr_ports < 2) {
+		dpaa2_evdev_dbg("%s: Not enough ports to test ports=%d",
+				__func__, nr_ports);
+		return 0;
+	}
+
+	/* Unlink all connections that created in eventdev_setup */
+	for (port = 0; port < nr_ports; port++) {
+		ret = rte_event_port_unlink(evdev, port, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+					port);
+	}
+
+	const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+	/* Link all even number of queues to port0 and odd numbers to port 1*/
+	for (queue = 0; queue < nr_queues; queue++) {
+		port = queue & 0x1;
+		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+					queue, port);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			port /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+
+		if (port == 0)
+			port0_events += total_events;
+		else
+			port1_events += total_events;
+	}
+
+	ret = consume_events(0 /* port */, port0_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+	ret = consume_events(1 /* port */, port1_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
+static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
+		int (*test)(void), const char *name)
+{
+	if (setup() < 0) {
+		RTE_LOG(INFO, PMD, "Error setting up test %s", name);
+		unsupported++;
+	} else {
+		if (test() < 0) {
+			failed++;
+			RTE_LOG(INFO, PMD, "%s Failed\n", name);
+		} else {
+			passed++;
+			RTE_LOG(INFO, PMD, "%s Passed", name);
+		}
+	}
+
+	total++;
+	tdown();
+}
+
+int
+test_eventdev_dpaa2(void)
+{
+	testsuite_setup();
+
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_atomic);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_parallel);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_single_port_deq);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_dev_stop_flush);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_multi_port_deq);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_single_link);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_multi_link);
+
+	DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
+	DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
+	DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
+	DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);
+
+	testsuite_teardown();
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
index f7da7fad5..72f97d4c1 100644
--- a/drivers/event/dpaa2/meson.build
+++ b/drivers/event/dpaa2/meson.build
@@ -9,7 +9,8 @@ if not is_linux
 endif
 deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
 sources = files('dpaa2_hw_dpcon.c',
-		'dpaa2_eventdev.c')
+		'dpaa2_eventdev.c',
+		'dpaa2_eventdev_selftest.c')
 
 allow_experimental_apis = true
 includes += include_directories('../../crypto/dpaa2_sec/')
-- 
2.17.1



* [dpdk-dev] [PATCH v2 5/5] test/event: enable dpaa2 self test
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                     ` (3 preceding siblings ...)
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 4/5] event/dpaa2: add selftest cases Hemant Agrawal
@ 2019-09-07  6:42   ` Hemant Agrawal
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-07  6:42 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Hemant Agrawal

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_eventdev.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 783140dfe..427dbbf77 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -1020,9 +1020,16 @@ test_eventdev_selftest_octeontx2(void)
 	return test_eventdev_selftest_impl("otx2_eventdev", "");
 }
 
+static int
+test_eventdev_selftest_dpaa2(void)
+{
+	return test_eventdev_selftest_impl("event_dpaa2", "");
+}
+
 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
 		test_eventdev_selftest_octeontx);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
 		test_eventdev_selftest_octeontx2);
+REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
-- 
2.17.1



* Re: [dpdk-dev] [PATCH v2 4/5] event/dpaa2: add selftest cases
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 4/5] event/dpaa2: add selftest cases Hemant Agrawal
@ 2019-09-09 13:10     ` Aaron Conole
  2019-09-10  7:19       ` Hemant Agrawal
  0 siblings, 1 reply; 34+ messages in thread
From: Aaron Conole @ 2019-09-09 13:10 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev, jerinj

Hemant Agrawal <hemant.agrawal@nxp.com> writes:

> This patch adds a dpaa2 eventdev self test covering basic sanity
> for parallel and atomic queues.
>
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  drivers/event/dpaa2/Makefile                  |   1 +
>  drivers/event/dpaa2/dpaa2_eventdev.c          |   1 +
>  drivers/event/dpaa2/dpaa2_eventdev.h          |   2 +
>  drivers/event/dpaa2/dpaa2_eventdev_logs.h     |   6 +
>  drivers/event/dpaa2/dpaa2_eventdev_selftest.c | 874 ++++++++++++++++++
>  drivers/event/dpaa2/meson.build               |   3 +-
>  6 files changed, 886 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_selftest.c
>
> diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
> index e0bb527b1..634179383 100644
> --- a/drivers/event/dpaa2/Makefile
> +++ b/drivers/event/dpaa2/Makefile
> @@ -40,5 +40,6 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
>  #
>  SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
>  SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
> +SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev_selftest.c
>  
>  include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
> index 9255de16f..902a80f36 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> @@ -951,6 +951,7 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
>  	.port_unlink      = dpaa2_eventdev_port_unlink,
>  	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
>  	.dump             = dpaa2_eventdev_dump,
> +	.dev_selftest     = test_eventdev_dpaa2,
>  	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
>  	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
>  	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
> index bdac1aa56..abc038e49 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.h
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.h
> @@ -98,4 +98,6 @@ struct dpaa2_eventdev {
>  struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
>  void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
>  
> +int test_eventdev_dpaa2(void);
> +
>  #endif /* __DPAA2_EVENTDEV_H__ */
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
> index 86f2e5393..bb5a0e26c 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
> +++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
> @@ -35,4 +35,10 @@ extern int dpaa2_logtype_event;
>  #define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
>  	DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
>  
> +#define dpaa2_evdev_info(fmt, ...) DPAA2_EVENTDEV_LOG(INFO, fmt, ##__VA_ARGS__)
> +#define dpaa2_evdev_dbg(fmt, ...) DPAA2_EVENTDEV_LOG(DEBUG, fmt, ##__VA_ARGS__)
> +#define dpaa2_evdev_err(fmt, ...) DPAA2_EVENTDEV_LOG(ERR, fmt, ##__VA_ARGS__)
> +#define dpaa2_evdev__func_trace dpaa2_evdev_dbg
> +#define dpaa2_evdev_selftest dpaa2_evdev_info
> +
>  #endif /* _DPAA2_EVENTDEV_LOGS_H_ */
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
> new file mode 100644
> index 000000000..e02f0f545
> --- /dev/null
> +++ b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c

In general, please prefer 'err' level logs in selftests.  At least
something that will show up when debugging.  There are error cases below
that are _dbg logged which won't help diagnose issues.

Also, in a .c file, please don't use 'static inline'.  'static' is good
enough.
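
For example, seqn_list_check() further down could end up looking like
this (just a sketch to illustrate the two points above, reusing the
file's seqn_list array and the existing dpaa2_evdev_err macro):

static int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			dpaa2_evdev_err("Seqn mismatch %d %d",
					seqn_list[i], i);
			return -1;
		}
	}
	return 0;
}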

> @@ -0,0 +1,874 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2018 NXP
> + */
> +
> +#include <rte_atomic.h>
> +#include <rte_common.h>
> +#include <rte_cycles.h>
> +#include <rte_debug.h>
> +#include <rte_eal.h>
> +#include <rte_ethdev.h>
> +#include <rte_eventdev.h>
> +#include <rte_hexdump.h>
> +#include <rte_mbuf.h>
> +#include <rte_malloc.h>
> +#include <rte_memcpy.h>
> +#include <rte_launch.h>
> +#include <rte_lcore.h>
> +#include <rte_per_lcore.h>
> +#include <rte_random.h>
> +#include <rte_bus_vdev.h>
> +#include <rte_test.h>
> +
> +#include "dpaa2_eventdev.h"
> +#include "dpaa2_eventdev_logs.h"
> +
> +#define MAX_PORTS 4
> +#define NUM_PACKETS (1 << 18)
> +//todo #define MAX_EVENTS  1024

^^ Please, don't mix C and C++ comment styles

> +#define MAX_EVENTS  8
> +#define DPAA2_TEST_RUN(setup, teardown, test) \
> +	dpaa2_test_run(setup, teardown, test, #test)
> +
> +static int total;
> +static int passed;
> +static int failed;
> +static int unsupported;
> +
> +static int evdev;
> +static struct rte_mempool *eventdev_test_mempool;
> +
> +struct event_attr {
> +	uint32_t flow_id;
> +	uint8_t event_type;
> +	uint8_t sub_event_type;
> +	uint8_t sched_type;
> +	uint8_t queue;
> +	uint8_t port;
> +	uint8_t seq;
> +};
> +
> +static uint32_t seqn_list_index;
> +static int seqn_list[NUM_PACKETS];
> +
> +static inline void
> +seqn_list_init(void)
> +{
> +	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
> +	memset(seqn_list, 0, sizeof(seqn_list));
> +	seqn_list_index = 0;
> +}
> +
> +static inline int
> +seqn_list_update(int val)

^^ Function unused.

> +{
> +	if (seqn_list_index >= NUM_PACKETS)
> +		return -1;
> +
> +	seqn_list[seqn_list_index++] = val;
> +	rte_smp_wmb();
> +	return 0;
> +}
> +
> +static inline int
> +seqn_list_check(int limit)

^^ Function unused.

> +{
> +	int i;
> +
> +	for (i = 0; i < limit; i++) {
> +		if (seqn_list[i] != i) {
> +			dpaa2_evdev_dbg("Seqn mismatch %d %d", seqn_list[i], i);
> +			return -1;
> +		}
> +	}
> +	return 0;
> +}
> +
> +struct test_core_param {
> +	rte_atomic32_t *total_events;
> +	uint64_t dequeue_tmo_ticks;
> +	uint8_t port;
> +	uint8_t sched_type;
> +};
> +
> +static int
> +testsuite_setup(void)
> +{
> +	const char *eventdev_name = "event_dpaa2";
> +
> +	evdev = rte_event_dev_get_dev_id(eventdev_name);
> +	if (evdev < 0) {
> +		dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
> +				__LINE__, eventdev_name);
> +		if (rte_vdev_init(eventdev_name, NULL) < 0) {
> +			dpaa2_evdev_dbg("Error creating eventdev %s",
> +					eventdev_name);
> +			return -1;
> +		}
> +		evdev = rte_event_dev_get_dev_id(eventdev_name);
> +		if (evdev < 0) {
> +			dpaa2_evdev_dbg("Error finding newly created eventdev");
> +			return -1;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +testsuite_teardown(void)
> +{
> +	rte_event_dev_close(evdev);
> +}
> +
> +static inline void
> +devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
> +			struct rte_event_dev_info *info)
> +{
> +	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
> +	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
> +	dev_conf->nb_event_ports = info->max_event_ports;
> +	dev_conf->nb_event_queues = info->max_event_queues;
> +	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
> +	dev_conf->nb_event_port_dequeue_depth =
> +			info->max_event_port_dequeue_depth;
> +	dev_conf->nb_event_port_enqueue_depth =
> +			info->max_event_port_enqueue_depth;
> +	dev_conf->nb_event_port_enqueue_depth =
> +			info->max_event_port_enqueue_depth;
> +	dev_conf->nb_events_limit =
> +			info->max_num_events;
> +}
> +
> +enum {
> +	TEST_EVENTDEV_SETUP_DEFAULT,
> +	TEST_EVENTDEV_SETUP_PRIORITY,
> +	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
> +};
> +
> +static inline int
> +_eventdev_setup(int mode)
> +{
> +	int i, ret;
> +	struct rte_event_dev_config dev_conf;
> +	struct rte_event_dev_info info;
> +	const char *pool_name = "evdev_dpaa2_test_pool";
> +
> +	/* Create and destrory pool for each test case to make it standalone */
> +	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
> +					MAX_EVENTS,
> +					0 /*MBUF_CACHE_SIZE*/,
> +					0,
> +					512, /* Use very small mbufs */
> +					rte_socket_id());
> +	if (!eventdev_test_mempool) {
> +		dpaa2_evdev_dbg("ERROR creating mempool");
> +		return -1;
> +	}
> +
> +	ret = rte_event_dev_info_get(evdev, &info);
> +	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
> +	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
> +			"ERROR max_num_events=%d < max_events=%d",
> +				info.max_num_events, MAX_EVENTS);
> +
> +	devconf_set_default_sane_values(&dev_conf, &info);
> +	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
> +		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
> +
> +	ret = rte_event_dev_configure(evdev, &dev_conf);
> +	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
> +
> +	uint32_t queue_count;
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
> +			    &queue_count), "Queue count get failed");
> +
> +	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
> +		if (queue_count > 8) {
> +			dpaa2_evdev_dbg(
> +				"test expects the unique priority per queue");
> +			return -ENOTSUP;
> +		}
> +
> +		/* Configure event queues(0 to n) with
> +		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
> +		 * RTE_EVENT_DEV_PRIORITY_LOWEST
> +		 */
> +		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
> +				queue_count;
> +		for (i = 0; i < (int)queue_count; i++) {
> +			struct rte_event_queue_conf queue_conf;
> +
> +			ret = rte_event_queue_default_conf_get(evdev, i,
> +						&queue_conf);
> +			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
> +					i);
> +			queue_conf.priority = i * step;
> +			ret = rte_event_queue_setup(evdev, i, &queue_conf);
> +			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
> +					i);
> +		}
> +
> +	} else {
> +		/* Configure event queues with default priority */
> +		for (i = 0; i < (int)queue_count; i++) {
> +			ret = rte_event_queue_setup(evdev, i, NULL);
> +			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
> +					i);
> +		}
> +	}
> +	/* Configure event ports */
> +	uint32_t port_count;
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +				RTE_EVENT_DEV_ATTR_PORT_COUNT,
> +				&port_count), "Port count get failed");
> +	for (i = 0; i < (int)port_count; i++) {
> +		ret = rte_event_port_setup(evdev, i, NULL);
> +		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
> +		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
> +		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
> +				i);
> +	}
> +
> +	ret = rte_event_dev_start(evdev);
> +	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
> +
> +	return 0;
> +}
> +
> +static inline int
> +eventdev_setup(void)
> +{
> +	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
> +}
> +
> +static inline int
> +eventdev_setup_priority(void)
> +{
> +	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
> +}
> +
> +static inline int
> +eventdev_setup_dequeue_timeout(void)
> +{
> +	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
> +}
> +
> +static inline void
> +eventdev_teardown(void)
> +{
> +	rte_event_dev_stop(evdev);
> +	rte_mempool_free(eventdev_test_mempool);
> +}
> +
> +static inline void
> +update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
> +			uint32_t flow_id, uint8_t event_type,
> +			uint8_t sub_event_type, uint8_t sched_type,
> +			uint8_t queue, uint8_t port, uint8_t seq)
> +{
> +	struct event_attr *attr;
> +
> +	/* Store the event attributes in mbuf for future reference */
> +	attr = rte_pktmbuf_mtod(m, struct event_attr *);
> +	attr->flow_id = flow_id;
> +	attr->event_type = event_type;
> +	attr->sub_event_type = sub_event_type;
> +	attr->sched_type = sched_type;
> +	attr->queue = queue;
> +	attr->port = port;
> +	attr->seq = seq;
> +
> +	ev->flow_id = flow_id;
> +	ev->sub_event_type = sub_event_type;
> +	ev->event_type = event_type;
> +	/* Inject the new event */
> +	ev->op = RTE_EVENT_OP_NEW;
> +	ev->sched_type = sched_type;
> +	ev->queue_id = queue;
> +	ev->mbuf = m;
> +}
> +
> +static inline int
> +inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
> +		uint8_t sched_type, uint8_t queue, uint8_t port,
> +		unsigned int events)
> +{
> +	struct rte_mbuf *m;
> +	unsigned int i;
> +
> +	for (i = 0; i < events; i++) {
> +		struct rte_event ev = {.event = 0, .u64 = 0};
> +
> +		m = rte_pktmbuf_alloc(eventdev_test_mempool);
> +		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
> +
> +		update_event_and_validation_attr(m, &ev, flow_id, event_type,
> +			sub_event_type, sched_type, queue, port, i);
> +		rte_event_enqueue_burst(evdev, port, &ev, 1);
> +	}
> +	return 0;
> +}
> +
> +static inline int
> +check_excess_events(uint8_t port)
> +{
> +	int i;
> +	uint16_t valid_event;
> +	struct rte_event ev;
> +
> +	/* Check for excess events, try for a few times and exit */
> +	for (i = 0; i < 32; i++) {
> +		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
> +
> +		RTE_TEST_ASSERT_SUCCESS(valid_event,
> +				"Unexpected valid event=%d", ev.mbuf->seqn);
> +	}
> +	return 0;
> +}
> +
> +static inline int
> +generate_random_events(const unsigned int total_events)
> +{
> +	struct rte_event_dev_info info;
> +	unsigned int i;
> +	int ret;
> +
> +	uint32_t queue_count;
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
> +			    &queue_count), "Queue count get failed");
> +
> +	ret = rte_event_dev_info_get(evdev, &info);
> +	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
> +	for (i = 0; i < total_events; i++) {
> +		ret = inject_events(
> +			rte_rand() % info.max_event_queue_flows /*flow_id */,
> +			RTE_EVENT_TYPE_CPU /* event_type */,
> +			rte_rand() % 256 /* sub_event_type */,
> +			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
> +			rte_rand() % queue_count /* queue */,
> +			0 /* port */,
> +			1 /* events */);
> +		if (ret)
> +			return -1;
> +	}
> +	return ret;
> +}
> +
> +
> +static inline int
> +validate_event(struct rte_event *ev)
> +{
> +	struct event_attr *attr;
> +
> +	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
> +	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
> +			"flow_id mismatch enq=%d deq =%d",
> +			attr->flow_id, ev->flow_id);
> +	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
> +			"event_type mismatch enq=%d deq =%d",
> +			attr->event_type, ev->event_type);
> +	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
> +			"sub_event_type mismatch enq=%d deq =%d",
> +			attr->sub_event_type, ev->sub_event_type);
> +	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
> +			"sched_type mismatch enq=%d deq =%d",
> +			attr->sched_type, ev->sched_type);
> +	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
> +			"queue mismatch enq=%d deq =%d",
> +			attr->queue, ev->queue_id);
> +	return 0;
> +}
> +
> +typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
> +				 struct rte_event *ev);
> +
> +static inline int
> +consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
> +{
> +	int ret;
> +	uint16_t valid_event;
> +	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
> +	struct rte_event ev;
> +
> +	while (1) {
> +		if (++forward_progress_cnt > UINT16_MAX) {
> +			dpaa2_evdev_dbg("Detected deadlock");
> +			return -1;
> +		}
> +
> +		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
> +		if (!valid_event)
> +			continue;
> +
> +		forward_progress_cnt = 0;
> +		ret = validate_event(&ev);
> +		if (ret)
> +			return -1;
> +
> +		if (fn != NULL) {
> +			ret = fn(index, port, &ev);
> +			RTE_TEST_ASSERT_SUCCESS(ret,
> +				"Failed to validate test specific event");
> +		}
> +
> +		++index;
> +
> +		rte_pktmbuf_free(ev.mbuf);
> +		if (++events >= total_events)
> +			break;
> +	}
> +
> +	return check_excess_events(port);
> +}
> +
> +static int
> +validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
> +{
> +	struct event_attr *attr;
> +
> +	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
> +
> +	RTE_SET_USED(port);
> +	RTE_TEST_ASSERT_EQUAL(index, attr->seq,
> +		"index=%d != seqn=%d", index, attr->seq);
> +	return 0;
> +}
> +
> +static inline int
> +test_simple_enqdeq(uint8_t sched_type)
> +{
> +	int ret;
> +
> +	ret = inject_events(0 /*flow_id */,
> +				RTE_EVENT_TYPE_CPU /* event_type */,
> +				0 /* sub_event_type */,
> +				sched_type,
> +				0 /* queue */,
> +				0 /* port */,
> +				MAX_EVENTS);
> +	if (ret)
> +		return -1;
> +
> +	return consume_events(0 /* port */, MAX_EVENTS,	validate_simple_enqdeq);
> +}
> +
> +static int
> +test_simple_enqdeq_atomic(void)
> +{
> +	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
> +}
> +
> +static int
> +test_simple_enqdeq_parallel(void)
> +{
> +	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
> +}
> +
> +/*
> + * Generate a prescribed number of events and spread them across available
> + * queues. On dequeue, using single event port(port 0) verify the enqueued
> + * event attributes
> + */
> +static int
> +test_multi_queue_enq_single_port_deq(void)
> +{
> +	int ret;
> +
> +	ret = generate_random_events(MAX_EVENTS);
> +	if (ret)
> +		return -1;
> +
> +	return consume_events(0 /* port */, MAX_EVENTS, NULL);
> +}
> +
> +static int
> +worker_multi_port_fn(void *arg)
> +{
> +	struct test_core_param *param = arg;
> +	struct rte_event ev;
> +	uint16_t valid_event;
> +	uint8_t port = param->port;
> +	rte_atomic32_t *total_events = param->total_events;
> +	int ret;
> +
> +	while (rte_atomic32_read(total_events) > 0) {
> +		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
> +		if (!valid_event)
> +			continue;
> +
> +		ret = validate_event(&ev);
> +		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
> +		rte_pktmbuf_free(ev.mbuf);
> +		rte_atomic32_sub(total_events, 1);
> +	}
> +	return 0;
> +}
> +
> +static inline int
> +wait_workers_to_join(int lcore, const rte_atomic32_t *count)
> +{
> +	uint64_t cycles, print_cycles;
> +
> +	RTE_SET_USED(count);
> +
> +	print_cycles = cycles = rte_get_timer_cycles();
> +	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
> +		uint64_t new_cycles = rte_get_timer_cycles();
> +
> +		if (new_cycles - print_cycles > rte_get_timer_hz()) {
> +			dpaa2_evdev_dbg("\r%s: events %d", __func__,
> +				rte_atomic32_read(count));
> +			print_cycles = new_cycles;
> +		}
> +		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
> +			dpaa2_evdev_dbg(
> +				"%s: No schedules for seconds, deadlock (%d)",
> +				__func__,
> +				rte_atomic32_read(count));
> +			rte_event_dev_dump(evdev, stdout);
> +			cycles = new_cycles;
> +			return -1;
> +		}
> +	}
> +	rte_eal_mp_wait_lcore();
> +	return 0;
> +}
> +
> +
> +static inline int
> +launch_workers_and_wait(int (*master_worker)(void *),
> +			int (*slave_workers)(void *), uint32_t total_events,
> +			uint8_t nb_workers, uint8_t sched_type)
> +{
> +	uint8_t port = 0;
> +	int w_lcore;
> +	int ret;
> +	struct test_core_param *param;
> +	rte_atomic32_t atomic_total_events;
> +	uint64_t dequeue_tmo_ticks;
> +
> +	if (!nb_workers)
> +		return 0;
> +
> +	rte_atomic32_set(&atomic_total_events, total_events);
> +	seqn_list_init();
> +
> +	param = malloc(sizeof(struct test_core_param) * nb_workers);
> +	if (!param)
> +		return -1;
> +
> +	ret = rte_event_dequeue_timeout_ticks(evdev,
> +		rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
> +	if (ret) {
> +		free(param);
> +		return -1;
> +	}
> +
> +	param[0].total_events = &atomic_total_events;
> +	param[0].sched_type = sched_type;
> +	param[0].port = 0;
> +	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
> +	rte_smp_wmb();
> +
> +	w_lcore = rte_get_next_lcore(
> +			/* start core */ -1,
> +			/* skip master */ 1,
> +			/* wrap */ 0);
> +	rte_eal_remote_launch(master_worker, &param[0], w_lcore);
> +
> +	for (port = 1; port < nb_workers; port++) {
> +		param[port].total_events = &atomic_total_events;
> +		param[port].sched_type = sched_type;
> +		param[port].port = port;
> +		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
> +		rte_smp_wmb();
> +		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
> +		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
> +	}
> +
> +	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
> +	free(param);
> +	return ret;
> +}
> +
> +/*
> + * Generate a prescribed number of events and spread them across available
> + * queues. Dequeue the events through multiple ports and verify the enqueued
> + * event attributes
> + */
> +static int
> +test_multi_queue_enq_multi_port_deq(void)
> +{
> +	const unsigned int total_events = MAX_EVENTS;
> +	uint32_t nr_ports;
> +	int ret;
> +
> +	ret = generate_random_events(total_events);
> +	if (ret)
> +		return -1;
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +				RTE_EVENT_DEV_ATTR_PORT_COUNT,
> +				&nr_ports), "Port count get failed");
> +	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
> +
> +	if (!nr_ports) {
> +		dpaa2_evdev_dbg("%s: Not enough ports=%d or workers=%d",
> +			__func__, nr_ports, rte_lcore_count() - 1);
> +		return 0;
> +	}
> +
> +	return launch_workers_and_wait(worker_multi_port_fn,
> +					worker_multi_port_fn, total_events,
> +					nr_ports, 0xff /* invalid */);
> +}
> +
> +static
> +void flush(uint8_t dev_id, struct rte_event event, void *arg)
> +{
> +	unsigned int *count = arg;
> +
> +	RTE_SET_USED(dev_id);
> +	if (event.event_type == RTE_EVENT_TYPE_CPU)
> +		*count = *count + 1;
> +
> +}
> +
> +static int
> +test_dev_stop_flush(void)
> +{
> +	unsigned int total_events = MAX_EVENTS, count = 0;
> +	int ret;
> +
> +	ret = generate_random_events(total_events);
> +	if (ret)
> +		return -1;
> +
> +	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
> +	if (ret)
> +		return -2;
> +	rte_event_dev_stop(evdev);
> +	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
> +	if (ret)
> +		return -3;
> +	RTE_TEST_ASSERT_EQUAL(total_events, count,
> +				"count mismatch total_events=%d count=%d",
> +				total_events, count);
> +	return 0;
> +}
> +
> +static int
> +validate_queue_to_port_single_link(uint32_t index, uint8_t port,
> +			struct rte_event *ev)
> +{
> +	RTE_SET_USED(index);
> +	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
> +				"queue mismatch enq=%d deq =%d",
> +				port, ev->queue_id);
> +	return 0;
> +}
> +
> +/*
> + * Link queue x to port x and check correctness of link by checking
> + * queue_id == x on dequeue on the specific port x
> + */
> +static int
> +test_queue_to_port_single_link(void)
> +{
> +	int i, nr_links, ret;
> +
> +	uint32_t port_count;
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +				RTE_EVENT_DEV_ATTR_PORT_COUNT,
> +				&port_count), "Port count get failed");
> +
> +	/* Unlink all connections that created in eventdev_setup */
> +	for (i = 0; i < (int)port_count; i++) {
> +		ret = rte_event_port_unlink(evdev, i, NULL, 0);
> +		RTE_TEST_ASSERT(ret >= 0,
> +				"Failed to unlink all queues port=%d", i);
> +	}
> +
> +	uint32_t queue_count;
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
> +			    &queue_count), "Queue count get failed");
> +
> +	nr_links = RTE_MIN(port_count, queue_count);
> +	const unsigned int total_events = MAX_EVENTS / nr_links;
> +
> +	/* Link queue x to port x and inject events to queue x through port x */
> +	for (i = 0; i < nr_links; i++) {
> +		uint8_t queue = (uint8_t)i;
> +
> +		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
> +		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
> +
> +		ret = inject_events(
> +			0x100 /*flow_id */,
> +			RTE_EVENT_TYPE_CPU /* event_type */,
> +			rte_rand() % 256 /* sub_event_type */,
> +			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
> +			queue /* queue */,
> +			i /* port */,
> +			total_events /* events */);
> +		if (ret)
> +			return -1;
> +	}
> +
> +	/* Verify the events generated from correct queue */
> +	for (i = 0; i < nr_links; i++) {
> +		ret = consume_events(i /* port */, total_events,
> +				validate_queue_to_port_single_link);
> +		if (ret)
> +			return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
> +			struct rte_event *ev)
> +{
> +	RTE_SET_USED(index);
> +	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
> +				"queue mismatch enq=%d deq =%d",
> +				port, ev->queue_id);
> +	return 0;
> +}
> +
> +/*
> + * Link all even number of queues to port 0 and all odd number of queues to
> + * port 1 and verify the link connection on dequeue
> + */
> +static int
> +test_queue_to_port_multi_link(void)
> +{
> +	int ret, port0_events = 0, port1_events = 0;
> +	uint8_t queue, port;
> +	uint32_t nr_queues = 0;
> +	uint32_t nr_ports = 0;
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
> +			    &nr_queues), "Queue count get failed");
> +
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
> +				&nr_queues), "Queue count get failed");
> +	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
> +				RTE_EVENT_DEV_ATTR_PORT_COUNT,
> +				&nr_ports), "Port count get failed");
> +
> +	if (nr_ports < 2) {
> +		dpaa2_evdev_dbg("%s: Not enough ports to test ports=%d",
> +				__func__, nr_ports);
> +		return 0;
> +	}
> +
> +	/* Unlink all connections that created in eventdev_setup */
> +	for (port = 0; port < nr_ports; port++) {
> +		ret = rte_event_port_unlink(evdev, port, NULL, 0);
> +		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
> +					port);
> +	}
> +
> +	const unsigned int total_events = MAX_EVENTS / nr_queues;
> +
> +	/* Link all even number of queues to port0 and odd numbers to port 1*/
> +	for (queue = 0; queue < nr_queues; queue++) {
> +		port = queue & 0x1;
> +		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
> +		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
> +					queue, port);
> +
> +		ret = inject_events(
> +			0x100 /*flow_id */,
> +			RTE_EVENT_TYPE_CPU /* event_type */,
> +			rte_rand() % 256 /* sub_event_type */,
> +			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
> +			queue /* queue */,
> +			port /* port */,
> +			total_events /* events */);
> +		if (ret)
> +			return -1;
> +
> +		if (port == 0)
> +			port0_events += total_events;
> +		else
> +			port1_events += total_events;
> +	}
> +
> +	ret = consume_events(0 /* port */, port0_events,
> +				validate_queue_to_port_multi_link);
> +	if (ret)
> +		return -1;
> +	ret = consume_events(1 /* port */, port1_events,
> +				validate_queue_to_port_multi_link);
> +	if (ret)
> +		return -1;
> +
> +	return 0;
> +}
> +
> +static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
> +		int (*test)(void), const char *name)
> +{
> +	if (setup() < 0) {
> +		RTE_LOG(INFO, PMD, "Error setting up test %s", name);
> +		unsupported++;
> +	} else {
> +		if (test() < 0) {
> +			failed++;
> +			RTE_LOG(INFO, PMD, "%s Failed\n", name);
> +		} else {
> +			passed++;
> +			RTE_LOG(INFO, PMD, "%s Passed", name);
> +		}
> +	}
> +
> +	total++;
> +	tdown();
> +}
> +
> +int
> +test_eventdev_dpaa2(void)
> +{
> +	testsuite_setup();
> +
> +	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
> +			test_simple_enqdeq_atomic);
> +	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
> +			test_simple_enqdeq_parallel);
> +	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
> +			test_multi_queue_enq_single_port_deq);
> +	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
> +			test_dev_stop_flush);
> +	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
> +			test_multi_queue_enq_multi_port_deq);
> +	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
> +			test_queue_to_port_single_link);
> +	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
> +			test_queue_to_port_multi_link);
> +
> +	DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
> +	DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
> +	DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
> +	DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);
> +
> +	testsuite_teardown();
> +
> +	if (failed)
> +		return -1;
> +
> +	return 0;
> +}
> diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
> index f7da7fad5..72f97d4c1 100644
> --- a/drivers/event/dpaa2/meson.build
> +++ b/drivers/event/dpaa2/meson.build
> @@ -9,7 +9,8 @@ if not is_linux
>  endif
>  deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
>  sources = files('dpaa2_hw_dpcon.c',
> -		'dpaa2_eventdev.c')
> +		'dpaa2_eventdev.c',
> +		'dpaa2_eventdev_selftest.c')
>  
>  allow_experimental_apis = true
>  includes += include_directories('../../crypto/dpaa2_sec/')

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dpdk-dev] [PATCH v2 4/5] event/dpaa2: add selftest cases
  2019-09-09 13:10     ` Aaron Conole
@ 2019-09-10  7:19       ` Hemant Agrawal
  0 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-10  7:19 UTC (permalink / raw)
  To: Aaron Conole; +Cc: dev, jerinj

Hi Aaron,
	Thanks!
I will take care of your comments in  v3

Regards,
Hemant

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
@ 2019-09-13  6:24     ` Jerin Jacob
  2019-09-26 17:55       ` Jerin Jacob
  0 siblings, 1 reply; 34+ messages in thread
From: Jerin Jacob @ 2019-09-13  6:24 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dev, Jerin Jacob, stable

On Sat, Sep 7, 2019 at 12:14 PM Hemant Agrawal <hemant.agrawal@nxp.com> wrote:
>
> Test vector expect only one type of scheduling as default.
> The old code is provide support scheduling types instead of default.
>
> Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
> Cc: stable@dpdk.org
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Please fix the clang build issue
http://mails.dpdk.org/archives/test-report/2019-September/096533.html



>  drivers/event/dpaa2/dpaa2_eventdev.c | 7 ++-----
>  1 file changed, 2 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
> index 926b7edd8..b8cb437a0 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> @@ -1,7 +1,5 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
> - *
> - *   Copyright 2017 NXP
> - *
> + * Copyright 2017,2019 NXP
>   */
>
>  #include <assert.h>
> @@ -470,8 +468,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
>         RTE_SET_USED(queue_conf);
>
>         queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
> -       queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
> -                                     RTE_SCHED_TYPE_PARALLEL;
> +       queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
>         queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
>  }
>
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf
  2019-09-13  6:24     ` Jerin Jacob
@ 2019-09-26 17:55       ` Jerin Jacob
  2019-09-27  6:02         ` Hemant Agrawal
  0 siblings, 1 reply; 34+ messages in thread
From: Jerin Jacob @ 2019-09-26 17:55 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dpdk-dev, Jerin Jacob, stable

On Fri, Sep 13, 2019 at 11:54 AM Jerin Jacob <jerinjacobk@gmail.com> wrote:
>
> On Sat, Sep 7, 2019 at 12:14 PM Hemant Agrawal <hemant.agrawal@nxp.com> wrote:
> >
> > Test vector expect only one type of scheduling as default.
> > The old code is provide support scheduling types instead of default.
> >
> > Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
> > Cc: stable@dpdk.org
> > Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>
> Please fix the clang build issue
> http://mails.dpdk.org/archives/test-report/2019-September/096533.html

Waiting for the next version to merge for RC1.


>
>
>
> >  drivers/event/dpaa2/dpaa2_eventdev.c | 7 ++-----
> >  1 file changed, 2 insertions(+), 5 deletions(-)
> >
> > diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
> > index 926b7edd8..b8cb437a0 100644
> > --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> > +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> > @@ -1,7 +1,5 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> > - *
> > - *   Copyright 2017 NXP
> > - *
> > + * Copyright 2017,2019 NXP
> >   */
> >
> >  #include <assert.h>
> > @@ -470,8 +468,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
> >         RTE_SET_USED(queue_conf);
> >
> >         queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
> > -       queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
> > -                                     RTE_SCHED_TYPE_PARALLEL;
> > +       queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
> >         queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
> >  }
> >
> > --
> > 2.17.1
> >

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf
  2019-09-26 17:55       ` Jerin Jacob
@ 2019-09-27  6:02         ` Hemant Agrawal
  0 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  6:02 UTC (permalink / raw)
  To: Jerin Jacob; +Cc: dpdk-dev, Jerin Jacob, stable

I will send the RC2 today.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements
  2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                     ` (4 preceding siblings ...)
  2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 5/5] test/event: enable dpaa2 self test Hemant Agrawal
@ 2019-09-27  7:58   ` Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
                       ` (6 more replies)
  5 siblings, 7 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  7:58 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch series has minor fixes in dpaa2 eventdev support
 - default queue and cleanup logic
 - introducing selftest for dpaa2

v3: review comment cleanup and support retry timeout in enq
v2: fix compilation issue with meson and missing selftest file

Hemant Agrawal (5):
  event/dpaa2: fix def queue conf
  event/dpaa2: remove conditional compilation
  event/dpaa2: add destroy support
  event/dpaa2: add selftest cases
  test/event: enable dpaa2 self test

Nipun Gupta (1):
  event/dpaa2: add retry break in packet enqueue

 app/test/test_eventdev.c                      |   7 +
 drivers/event/dpaa2/Makefile                  |   3 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |  70 +-
 drivers/event/dpaa2/dpaa2_eventdev.h          |   2 +
 drivers/event/dpaa2/dpaa2_eventdev_logs.h     |   8 +-
 drivers/event/dpaa2/dpaa2_eventdev_selftest.c | 833 ++++++++++++++++++
 drivers/event/dpaa2/meson.build               |   3 +-
 7 files changed, 907 insertions(+), 19 deletions(-)
 create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_selftest.c

-- 
2.17.1


^ permalink raw reply	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH 1/6] event/dpaa2: fix def queue conf
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
@ 2019-09-27  7:58     ` Hemant Agrawal
  2019-09-30  6:43       ` Jerin Jacob
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 2/6] event/dpaa2: remove conditional compilation Hemant Agrawal
                       ` (5 subsequent siblings)
  6 siblings, 1 reply; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  7:58 UTC (permalink / raw)
  To: dev; +Cc: jerinj, stable

The test vectors expect only one scheduling type as the default.
The old code provided multiple scheduling types instead of a single
default.
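
For context, the selftest feeds the returned default conf straight back
into rte_event_queue_setup(), so schedule_type must hold a single valid
type. A minimal sketch of that consumer path (the helper name is
illustrative):

#include <rte_eventdev.h>

/* Sketch: query the default queue conf and use it unmodified. */
static int
setup_queue_with_defaults(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf;
	int ret;

	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
	if (ret)
		return ret;
	/* qconf.schedule_type is now one type, not an OR of types */
	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}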

Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
Cc: stable@dpdk.org
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 926b7edd8..b8cb437a0 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *
- *   Copyright 2017 NXP
- *
+ * Copyright 2017,2019 NXP
  */
 
 #include <assert.h>
@@ -470,8 +468,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	RTE_SET_USED(queue_conf);
 
 	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
-	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
-				      RTE_SCHED_TYPE_PARALLEL;
+	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH 2/6] event/dpaa2: remove conditional compilation
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
@ 2019-09-27  7:58     ` Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 3/6] event/dpaa2: add destroy support Hemant Agrawal
                       ` (4 subsequent siblings)
  6 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  7:58 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch removes the conditional compilation of the cryptodev event
support, which was previously guarded by the RTE_LIBRTE_SECURITY flag.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile         | 2 --
 drivers/event/dpaa2/dpaa2_eventdev.c | 6 ------
 2 files changed, 8 deletions(-)

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index 470157f25..e0bb527b1 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -24,10 +24,8 @@ LDLIBS += -lrte_common_dpaax
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
 
-ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
 LDLIBS += -lrte_pmd_dpaa2_sec
 CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec
-endif
 
 # versioning export map
 EXPORT_MAP := rte_pmd_dpaa2_event_version.map
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index b8cb437a0..98b487603 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -33,9 +33,7 @@
 #include <dpaa2_hw_mempool.h>
 #include <dpaa2_hw_dpio.h>
 #include <dpaa2_ethdev.h>
-#ifdef RTE_LIBRTE_SECURITY
 #include <dpaa2_sec_event.h>
-#endif
 #include "dpaa2_eventdev.h"
 #include "dpaa2_eventdev_logs.h"
 #include <portal/dpaa2_hw_pvt.h>
@@ -794,7 +792,6 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
 	return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
 static int
 dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
 			    const struct rte_cryptodev *cdev,
@@ -937,7 +934,6 @@ dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
 
 	return 0;
 }
-#endif
 
 static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
@@ -960,13 +956,11 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
 	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
 	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
-#ifdef RTE_LIBRTE_SECURITY
 	.crypto_adapter_caps_get	= dpaa2_eventdev_crypto_caps_get,
 	.crypto_adapter_queue_pair_add	= dpaa2_eventdev_crypto_queue_add,
 	.crypto_adapter_queue_pair_del	= dpaa2_eventdev_crypto_queue_del,
 	.crypto_adapter_start		= dpaa2_eventdev_crypto_start,
 	.crypto_adapter_stop		= dpaa2_eventdev_crypto_stop,
-#endif
 };
 
 static int
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH 3/6] event/dpaa2: add destroy support
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 2/6] event/dpaa2: remove conditional compilation Hemant Agrawal
@ 2019-09-27  7:58     ` Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 4/6] event/dpaa2: add retry break in packet enqueue Hemant Agrawal
                       ` (3 subsequent siblings)
  6 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  7:58 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch adds support to destroy the event device.
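
A minimal sketch of how the new cleanup is reached from an application
(the helper name is illustrative; removing the vdev ends up in
dpaa2_eventdev_remove(), which now calls dpaa2_eventdev_destroy()):

#include <rte_bus_vdev.h>
#include <rte_eventdev.h>

/* Sketch: stop, close and remove the vdev so that the dpcon/dpci
 * devices held by the event queues are released. */
static void
teardown_dpaa2_eventdev(uint8_t dev_id)
{
	rte_event_dev_stop(dev_id);
	rte_event_dev_close(dev_id);
	rte_vdev_uninit("event_dpaa2");
}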

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 35 ++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 98b487603..9255de16f 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1059,6 +1059,39 @@ dpaa2_eventdev_create(const char *name)
 	return -EFAULT;
 }
 
+static int
+dpaa2_eventdev_destroy(const char *name)
+{
+	struct rte_eventdev *eventdev;
+	struct dpaa2_eventdev *priv;
+	int i;
+
+	eventdev = rte_event_pmd_get_named_dev(name);
+	if (eventdev == NULL) {
+		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
+		return -1;
+	}
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	priv = eventdev->data->dev_private;
+	for (i = 0; i < priv->max_event_queues; i++) {
+		if (priv->evq_info[i].dpcon)
+			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);
+
+		if (priv->evq_info[i].dpci)
+			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
+
+	}
+	priv->max_event_queues = 0;
+
+	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
+	return 0;
+}
+
+
 static int
 dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
 {
@@ -1077,6 +1110,8 @@ dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
 	name = rte_vdev_device_name(vdev);
 	DPAA2_EVENTDEV_INFO("Closing %s", name);
 
+	dpaa2_eventdev_destroy(name);
+
 	return rte_event_pmd_vdev_uninit(name);
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH 4/6] event/dpaa2: add retry break in packet enqueue
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                       ` (2 preceding siblings ...)
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 3/6] event/dpaa2: add destroy support Hemant Agrawal
@ 2019-09-27  7:58     ` Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 5/6] event/dpaa2: add selftest cases Hemant Agrawal
                       ` (2 subsequent siblings)
  6 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  7:58 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Nipun Gupta

From: Nipun Gupta <nipun.gupta@nxp.com>

The patch adds a retry break in the TX function when it fails
to send the packets out. Previously the system would retry
infinitely to send the packets out.
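
Since the driver can now give up, fewer events may be reported as sent
and the caller decides whether to retry or drop the remainder. A minimal
caller-side sketch (the helper name and retry budget are illustrative):

#include <rte_eventdev.h>

/* Sketch: retry the unsent tail a bounded number of times. */
static uint16_t
enqueue_with_retry(uint8_t dev_id, uint8_t port_id,
		   struct rte_event *ev, uint16_t nb_events, int max_retries)
{
	uint16_t sent = 0;

	while (sent < nb_events && max_retries-- > 0)
		sent += rte_event_enqueue_burst(dev_id, port_id,
						&ev[sent], nb_events - sent);
	return sent;
}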

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 9255de16f..834d3cba1 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -49,6 +49,7 @@
 
 /* Dynamic logging identified for mempool */
 int dpaa2_logtype_event;
+#define DPAA2_EV_TX_RETRY_COUNT 10000
 
 static uint16_t
 dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
@@ -59,7 +60,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 	struct dpaa2_dpio_dev *dpio_dev;
 	uint32_t queue_id = ev[0].queue_id;
 	struct dpaa2_eventq *evq_info;
-	uint32_t fqid;
+	uint32_t fqid, retry_count;
 	struct qbman_swp *swp;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
 	uint32_t loop, frames_to_send;
@@ -162,13 +163,25 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 		}
 send_partial:
 		loop = 0;
+		retry_count = 0;
 		while (loop < frames_to_send) {
-			loop += qbman_swp_enqueue_multiple_desc(swp,
+			ret = qbman_swp_enqueue_multiple_desc(swp,
 					&eqdesc[loop], &fd_arr[loop],
 					frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
+					num_tx += loop;
+					nb_events -= loop;
+					return num_tx + loop;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
 		}
-		num_tx += frames_to_send;
-		nb_events -= frames_to_send;
+		num_tx += loop;
+		nb_events -= loop;
 	}
 
 	return num_tx;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH 5/6] event/dpaa2: add selftest cases
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                       ` (3 preceding siblings ...)
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 4/6] event/dpaa2: add retry break in packet enqueue Hemant Agrawal
@ 2019-09-27  7:58     ` Hemant Agrawal
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 6/6] test/event: enable dpaa2 self test Hemant Agrawal
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  6 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  7:58 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch adds the dpaa2 eventdev self test, covering basic
sanity checks for parallel and atomic queues.
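
Besides the unit test command enabled in the next patch, an application
can trigger the selftest through the public API; a minimal sketch (the
function name is illustrative):

#include <rte_eventdev.h>

/* Sketch: resolve the dpaa2 eventdev and dispatch to its dev_selftest
 * op, i.e. test_eventdev_dpaa2() below. */
static int
run_dpaa2_eventdev_selftest(void)
{
	int dev_id = rte_event_dev_get_dev_id("event_dpaa2");

	if (dev_id < 0)
		return dev_id;
	return rte_event_dev_selftest(dev_id);
}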

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile                  |   1 +
 drivers/event/dpaa2/dpaa2_eventdev.c          |   1 +
 drivers/event/dpaa2/dpaa2_eventdev.h          |   2 +
 drivers/event/dpaa2/dpaa2_eventdev_logs.h     |   8 +-
 drivers/event/dpaa2/dpaa2_eventdev_selftest.c | 833 ++++++++++++++++++
 drivers/event/dpaa2/meson.build               |   3 +-
 6 files changed, 846 insertions(+), 2 deletions(-)
 create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_selftest.c

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index e0bb527b1..634179383 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -40,5 +40,6 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
 #
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev_selftest.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 834d3cba1..5249d2fe4 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -964,6 +964,7 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.port_unlink      = dpaa2_eventdev_port_unlink,
 	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
 	.dump             = dpaa2_eventdev_dump,
+	.dev_selftest     = test_eventdev_dpaa2,
 	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
 	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index bdac1aa56..abc038e49 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -98,4 +98,6 @@ struct dpaa2_eventdev {
 struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
 void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
 
+int test_eventdev_dpaa2(void);
+
 #endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 86f2e5393..5da85c60f 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
  */
 
 #ifndef _DPAA2_EVENTDEV_LOGS_H_
@@ -35,4 +35,10 @@ extern int dpaa2_logtype_event;
 #define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
 	DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
 
+#define dpaa2_evdev_info(fmt, ...) DPAA2_EVENTDEV_LOG(INFO, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_dbg(fmt, ...) DPAA2_EVENTDEV_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_err(fmt, ...) DPAA2_EVENTDEV_LOG(ERR, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev__func_trace dpaa2_evdev_dbg
+#define dpaa2_evdev_selftest dpaa2_evdev_info
+
 #endif /* _DPAA2_EVENTDEV_LOGS_H_ */
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
new file mode 100644
index 000000000..ba4f4bd23
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+
+#define MAX_PORTS 4
+#define NUM_PACKETS (1 << 18)
+#define MAX_EVENTS  8
+#define DPAA2_TEST_RUN(setup, teardown, test) \
+	dpaa2_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+	uint32_t flow_id;
+	uint8_t event_type;
+	uint8_t sub_event_type;
+	uint8_t sched_type;
+	uint8_t queue;
+	uint8_t port;
+	uint8_t seq;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static void
+seqn_list_init(void)
+{
+	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+	memset(seqn_list, 0, sizeof(seqn_list));
+	seqn_list_index = 0;
+}
+
+struct test_core_param {
+	rte_atomic32_t *total_events;
+	uint64_t dequeue_tmo_ticks;
+	uint8_t port;
+	uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+	const char *eventdev_name = "event_dpaa2";
+
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+	if (evdev < 0) {
+		dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
+				__LINE__, eventdev_name);
+		if (rte_vdev_init(eventdev_name, NULL) < 0) {
+			dpaa2_evdev_err("Error creating eventdev %s",
+					eventdev_name);
+			return -1;
+		}
+		evdev = rte_event_dev_get_dev_id(eventdev_name);
+		if (evdev < 0) {
+			dpaa2_evdev_err("Error finding newly created eventdev");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_event_dev_close(evdev);
+}
+
+static void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+			struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = info->max_event_ports;
+	dev_conf->nb_event_queues = info->max_event_queues;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+enum {
+	TEST_EVENTDEV_SETUP_DEFAULT,
+	TEST_EVENTDEV_SETUP_PRIORITY,
+	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static int
+_eventdev_setup(int mode)
+{
+	int i, ret;
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	const char *pool_name = "evdev_dpaa2_test_pool";
+
+	/* Create and destroy pool for each test case to make it standalone */
+	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+					MAX_EVENTS,
+					0 /*MBUF_CACHE_SIZE*/,
+					0,
+					512, /* Use very small mbufs */
+					rte_socket_id());
+	if (!eventdev_test_mempool) {
+		dpaa2_evdev_err("ERROR creating mempool");
+		return -1;
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+			"ERROR max_num_events=%d < max_events=%d",
+				info.max_num_events, MAX_EVENTS);
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+	ret = rte_event_dev_configure(evdev, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+		if (queue_count > 8) {
+			dpaa2_evdev_err(
+				"test expects the unique priority per queue");
+			return -ENOTSUP;
+		}
+
+		/* Configure event queues(0 to n) with
+		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+		 * RTE_EVENT_DEV_PRIORITY_LOWEST
+		 */
+		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+				queue_count;
+		for (i = 0; i < (int)queue_count; i++) {
+			struct rte_event_queue_conf queue_conf;
+
+			ret = rte_event_queue_default_conf_get(evdev, i,
+						&queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+					i);
+			queue_conf.priority = i * step;
+			ret = rte_event_queue_setup(evdev, i, &queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+
+	} else {
+		/* Configure event queues with default priority */
+		for (i = 0; i < (int)queue_count; i++) {
+			ret = rte_event_queue_setup(evdev, i, NULL);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+	}
+	/* Configure event ports */
+	uint32_t port_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_setup(evdev, i, NULL);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+				i);
+	}
+
+	ret = rte_event_dev_start(evdev);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+	return 0;
+}
+
+static int
+eventdev_setup(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static void
+eventdev_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+	rte_mempool_free(eventdev_test_mempool);
+}
+
+static void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+			uint32_t flow_id, uint8_t event_type,
+			uint8_t sub_event_type, uint8_t sched_type,
+			uint8_t queue, uint8_t port, uint8_t seq)
+{
+	struct event_attr *attr;
+
+	/* Store the event attributes in mbuf for future reference */
+	attr = rte_pktmbuf_mtod(m, struct event_attr *);
+	attr->flow_id = flow_id;
+	attr->event_type = event_type;
+	attr->sub_event_type = sub_event_type;
+	attr->sched_type = sched_type;
+	attr->queue = queue;
+	attr->port = port;
+	attr->seq = seq;
+
+	ev->flow_id = flow_id;
+	ev->sub_event_type = sub_event_type;
+	ev->event_type = event_type;
+	/* Inject the new event */
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = sched_type;
+	ev->queue_id = queue;
+	ev->mbuf = m;
+}
+
+static int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+		uint8_t sched_type, uint8_t queue, uint8_t port,
+		unsigned int events)
+{
+	struct rte_mbuf *m;
+	unsigned int i;
+
+	for (i = 0; i < events; i++) {
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+		update_event_and_validation_attr(m, &ev, flow_id, event_type,
+			sub_event_type, sched_type, queue, port, i);
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+	return 0;
+}
+
+static int
+check_excess_events(uint8_t port)
+{
+	int i;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	/* Check for excess events, try for a few times and exit */
+	for (i = 0; i < 32; i++) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+		RTE_TEST_ASSERT_SUCCESS(valid_event,
+				"Unexpected valid event=%d", ev.mbuf->seqn);
+	}
+	return 0;
+}
+
+static int
+generate_random_events(const unsigned int total_events)
+{
+	struct rte_event_dev_info info;
+	unsigned int i;
+	int ret;
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	for (i = 0; i < total_events; i++) {
+		ret = inject_events(
+			rte_rand() % info.max_event_queue_flows /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			rte_rand() % queue_count /* queue */,
+			0 /* port */,
+			1 /* events */);
+		if (ret)
+			return -1;
+	}
+	return ret;
+}
+
+
+static int
+validate_event(struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+			"flow_id mismatch enq=%d deq =%d",
+			attr->flow_id, ev->flow_id);
+	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+			"event_type mismatch enq=%d deq =%d",
+			attr->event_type, ev->event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+			"sub_event_type mismatch enq=%d deq =%d",
+			attr->sub_event_type, ev->sub_event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+			"sched_type mismatch enq=%d deq =%d",
+			attr->sched_type, ev->sched_type);
+	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+			"queue mismatch enq=%d deq =%d",
+			attr->queue, ev->queue_id);
+	return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+				 struct rte_event *ev);
+
+static int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+	int ret;
+	uint16_t valid_event;
+	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+	struct rte_event ev;
+
+	while (1) {
+		if (++forward_progress_cnt > UINT16_MAX) {
+			dpaa2_evdev_err("Detected deadlock");
+			return -1;
+		}
+
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		forward_progress_cnt = 0;
+		ret = validate_event(&ev);
+		if (ret)
+			return -1;
+
+		if (fn != NULL) {
+			ret = fn(index, port, &ev);
+			RTE_TEST_ASSERT_SUCCESS(ret,
+				"Failed to validate test specific event");
+		}
+
+		++index;
+
+		rte_pktmbuf_free(ev.mbuf);
+		if (++events >= total_events)
+			break;
+	}
+
+	return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+
+	RTE_SET_USED(port);
+	RTE_TEST_ASSERT_EQUAL(index, attr->seq,
+		"index=%d != seqn=%d", index, attr->seq);
+	return 0;
+}
+
+static int
+test_simple_enqdeq(uint8_t sched_type)
+{
+	int ret;
+
+	ret = inject_events(0 /*flow_id */,
+				RTE_EVENT_TYPE_CPU /* event_type */,
+				0 /* sub_event_type */,
+				sched_type,
+				0 /* queue */,
+				0 /* port */,
+				MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS,	validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, using single event port(port 0) verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+	int ret;
+
+	ret = generate_random_events(MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	rte_atomic32_t *total_events = param->total_events;
+	int ret;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		ret = validate_event(&ev);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+		rte_pktmbuf_free(ev.mbuf);
+		rte_atomic32_sub(total_events, 1);
+	}
+	return 0;
+}
+
+static int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+	uint64_t cycles, print_cycles;
+
+	RTE_SET_USED(count);
+
+	print_cycles = cycles = rte_get_timer_cycles();
+	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+		uint64_t new_cycles = rte_get_timer_cycles();
+
+		if (new_cycles - print_cycles > rte_get_timer_hz()) {
+			dpaa2_evdev_dbg("\r%s: events %d", __func__,
+				rte_atomic32_read(count));
+			print_cycles = new_cycles;
+		}
+		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+			dpaa2_evdev_info(
+				"%s: No schedules for seconds, deadlock (%d)",
+				__func__,
+				rte_atomic32_read(count));
+			rte_event_dev_dump(evdev, stdout);
+			cycles = new_cycles;
+			return -1;
+		}
+	}
+	rte_eal_mp_wait_lcore();
+	return 0;
+}
+
+
+static int
+launch_workers_and_wait(int (*master_worker)(void *),
+			int (*slave_workers)(void *), uint32_t total_events,
+			uint8_t nb_workers, uint8_t sched_type)
+{
+	uint8_t port = 0;
+	int w_lcore;
+	int ret;
+	struct test_core_param *param;
+	rte_atomic32_t atomic_total_events;
+	uint64_t dequeue_tmo_ticks;
+
+	if (!nb_workers)
+		return 0;
+
+	rte_atomic32_set(&atomic_total_events, total_events);
+	seqn_list_init();
+
+	param = malloc(sizeof(struct test_core_param) * nb_workers);
+	if (!param)
+		return -1;
+
+	ret = rte_event_dequeue_timeout_ticks(evdev,
+		rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
+	if (ret) {
+		free(param);
+		return -1;
+	}
+
+	param[0].total_events = &atomic_total_events;
+	param[0].sched_type = sched_type;
+	param[0].port = 0;
+	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+	rte_smp_wmb();
+
+	w_lcore = rte_get_next_lcore(
+			/* start core */ -1,
+			/* skip master */ 1,
+			/* wrap */ 0);
+	rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+	for (port = 1; port < nb_workers; port++) {
+		param[port].total_events = &atomic_total_events;
+		param[port].sched_type = sched_type;
+		param[port].port = port;
+		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+		rte_smp_wmb();
+		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+	}
+
+	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+	free(param);
+	return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
+				__func__, nr_ports, rte_lcore_count() - 1);
+		return 0;
+	}
+
+	return launch_workers_and_wait(worker_multi_port_fn,
+					worker_multi_port_fn, total_events,
+					nr_ports, 0xff /* invalid */);
+}
+
+static
+void flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+	unsigned int *count = arg;
+
+	RTE_SET_USED(dev_id);
+	if (event.event_type == RTE_EVENT_TYPE_CPU)
+		*count = *count + 1;
+
+}
+
+static int
+test_dev_stop_flush(void)
+{
+	unsigned int total_events = MAX_EVENTS, count = 0;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+	if (ret)
+		return -2;
+	rte_event_dev_stop(evdev);
+	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+	if (ret)
+		return -3;
+	RTE_TEST_ASSERT_EQUAL(total_events, count,
+				"count mismatch total_events=%d count=%d",
+				total_events, count);
+	return 0;
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link queue x to port x and check correctness of link by checking
+ * queue_id == x on dequeue on the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+	int i, nr_links, ret;
+
+	uint32_t port_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+
+	/* Unlink all connections that were created in eventdev_setup */
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_unlink(evdev, i, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0,
+				"Failed to unlink all queues port=%d", i);
+	}
+
+	uint32_t queue_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	nr_links = RTE_MIN(port_count, queue_count);
+	const unsigned int total_events = MAX_EVENTS / nr_links;
+
+	/* Link queue x to port x and inject events to queue x through port x */
+	for (i = 0; i < nr_links; i++) {
+		uint8_t queue = (uint8_t)i;
+
+		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			i /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+	}
+
+	/* Verify the events generated from correct queue */
+	for (i = 0; i < nr_links; i++) {
+		ret = consume_events(i /* port */, total_events,
+				validate_queue_to_port_single_link);
+		if (ret)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link all even number of queues to port 0 and all odd number of queues to
+ * port 1 and verify the link connection on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+	int ret, port0_events = 0, port1_events = 0;
+	uint8_t queue, port;
+	uint32_t nr_queues = 0;
+	uint32_t nr_ports = 0;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &nr_queues), "Queue count get failed");
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				&nr_queues), "Queue count get failed");
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+
+	if (nr_ports < 2) {
+		dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
+				__func__, nr_ports);
+		return 0;
+	}
+
+	/* Unlink all connections that were created in eventdev_setup */
+	for (port = 0; port < nr_ports; port++) {
+		ret = rte_event_port_unlink(evdev, port, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+					port);
+	}
+
+	const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+	/* Link all even number of queues to port0 and odd numbers to port 1*/
+	for (queue = 0; queue < nr_queues; queue++) {
+		port = queue & 0x1;
+		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+					queue, port);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			port /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+
+		if (port == 0)
+			port0_events += total_events;
+		else
+			port1_events += total_events;
+	}
+
+	ret = consume_events(0 /* port */, port0_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+	ret = consume_events(1 /* port */, port1_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
+static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
+		int (*test)(void), const char *name)
+{
+	if (setup() < 0) {
+		RTE_LOG(INFO, PMD, "Error setting up test %s", name);
+		unsupported++;
+	} else {
+		if (test() < 0) {
+			failed++;
+			RTE_LOG(INFO, PMD, "%s Failed\n", name);
+		} else {
+			passed++;
+			RTE_LOG(INFO, PMD, "%s Passed", name);
+		}
+	}
+
+	total++;
+	tdown();
+}
+
+int
+test_eventdev_dpaa2(void)
+{
+	testsuite_setup();
+
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_atomic);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_parallel);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_single_port_deq);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_dev_stop_flush);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_multi_port_deq);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_single_link);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_multi_link);
+
+	DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
+	DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
+	DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
+	DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);
+
+	testsuite_teardown();
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
index f7da7fad5..72f97d4c1 100644
--- a/drivers/event/dpaa2/meson.build
+++ b/drivers/event/dpaa2/meson.build
@@ -9,7 +9,8 @@ if not is_linux
 endif
 deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
 sources = files('dpaa2_hw_dpcon.c',
-		'dpaa2_eventdev.c')
+		'dpaa2_eventdev.c',
+		'dpaa2_eventdev_selftest.c')
 
 allow_experimental_apis = true
 includes += include_directories('../../crypto/dpaa2_sec/')
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH 6/6] test/event: enable dpaa2 self test
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                       ` (4 preceding siblings ...)
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 5/6] event/dpaa2: add selftest cases Hemant Agrawal
@ 2019-09-27  7:58     ` Hemant Agrawal
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  6 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-27  7:58 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch adds support to run the dpaa2 event self test
from the test framework.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_eventdev.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 783140dfe..427dbbf77 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -1020,9 +1020,16 @@ test_eventdev_selftest_octeontx2(void)
 	return test_eventdev_selftest_impl("otx2_eventdev", "");
 }
 
+static int
+test_eventdev_selftest_dpaa2(void)
+{
+	return test_eventdev_selftest_impl("event_dpaa2", "");
+}
+
 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
 		test_eventdev_selftest_octeontx);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
 		test_eventdev_selftest_octeontx2);
+REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* Re: [dpdk-dev] [PATCH 1/6] event/dpaa2: fix def queue conf
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
@ 2019-09-30  6:43       ` Jerin Jacob
  0 siblings, 0 replies; 34+ messages in thread
From: Jerin Jacob @ 2019-09-30  6:43 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dpdk-dev, Jerin Jacob, stable

On Fri, Sep 27, 2019 at 1:31 PM Hemant Agrawal <hemant.agrawal@nxp.com> wrote:
>
> Test vector expect only one type of scheduling as default.
> The old code is provide support scheduling types instead of default.
>
> Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
> Cc: stable@dpdk.org
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

1) Patch version is missing

2) Shared library build failure.
http://mails.dpdk.org/archives/test-report/2019-September/099288.html

Please send the next version with fixes.


/usr/bin/ld: dpaa2_eventdev_selftest.o: in function `rte_mempool_get_ops':
dpaa2_eventdev_selftest.c:(.text+0x224): undefined reference to
`rte_mempool_ops_table'
/usr/bin/ld: dpaa2_eventdev_selftest.o: in function `rte_mbuf_raw_alloc':
dpaa2_eventdev_selftest.c:(.text+0x8d4): undefined reference to
`rte_mempool_check_cookies'
/usr/bin/ld: dpaa2_eventdev_selftest.c:(.text+0x8f5): undefined
reference to `rte_mbuf_sanity_check'
/usr/bin/ld: dpaa2_eventdev_selftest.o: in function `rte_pktmbuf_reset':
dpaa2_eventdev_selftest.c:(.text+0xa26): undefined reference to
`rte_mbuf_sanity_check'
/usr/bin/ld: dpaa2_eventdev_selftest.o: in function `__rte_pktmbuf_free_direct':
dpaa2_eventdev_selftest.c:(.text+0xb47): undefined reference to
`rte_mbuf_sanity_check'
/usr/bin/ld: dpaa2_eventdev_selftest.c:(.text+0xc15): undefined
reference to `rte_mempool_check_cookies'
/usr/bin/ld: dpaa2_eventdev_selftest.o: in function `rte_pktmbuf_free':
dpaa2_eventdev_selftest.c:(.text+0x23cf): undefined reference to
`rte_mbuf_sanity_check'
/usr/bin/ld: dpaa2_eventdev_selftest.c:(.text+0x2408): undefined
reference to `rte_mbuf_sanity_check'
/usr/bin/ld: dpaa2_eventdev_selftest.c:(.text+0x2535): undefined
reference to `rte_mbuf_sanity_check'
/usr/bin/ld: dpaa2_eventdev_selftest.c:(.text+0x2603): undefined
reference to `rte_mempool_check_cookies'
/usr/bin/ld: dpaa2_eventdev_selftest.o: in function `_eventdev_setup':
dpaa2_eventdev_selftest.c:(.text+0x4142): undefined reference to
`rte_pktmbuf_pool_create'
/usr/bin/ld: dpaa2_eventdev_selftest.o: in function `eventdev_teardown':
dpaa2_eventdev_selftest.c:(.text+0x465d): undefined reference to
`rte_mempool_free'
collect2: error: ld returned 1 exit status
> ---
>  drivers/event/dpaa2/dpaa2_eventdev.c | 7 ++-----
>  1 file changed, 2 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
> index 926b7edd8..b8cb437a0 100644
> --- a/drivers/event/dpaa2/dpaa2_eventdev.c
> +++ b/drivers/event/dpaa2/dpaa2_eventdev.c
> @@ -1,7 +1,5 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
> - *
> - *   Copyright 2017 NXP
> - *
> + * Copyright 2017,2019 NXP
>   */
>
>  #include <assert.h>
> @@ -470,8 +468,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
>         RTE_SET_USED(queue_conf);
>
>         queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
> -       queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
> -                                     RTE_SCHED_TYPE_PARALLEL;
> +       queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
>         queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
>  }
>
> --
> 2.17.1
>

^ permalink raw reply	[flat|nested] 34+ messages in thread
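
The undefined references above all come from the mbuf and mempool APIs that the
new selftest file calls directly; in a shared-library build those libraries are
not necessarily pulled in transitively, so the driver Makefile has to link them
explicitly. A minimal sketch of the kind of change that resolves it (the v4
series below carries the actual fix in drivers/event/dpaa2/Makefile):

	# drivers/event/dpaa2/Makefile: link the libraries the selftest uses
	LDLIBS += -lrte_bus_vdev -lrte_mempool -lrte_mbuf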

* [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements
  2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                       ` (5 preceding siblings ...)
  2019-09-27  7:58     ` [dpdk-dev] [PATCH 6/6] test/event: enable dpaa2 self test Hemant Agrawal
@ 2019-09-30  8:32     ` Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
                         ` (5 more replies)
  6 siblings, 6 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-30  8:32 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch series has minor fixes in dpaa2 eventdev support
 - default queue and cleanup logic
 - introducing selftest for dpaa2

v4: fix shared build and add version info
v3: review comment cleanup and support retry timeout in enq
v2: fix compilation issue with meson and missing selftest file

Hemant Agrawal (5):
  event/dpaa2: fix def queue conf
  event/dpaa2: remove conditional compilation
  event/dpaa2: add destroy support
  event/dpaa2: add selftest cases
  test/event: enable dpaa2 self test

Nipun Gupta (1):
  event/dpaa2: add retry break in packet enqueue

 app/test/test_eventdev.c                      |   7 +
 drivers/event/dpaa2/Makefile                  |   7 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |  70 +-
 drivers/event/dpaa2/dpaa2_eventdev.h          |   2 +
 drivers/event/dpaa2/dpaa2_eventdev_logs.h     |   8 +-
 drivers/event/dpaa2/dpaa2_eventdev_selftest.c | 833 ++++++++++++++++++
 drivers/event/dpaa2/meson.build               |   3 +-
 7 files changed, 909 insertions(+), 21 deletions(-)
 create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_selftest.c

-- 
2.17.1


^ permalink raw reply	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH v4 1/6] event/dpaa2: fix def queue conf
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
@ 2019-09-30  8:32       ` Hemant Agrawal
  2019-09-30 14:17         ` Jerin Jacob
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 2/6] event/dpaa2: remove conditional compilation Hemant Agrawal
                         ` (4 subsequent siblings)
  5 siblings, 1 reply; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-30  8:32 UTC (permalink / raw)
  To: dev; +Cc: jerinj, stable

Test vectors expect only one type of scheduling as the default.
The old code provided multiple scheduling types instead of a single default.

Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
Cc: stable@dpdk.org
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 926b7edd8..b8cb437a0 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *
- *   Copyright 2017 NXP
- *
+ * Copyright 2017,2019 NXP
  */
 
 #include <assert.h>
@@ -470,8 +468,7 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 	RTE_SET_USED(queue_conf);
 
 	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
-	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
-				      RTE_SCHED_TYPE_PARALLEL;
+	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH v4 2/6] event/dpaa2: remove conditional compilation
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
@ 2019-09-30  8:32       ` Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 3/6] event/dpaa2: add destroy support Hemant Agrawal
                         ` (3 subsequent siblings)
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-30  8:32 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch removes the conditional compilation of the
cryptodev event support under the RTE_LIBRTE_SECURITY flag.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile         | 2 --
 drivers/event/dpaa2/dpaa2_eventdev.c | 6 ------
 2 files changed, 8 deletions(-)

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index 470157f25..e0bb527b1 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -24,10 +24,8 @@ LDLIBS += -lrte_common_dpaax
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
 
-ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
 LDLIBS += -lrte_pmd_dpaa2_sec
 CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec
-endif
 
 # versioning export map
 EXPORT_MAP := rte_pmd_dpaa2_event_version.map
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index b8cb437a0..98b487603 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -33,9 +33,7 @@
 #include <dpaa2_hw_mempool.h>
 #include <dpaa2_hw_dpio.h>
 #include <dpaa2_ethdev.h>
-#ifdef RTE_LIBRTE_SECURITY
 #include <dpaa2_sec_event.h>
-#endif
 #include "dpaa2_eventdev.h"
 #include "dpaa2_eventdev_logs.h"
 #include <portal/dpaa2_hw_pvt.h>
@@ -794,7 +792,6 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
 	return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
 static int
 dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
 			    const struct rte_cryptodev *cdev,
@@ -937,7 +934,6 @@ dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
 
 	return 0;
 }
-#endif
 
 static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.dev_infos_get    = dpaa2_eventdev_info_get,
@@ -960,13 +956,11 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
 	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
 	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
-#ifdef RTE_LIBRTE_SECURITY
 	.crypto_adapter_caps_get	= dpaa2_eventdev_crypto_caps_get,
 	.crypto_adapter_queue_pair_add	= dpaa2_eventdev_crypto_queue_add,
 	.crypto_adapter_queue_pair_del	= dpaa2_eventdev_crypto_queue_del,
 	.crypto_adapter_start		= dpaa2_eventdev_crypto_start,
 	.crypto_adapter_stop		= dpaa2_eventdev_crypto_stop,
-#endif
 };
 
 static int
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH v4 3/6] event/dpaa2: add destroy support
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 2/6] event/dpaa2: remove conditional compilation Hemant Agrawal
@ 2019-09-30  8:32       ` Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 4/6] event/dpaa2: add retry break in packet enqueue Hemant Agrawal
                         ` (2 subsequent siblings)
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-30  8:32 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch adds support to destroy the event device.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 35 ++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 98b487603..9255de16f 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1059,6 +1059,39 @@ dpaa2_eventdev_create(const char *name)
 	return -EFAULT;
 }
 
+static int
+dpaa2_eventdev_destroy(const char *name)
+{
+	struct rte_eventdev *eventdev;
+	struct dpaa2_eventdev *priv;
+	int i;
+
+	eventdev = rte_event_pmd_get_named_dev(name);
+	if (eventdev == NULL) {
+		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
+		return -1;
+	}
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	priv = eventdev->data->dev_private;
+	for (i = 0; i < priv->max_event_queues; i++) {
+		if (priv->evq_info[i].dpcon)
+			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);
+
+		if (priv->evq_info[i].dpci)
+			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
+
+	}
+	priv->max_event_queues = 0;
+
+	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
+	return 0;
+}
+
+
 static int
 dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
 {
@@ -1077,6 +1110,8 @@ dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
 	name = rte_vdev_device_name(vdev);
 	DPAA2_EVENTDEV_INFO("Closing %s", name);
 
+	dpaa2_eventdev_destroy(name);
+
 	return rte_event_pmd_vdev_uninit(name);
 }
 
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH v4 4/6] event/dpaa2: add retry break in packet enqueue
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                         ` (2 preceding siblings ...)
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 3/6] event/dpaa2: add destroy support Hemant Agrawal
@ 2019-09-30  8:32       ` Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 5/6] event/dpaa2: add selftest cases Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 6/6] test/event: enable dpaa2 self test Hemant Agrawal
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-30  8:32 UTC (permalink / raw)
  To: dev; +Cc: jerinj, Nipun Gupta

From: Nipun Gupta <nipun.gupta@nxp.com>

The patch adds a break in the TX function when it repeatedly fails
to send the packets out. Previously the system would retry
indefinitely to send the packets out.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 9255de16f..834d3cba1 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -49,6 +49,7 @@
 
 /* Dynamic logging identified for mempool */
 int dpaa2_logtype_event;
+#define DPAA2_EV_TX_RETRY_COUNT 10000
 
 static uint16_t
 dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
@@ -59,7 +60,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 	struct dpaa2_dpio_dev *dpio_dev;
 	uint32_t queue_id = ev[0].queue_id;
 	struct dpaa2_eventq *evq_info;
-	uint32_t fqid;
+	uint32_t fqid, retry_count;
 	struct qbman_swp *swp;
 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
 	uint32_t loop, frames_to_send;
@@ -162,13 +163,25 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 		}
 send_partial:
 		loop = 0;
+		retry_count = 0;
 		while (loop < frames_to_send) {
-			loop += qbman_swp_enqueue_multiple_desc(swp,
+			ret = qbman_swp_enqueue_multiple_desc(swp,
 					&eqdesc[loop], &fd_arr[loop],
 					frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
+					num_tx += loop;
+					nb_events -= loop;
+					return num_tx + loop;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
 		}
-		num_tx += frames_to_send;
-		nb_events -= frames_to_send;
+		num_tx += loop;
+		nb_events -= loop;
 	}
 
 	return num_tx;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread
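
With the retry budget in place the dpaa2 enqueue path can legitimately return
fewer events than were requested, so callers are expected to deal with a
partial enqueue themselves. A minimal caller-side sketch against the public
eventdev API (the retry budget of 16 and the helper name are illustrative
only, not part of the patch):

	#include <rte_eventdev.h>

	static uint16_t
	enqueue_with_retry(uint8_t dev_id, uint8_t port_id,
			   struct rte_event *ev, uint16_t nb_events)
	{
		uint16_t sent = 0;
		unsigned int retries = 0;

		while (sent < nb_events && retries < 16) {
			uint16_t n = rte_event_enqueue_burst(dev_id, port_id,
							&ev[sent],
							nb_events - sent);
			if (n == 0) {
				retries++;	/* portal busy: back off or give up */
			} else {
				sent += n;
				retries = 0;
			}
		}
		return sent;	/* may be < nb_events on persistent backpressure */
	}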

* [dpdk-dev] [PATCH v4 5/6] event/dpaa2: add selftest cases
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                         ` (3 preceding siblings ...)
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 4/6] event/dpaa2: add retry break in packet enqueue Hemant Agrawal
@ 2019-09-30  8:32       ` Hemant Agrawal
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 6/6] test/event: enable dpaa2 self test Hemant Agrawal
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-30  8:32 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch adds a dpaa2 eventdev self test covering basic
sanity checks for parallel and atomic queues.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/event/dpaa2/Makefile                  |   5 +-
 drivers/event/dpaa2/dpaa2_eventdev.c          |   1 +
 drivers/event/dpaa2/dpaa2_eventdev.h          |   2 +
 drivers/event/dpaa2/dpaa2_eventdev_logs.h     |   8 +-
 drivers/event/dpaa2/dpaa2_eventdev_selftest.c | 833 ++++++++++++++++++
 drivers/event/dpaa2/meson.build               |   3 +-
 6 files changed, 848 insertions(+), 4 deletions(-)
 create mode 100644 drivers/event/dpaa2/dpaa2_eventdev_selftest.c

diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index e0bb527b1..c6ab326da 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -18,9 +18,9 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
 CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
 LDLIBS += -lrte_eal -lrte_eventdev
-LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
-LDLIBS += -lrte_bus_vdev
 LDLIBS += -lrte_common_dpaax
+LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
+LDLIBS += -lrte_bus_vdev -lrte_mempool -lrte_mbuf
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
 CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
 
@@ -40,5 +40,6 @@ CFLAGS += -DALLOW_EXPERIMENTAL_API
 #
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_hw_dpcon.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2_eventdev_selftest.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 834d3cba1..5249d2fe4 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -964,6 +964,7 @@ static struct rte_eventdev_ops dpaa2_eventdev_ops = {
 	.port_unlink      = dpaa2_eventdev_port_unlink,
 	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
 	.dump             = dpaa2_eventdev_dump,
+	.dev_selftest     = test_eventdev_dpaa2,
 	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
 	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
 	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index bdac1aa56..abc038e49 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -98,4 +98,6 @@ struct dpaa2_eventdev {
 struct dpaa2_dpcon_dev *rte_dpaa2_alloc_dpcon_dev(void);
 void rte_dpaa2_free_dpcon_dev(struct dpaa2_dpcon_dev *dpcon);
 
+int test_eventdev_dpaa2(void);
+
 #endif /* __DPAA2_EVENTDEV_H__ */
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 86f2e5393..5da85c60f 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
  */
 
 #ifndef _DPAA2_EVENTDEV_LOGS_H_
@@ -35,4 +35,10 @@ extern int dpaa2_logtype_event;
 #define DPAA2_EVENTDEV_DP_WARN(fmt, args...) \
 	DPAA2_EVENTDEV_DP_LOG(WARNING, fmt, ## args)
 
+#define dpaa2_evdev_info(fmt, ...) DPAA2_EVENTDEV_LOG(INFO, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_dbg(fmt, ...) DPAA2_EVENTDEV_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev_err(fmt, ...) DPAA2_EVENTDEV_LOG(ERR, fmt, ##__VA_ARGS__)
+#define dpaa2_evdev__func_trace dpaa2_evdev_dbg
+#define dpaa2_evdev_selftest dpaa2_evdev_info
+
 #endif /* _DPAA2_EVENTDEV_LOGS_H_ */
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_selftest.c b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
new file mode 100644
index 000000000..ba4f4bd23
--- /dev/null
+++ b/drivers/event/dpaa2/dpaa2_eventdev_selftest.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018-2019 NXP
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_test.h>
+
+#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
+
+#define MAX_PORTS 4
+#define NUM_PACKETS (1 << 18)
+#define MAX_EVENTS  8
+#define DPAA2_TEST_RUN(setup, teardown, test) \
+	dpaa2_test_run(setup, teardown, test, #test)
+
+static int total;
+static int passed;
+static int failed;
+static int unsupported;
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+	uint32_t flow_id;
+	uint8_t event_type;
+	uint8_t sub_event_type;
+	uint8_t sched_type;
+	uint8_t queue;
+	uint8_t port;
+	uint8_t seq;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static void
+seqn_list_init(void)
+{
+	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+	memset(seqn_list, 0, sizeof(seqn_list));
+	seqn_list_index = 0;
+}
+
+struct test_core_param {
+	rte_atomic32_t *total_events;
+	uint64_t dequeue_tmo_ticks;
+	uint8_t port;
+	uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+	const char *eventdev_name = "event_dpaa2";
+
+	evdev = rte_event_dev_get_dev_id(eventdev_name);
+	if (evdev < 0) {
+		dpaa2_evdev_dbg("%d: Eventdev %s not found - creating.",
+				__LINE__, eventdev_name);
+		if (rte_vdev_init(eventdev_name, NULL) < 0) {
+			dpaa2_evdev_err("Error creating eventdev %s",
+					eventdev_name);
+			return -1;
+		}
+		evdev = rte_event_dev_get_dev_id(eventdev_name);
+		if (evdev < 0) {
+			dpaa2_evdev_err("Error finding newly created eventdev");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_event_dev_close(evdev);
+}
+
+static void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+			struct rte_event_dev_info *info)
+{
+	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+	dev_conf->nb_event_ports = info->max_event_ports;
+	dev_conf->nb_event_queues = info->max_event_queues;
+	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+	dev_conf->nb_event_port_dequeue_depth =
+			info->max_event_port_dequeue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_event_port_enqueue_depth =
+			info->max_event_port_enqueue_depth;
+	dev_conf->nb_events_limit =
+			info->max_num_events;
+}
+
+enum {
+	TEST_EVENTDEV_SETUP_DEFAULT,
+	TEST_EVENTDEV_SETUP_PRIORITY,
+	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static int
+_eventdev_setup(int mode)
+{
+	int i, ret;
+	struct rte_event_dev_config dev_conf;
+	struct rte_event_dev_info info;
+	const char *pool_name = "evdev_dpaa2_test_pool";
+
+	/* Create and destroy pool for each test case to make it standalone */
+	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+					MAX_EVENTS,
+					0 /*MBUF_CACHE_SIZE*/,
+					0,
+					512, /* Use very small mbufs */
+					rte_socket_id());
+	if (!eventdev_test_mempool) {
+		dpaa2_evdev_err("ERROR creating mempool");
+		return -1;
+	}
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+			"ERROR max_num_events=%d < max_events=%d",
+				info.max_num_events, MAX_EVENTS);
+
+	devconf_set_default_sane_values(&dev_conf, &info);
+	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+	ret = rte_event_dev_configure(evdev, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+		if (queue_count > 8) {
+			dpaa2_evdev_err(
+				"test expects the unique priority per queue");
+			return -ENOTSUP;
+		}
+
+		/* Configure event queues(0 to n) with
+		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+		 * RTE_EVENT_DEV_PRIORITY_LOWEST
+		 */
+		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+				queue_count;
+		for (i = 0; i < (int)queue_count; i++) {
+			struct rte_event_queue_conf queue_conf;
+
+			ret = rte_event_queue_default_conf_get(evdev, i,
+						&queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
+					i);
+			queue_conf.priority = i * step;
+			ret = rte_event_queue_setup(evdev, i, &queue_conf);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+
+	} else {
+		/* Configure event queues with default priority */
+		for (i = 0; i < (int)queue_count; i++) {
+			ret = rte_event_queue_setup(evdev, i, NULL);
+			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
+					i);
+		}
+	}
+	/* Configure event ports */
+	uint32_t port_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_setup(evdev, i, NULL);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
+				i);
+	}
+
+	ret = rte_event_dev_start(evdev);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+	return 0;
+}
+
+static int
+eventdev_setup(void)
+{
+	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static void
+eventdev_teardown(void)
+{
+	rte_event_dev_stop(evdev);
+	rte_mempool_free(eventdev_test_mempool);
+}
+
+static void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+			uint32_t flow_id, uint8_t event_type,
+			uint8_t sub_event_type, uint8_t sched_type,
+			uint8_t queue, uint8_t port, uint8_t seq)
+{
+	struct event_attr *attr;
+
+	/* Store the event attributes in mbuf for future reference */
+	attr = rte_pktmbuf_mtod(m, struct event_attr *);
+	attr->flow_id = flow_id;
+	attr->event_type = event_type;
+	attr->sub_event_type = sub_event_type;
+	attr->sched_type = sched_type;
+	attr->queue = queue;
+	attr->port = port;
+	attr->seq = seq;
+
+	ev->flow_id = flow_id;
+	ev->sub_event_type = sub_event_type;
+	ev->event_type = event_type;
+	/* Inject the new event */
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = sched_type;
+	ev->queue_id = queue;
+	ev->mbuf = m;
+}
+
+static int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+		uint8_t sched_type, uint8_t queue, uint8_t port,
+		unsigned int events)
+{
+	struct rte_mbuf *m;
+	unsigned int i;
+
+	for (i = 0; i < events; i++) {
+		struct rte_event ev = {.event = 0, .u64 = 0};
+
+		m = rte_pktmbuf_alloc(eventdev_test_mempool);
+		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+		update_event_and_validation_attr(m, &ev, flow_id, event_type,
+			sub_event_type, sched_type, queue, port, i);
+		rte_event_enqueue_burst(evdev, port, &ev, 1);
+	}
+	return 0;
+}
+
+static int
+check_excess_events(uint8_t port)
+{
+	int i;
+	uint16_t valid_event;
+	struct rte_event ev;
+
+	/* Check for excess events, try for a few times and exit */
+	for (i = 0; i < 32; i++) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+		RTE_TEST_ASSERT_SUCCESS(valid_event,
+				"Unexpected valid event=%d", ev.mbuf->seqn);
+	}
+	return 0;
+}
+
+static int
+generate_random_events(const unsigned int total_events)
+{
+	struct rte_event_dev_info info;
+	unsigned int i;
+	int ret;
+
+	uint32_t queue_count;
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	ret = rte_event_dev_info_get(evdev, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+	for (i = 0; i < total_events; i++) {
+		ret = inject_events(
+			rte_rand() % info.max_event_queue_flows /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			rte_rand() % queue_count /* queue */,
+			0 /* port */,
+			1 /* events */);
+		if (ret)
+			return -1;
+	}
+	return ret;
+}
+
+
+static int
+validate_event(struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+			"flow_id mismatch enq=%d deq =%d",
+			attr->flow_id, ev->flow_id);
+	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+			"event_type mismatch enq=%d deq =%d",
+			attr->event_type, ev->event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+			"sub_event_type mismatch enq=%d deq =%d",
+			attr->sub_event_type, ev->sub_event_type);
+	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+			"sched_type mismatch enq=%d deq =%d",
+			attr->sched_type, ev->sched_type);
+	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+			"queue mismatch enq=%d deq =%d",
+			attr->queue, ev->queue_id);
+	return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+				 struct rte_event *ev);
+
+static int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+	int ret;
+	uint16_t valid_event;
+	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+	struct rte_event ev;
+
+	while (1) {
+		if (++forward_progress_cnt > UINT16_MAX) {
+			dpaa2_evdev_err("Detected deadlock");
+			return -1;
+		}
+
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		forward_progress_cnt = 0;
+		ret = validate_event(&ev);
+		if (ret)
+			return -1;
+
+		if (fn != NULL) {
+			ret = fn(index, port, &ev);
+			RTE_TEST_ASSERT_SUCCESS(ret,
+				"Failed to validate test specific event");
+		}
+
+		++index;
+
+		rte_pktmbuf_free(ev.mbuf);
+		if (++events >= total_events)
+			break;
+	}
+
+	return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+	struct event_attr *attr;
+
+	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+
+	RTE_SET_USED(port);
+	RTE_TEST_ASSERT_EQUAL(index, attr->seq,
+		"index=%d != seqn=%d", index, attr->seq);
+	return 0;
+}
+
+static int
+test_simple_enqdeq(uint8_t sched_type)
+{
+	int ret;
+
+	ret = inject_events(0 /*flow_id */,
+				RTE_EVENT_TYPE_CPU /* event_type */,
+				0 /* sub_event_type */,
+				sched_type,
+				0 /* queue */,
+				0 /* port */,
+				MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS,	validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, using single event port(port 0) verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+	int ret;
+
+	ret = generate_random_events(MAX_EVENTS);
+	if (ret)
+		return -1;
+
+	return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+	struct test_core_param *param = arg;
+	struct rte_event ev;
+	uint16_t valid_event;
+	uint8_t port = param->port;
+	rte_atomic32_t *total_events = param->total_events;
+	int ret;
+
+	while (rte_atomic32_read(total_events) > 0) {
+		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+		if (!valid_event)
+			continue;
+
+		ret = validate_event(&ev);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+		rte_pktmbuf_free(ev.mbuf);
+		rte_atomic32_sub(total_events, 1);
+	}
+	return 0;
+}
+
+static int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+	uint64_t cycles, print_cycles;
+
+	RTE_SET_USED(count);
+
+	print_cycles = cycles = rte_get_timer_cycles();
+	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+		uint64_t new_cycles = rte_get_timer_cycles();
+
+		if (new_cycles - print_cycles > rte_get_timer_hz()) {
+			dpaa2_evdev_dbg("\r%s: events %d", __func__,
+				rte_atomic32_read(count));
+			print_cycles = new_cycles;
+		}
+		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+			dpaa2_evdev_info(
+				"%s: No schedules for seconds, deadlock (%d)",
+				__func__,
+				rte_atomic32_read(count));
+			rte_event_dev_dump(evdev, stdout);
+			cycles = new_cycles;
+			return -1;
+		}
+	}
+	rte_eal_mp_wait_lcore();
+	return 0;
+}
+
+
+static int
+launch_workers_and_wait(int (*master_worker)(void *),
+			int (*slave_workers)(void *), uint32_t total_events,
+			uint8_t nb_workers, uint8_t sched_type)
+{
+	uint8_t port = 0;
+	int w_lcore;
+	int ret;
+	struct test_core_param *param;
+	rte_atomic32_t atomic_total_events;
+	uint64_t dequeue_tmo_ticks;
+
+	if (!nb_workers)
+		return 0;
+
+	rte_atomic32_set(&atomic_total_events, total_events);
+	seqn_list_init();
+
+	param = malloc(sizeof(struct test_core_param) * nb_workers);
+	if (!param)
+		return -1;
+
+	ret = rte_event_dequeue_timeout_ticks(evdev,
+		rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
+	if (ret) {
+		free(param);
+		return -1;
+	}
+
+	param[0].total_events = &atomic_total_events;
+	param[0].sched_type = sched_type;
+	param[0].port = 0;
+	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+	rte_smp_wmb();
+
+	w_lcore = rte_get_next_lcore(
+			/* start core */ -1,
+			/* skip master */ 1,
+			/* wrap */ 0);
+	rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+	for (port = 1; port < nb_workers; port++) {
+		param[port].total_events = &atomic_total_events;
+		param[port].sched_type = sched_type;
+		param[port].port = port;
+		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+		rte_smp_wmb();
+		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+	}
+
+	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+	free(param);
+	return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+	const unsigned int total_events = MAX_EVENTS;
+	uint32_t nr_ports;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);
+
+	if (!nr_ports) {
+		dpaa2_evdev_err("%s: Not enough ports=%d or workers=%d",
+				__func__, nr_ports, rte_lcore_count() - 1);
+		return 0;
+	}
+
+	return launch_workers_and_wait(worker_multi_port_fn,
+					worker_multi_port_fn, total_events,
+					nr_ports, 0xff /* invalid */);
+}
+
+static
+void flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+	unsigned int *count = arg;
+
+	RTE_SET_USED(dev_id);
+	if (event.event_type == RTE_EVENT_TYPE_CPU)
+		*count = *count + 1;
+
+}
+
+static int
+test_dev_stop_flush(void)
+{
+	unsigned int total_events = MAX_EVENTS, count = 0;
+	int ret;
+
+	ret = generate_random_events(total_events);
+	if (ret)
+		return -1;
+
+	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+	if (ret)
+		return -2;
+	rte_event_dev_stop(evdev);
+	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+	if (ret)
+		return -3;
+	RTE_TEST_ASSERT_EQUAL(total_events, count,
+				"count mismatch total_events=%d count=%d",
+				total_events, count);
+	return 0;
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link queue x to port x and check correctness of link by checking
+ * queue_id == x on dequeue on the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+	int i, nr_links, ret;
+
+	uint32_t port_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&port_count), "Port count get failed");
+
+	/* Unlink all connections that were created in eventdev_setup */
+	for (i = 0; i < (int)port_count; i++) {
+		ret = rte_event_port_unlink(evdev, i, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0,
+				"Failed to unlink all queues port=%d", i);
+	}
+
+	uint32_t queue_count;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &queue_count), "Queue count get failed");
+
+	nr_links = RTE_MIN(port_count, queue_count);
+	const unsigned int total_events = MAX_EVENTS / nr_links;
+
+	/* Link queue x to port x and inject events to queue x through port x */
+	for (i = 0; i < nr_links; i++) {
+		uint8_t queue = (uint8_t)i;
+
+		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			i /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+	}
+
+	/* Verify the events generated from correct queue */
+	for (i = 0; i < nr_links; i++) {
+		ret = consume_events(i /* port */, total_events,
+				validate_queue_to_port_single_link);
+		if (ret)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+			struct rte_event *ev)
+{
+	RTE_SET_USED(index);
+	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+				"queue mismatch enq=%d deq =%d",
+				port, ev->queue_id);
+	return 0;
+}
+
+/*
+ * Link all even number of queues to port 0 and all odd number of queues to
+ * port 1 and verify the link connection on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+	int ret, port0_events = 0, port1_events = 0;
+	uint8_t queue, port;
+	uint32_t nr_queues = 0;
+	uint32_t nr_ports = 0;
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+			    &nr_queues), "Queue count get failed");
+
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+				&nr_queues), "Queue count get failed");
+	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
+				RTE_EVENT_DEV_ATTR_PORT_COUNT,
+				&nr_ports), "Port count get failed");
+
+	if (nr_ports < 2) {
+		dpaa2_evdev_err("%s: Not enough ports to test ports=%d",
+				__func__, nr_ports);
+		return 0;
+	}
+
+	/* Unlink all connections that were created in eventdev_setup */
+	for (port = 0; port < nr_ports; port++) {
+		ret = rte_event_port_unlink(evdev, port, NULL, 0);
+		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+					port);
+	}
+
+	const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+	/* Link all even number of queues to port0 and odd numbers to port 1*/
+	for (queue = 0; queue < nr_queues; queue++) {
+		port = queue & 0x1;
+		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+					queue, port);
+
+		ret = inject_events(
+			0x100 /*flow_id */,
+			RTE_EVENT_TYPE_CPU /* event_type */,
+			rte_rand() % 256 /* sub_event_type */,
+			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+			queue /* queue */,
+			port /* port */,
+			total_events /* events */);
+		if (ret)
+			return -1;
+
+		if (port == 0)
+			port0_events += total_events;
+		else
+			port1_events += total_events;
+	}
+
+	ret = consume_events(0 /* port */, port0_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+	ret = consume_events(1 /* port */, port1_events,
+				validate_queue_to_port_multi_link);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
+static void dpaa2_test_run(int (*setup)(void), void (*tdown)(void),
+		int (*test)(void), const char *name)
+{
+	if (setup() < 0) {
+		RTE_LOG(INFO, PMD, "Error setting up test %s", name);
+		unsupported++;
+	} else {
+		if (test() < 0) {
+			failed++;
+			RTE_LOG(INFO, PMD, "%s Failed\n", name);
+		} else {
+			passed++;
+			RTE_LOG(INFO, PMD, "%s Passed", name);
+		}
+	}
+
+	total++;
+	tdown();
+}
+
+int
+test_eventdev_dpaa2(void)
+{
+	testsuite_setup();
+
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_atomic);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_simple_enqdeq_parallel);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_single_port_deq);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_dev_stop_flush);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_multi_queue_enq_multi_port_deq);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_single_link);
+	DPAA2_TEST_RUN(eventdev_setup, eventdev_teardown,
+			test_queue_to_port_multi_link);
+
+	DPAA2_EVENTDEV_INFO("Total tests   : %d", total);
+	DPAA2_EVENTDEV_INFO("Passed        : %d", passed);
+	DPAA2_EVENTDEV_INFO("Failed        : %d", failed);
+	DPAA2_EVENTDEV_INFO("Not supported : %d", unsupported);
+
+	testsuite_teardown();
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
index f7da7fad5..72f97d4c1 100644
--- a/drivers/event/dpaa2/meson.build
+++ b/drivers/event/dpaa2/meson.build
@@ -9,7 +9,8 @@ if not is_linux
 endif
 deps += ['bus_vdev', 'pmd_dpaa2', 'pmd_dpaa2_sec']
 sources = files('dpaa2_hw_dpcon.c',
-		'dpaa2_eventdev.c')
+		'dpaa2_eventdev.c',
+		'dpaa2_eventdev_selftest.c')
 
 allow_experimental_apis = true
 includes += include_directories('../../crypto/dpaa2_sec/')
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [dpdk-dev] [PATCH v4 6/6] test/event: enable dpaa2 self test
  2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
                         ` (4 preceding siblings ...)
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 5/6] event/dpaa2: add selftest cases Hemant Agrawal
@ 2019-09-30  8:32       ` Hemant Agrawal
  5 siblings, 0 replies; 34+ messages in thread
From: Hemant Agrawal @ 2019-09-30  8:32 UTC (permalink / raw)
  To: dev; +Cc: jerinj

This patch adds support to run the dpaa2 event self test
from the test framework.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 app/test/test_eventdev.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 783140dfe..427dbbf77 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -1020,9 +1020,16 @@ test_eventdev_selftest_octeontx2(void)
 	return test_eventdev_selftest_impl("otx2_eventdev", "");
 }
 
+static int
+test_eventdev_selftest_dpaa2(void)
+{
+	return test_eventdev_selftest_impl("event_dpaa2", "");
+}
+
 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
 REGISTER_TEST_COMMAND(eventdev_selftest_sw, test_eventdev_selftest_sw);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx,
 		test_eventdev_selftest_octeontx);
 REGISTER_TEST_COMMAND(eventdev_selftest_octeontx2,
 		test_eventdev_selftest_octeontx2);
+REGISTER_TEST_COMMAND(eventdev_selftest_dpaa2, test_eventdev_selftest_dpaa2);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 34+ messages in thread
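
For reference, the registered test command reaches the driver's selftest
through the public eventdev API. The sketch below is a minimal, hypothetical
standalone equivalent of the new eventdev_selftest_dpaa2 command, assuming the
dpaa2 event device has already been probed; rte_event_dev_get_dev_id() and
rte_event_dev_selftest() are the standard rte_eventdev.h calls that the common
test helper also goes through.

	#include <stdio.h>
	#include <rte_eal.h>
	#include <rte_eventdev.h>

	/* Hypothetical standalone runner: look up the dpaa2 event device by
	 * name and trigger its selftest via the public API. */
	int
	main(int argc, char **argv)
	{
		int dev_id, ret;

		if (rte_eal_init(argc, argv) < 0)
			return -1;

		dev_id = rte_event_dev_get_dev_id("event_dpaa2");
		if (dev_id < 0) {
			printf("event_dpaa2 device not found\n");
			return -1;
		}

		ret = rte_event_dev_selftest(dev_id);
		printf("dpaa2 eventdev selftest %s\n", ret ? "failed" : "passed");

		rte_eal_cleanup();
		return ret;
	}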

* Re: [dpdk-dev] [PATCH v4 1/6] event/dpaa2: fix def queue conf
  2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
@ 2019-09-30 14:17         ` Jerin Jacob
  0 siblings, 0 replies; 34+ messages in thread
From: Jerin Jacob @ 2019-09-30 14:17 UTC (permalink / raw)
  To: Hemant Agrawal; +Cc: dpdk-dev, Jerin Jacob, stable

On Mon, Sep 30, 2019 at 2:04 PM Hemant Agrawal <hemant.agrawal@nxp.com> wrote:
>
> Test vector expect only one type of scheduling as default.
> The old code is provide support scheduling types instead of default.
>
> Fixes: 13370a3877a5 ("eventdev: fix inconsistency in queue config")
> Cc: stable@dpdk.org
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Series applied to dpdk-next-eventdev/master. Thanks.

^ permalink raw reply	[flat|nested] 34+ messages in thread
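
On the default-queue-config fix discussed in this sub-thread: a single
scheduling type matters in practice because the common test code, like many
applications, feeds the default config straight back into queue setup. Below
is a hedged sketch of that consumer-side pattern; the helper name is made up
for illustration, while the two rte_event_queue_* calls are the standard
rte_eventdev.h API.

	#include <rte_eventdev.h>

	/* Sketch: fetch the default queue config and use it verbatim for
	 * queue setup. schedule_type must therefore hold one valid
	 * RTE_SCHED_TYPE_* value, not an OR of several types. */
	static int
	setup_queue_with_defaults(uint8_t dev_id, uint8_t queue_id)
	{
		struct rte_event_queue_conf qconf;
		int ret;

		ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
		if (ret)
			return ret;

		/* With the fix, qconf.schedule_type carries a single
		 * scheduling type and the config can be used as-is. */
		return rte_event_queue_setup(dev_id, queue_id, &qconf);
	}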

end of thread, other threads:[~2019-09-30 14:17 UTC | newest]

Thread overview: 34+ messages
-- links below jump to the message on this page --
2019-09-06 10:34 [dpdk-dev] [PATCH 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
2019-09-06 10:34 ` [dpdk-dev] [PATCH 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
2019-09-06 10:34 ` [dpdk-dev] [PATCH 2/5] event/dpaa2: remove conditional compilation Hemant Agrawal
2019-09-06 10:34 ` [dpdk-dev] [PATCH 3/5] event/dpaa2: add destroy support Hemant Agrawal
2019-09-06 10:34 ` [dpdk-dev] [PATCH 4/5] event/dpaa2: add selftest cases Hemant Agrawal
2019-09-06 19:29   ` Aaron Conole
2019-09-06 10:34 ` [dpdk-dev] [PATCH 5/5] test/event: enable dpaa2 self test Hemant Agrawal
2019-09-07  6:42 ` [dpdk-dev] [PATCH v2 0/5] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 1/5] event/dpaa2: fix def queue conf Hemant Agrawal
2019-09-13  6:24     ` Jerin Jacob
2019-09-26 17:55       ` Jerin Jacob
2019-09-27  6:02         ` Hemant Agrawal
2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 2/5] event/dpaa2: remove conditional compilation Hemant Agrawal
2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 3/5] event/dpaa2: add destroy support Hemant Agrawal
2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 4/5] event/dpaa2: add selftest cases Hemant Agrawal
2019-09-09 13:10     ` Aaron Conole
2019-09-10  7:19       ` Hemant Agrawal
2019-09-07  6:42   ` [dpdk-dev] [PATCH v2 5/5] test/event: enable dpaa2 self test Hemant Agrawal
2019-09-27  7:58   ` [dpdk-dev] [PATCH 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
2019-09-27  7:58     ` [dpdk-dev] [PATCH 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
2019-09-30  6:43       ` Jerin Jacob
2019-09-27  7:58     ` [dpdk-dev] [PATCH 2/6] event/dpaa2: remove conditional compilation Hemant Agrawal
2019-09-27  7:58     ` [dpdk-dev] [PATCH 3/6] event/dpaa2: add destroy support Hemant Agrawal
2019-09-27  7:58     ` [dpdk-dev] [PATCH 4/6] event/dpaa2: add retry break in packet enqueue Hemant Agrawal
2019-09-27  7:58     ` [dpdk-dev] [PATCH 5/6] event/dpaa2: add selftest cases Hemant Agrawal
2019-09-27  7:58     ` [dpdk-dev] [PATCH 6/6] test/event: enable dpaa2 self test Hemant Agrawal
2019-09-30  8:32     ` [dpdk-dev] [PATCH v4 0/6] NXP DPAA2 EVENTDEV enhancements Hemant Agrawal
2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 1/6] event/dpaa2: fix def queue conf Hemant Agrawal
2019-09-30 14:17         ` Jerin Jacob
2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 2/6] event/dpaa2: remove conditional compilation Hemant Agrawal
2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 3/6] event/dpaa2: add destroy support Hemant Agrawal
2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 4/6] event/dpaa2: add retry break in packet enqueue Hemant Agrawal
2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 5/6] event/dpaa2: add selftest cases Hemant Agrawal
2019-09-30  8:32       ` [dpdk-dev] [PATCH v4 6/6] test/event: enable dpaa2 self test Hemant Agrawal
