* [dpdk-dev] [RFC 1/4] test/virtual_pmd: enable getting device operations
@ 2021-06-17  8:14 Ferruh Yigit
  2021-06-17  8:14 ` [dpdk-dev] [RFC 2/4] test/virtual_pmd: clean rings on close Ferruh Yigit
                   ` (3 more replies)
  0 siblings, 4 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-06-17  8:14 UTC (permalink / raw)
  To: dev; +Cc: Ferruh Yigit

This will be used to overwrite the dev_ops for various tests.
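
For example, a test can fetch the ops table, temporarily override a
callback and restore it afterwards. A minimal sketch (illustrative only,
not part of this patch; assumes the usual test includes such as
'ethdev_driver.h', 'rte_ethdev.h' and 'virtual_pmd.h'):

static int
dummy_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static void
exercise_configure_error_path(uint16_t port_id)
{
	struct eth_dev_ops *ops = virtual_ethdev_ops_get(port_id);
	struct eth_dev_ops backup = *ops;
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	ops->dev_configure = dummy_configure_fail;
	/* rte_eth_dev_configure() is now expected to fail */
	rte_eth_dev_configure(port_id, 1, 1, &conf);
	/* restore the original ops so later tests are not affected */
	*ops = backup;
}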

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 14 ++++++++++++--
 app/test/virtual_pmd.h |  6 ++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7036f401ed95..4d6ce302a563 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -350,8 +350,8 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
 
 static uint16_t
 virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
-							 struct rte_mbuf **bufs __rte_unused,
-							 uint16_t nb_pkts __rte_unused)
+		struct rte_mbuf **bufs __rte_unused,
+		uint16_t nb_pkts __rte_unused)
 {
 	return 0;
 }
@@ -614,3 +614,13 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 
 	return -1;
 }
+
+struct eth_dev_ops *
+virtual_ethdev_ops_get(uint16_t port_id)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
+	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;
+
+	return dev_ops;
+}
diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h
index 120b58b27395..517dd0d2efa6 100644
--- a/app/test/virtual_pmd.h
+++ b/app/test/virtual_pmd.h
@@ -70,6 +70,12 @@ void
 virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
 		uint8_t packet_fail_count);
 
+/* Let application get dev_ops to be able to overwrite some operations
+ * per the specific test needs.
+ */
+struct eth_dev_ops *
+virtual_ethdev_ops_get(uint16_t port_id);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.31.1


* [dpdk-dev] [RFC 2/4] test/virtual_pmd: clean rings on close
  2021-06-17  8:14 [dpdk-dev] [RFC 1/4] test/virtual_pmd: enable getting device operations Ferruh Yigit
@ 2021-06-17  8:14 ` Ferruh Yigit
  2021-06-17  8:14 ` [dpdk-dev] [RFC 3/4] test/virtual_pmd: enable updating device flags Ferruh Yigit
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-06-17  8:14 UTC (permalink / raw)
  To: dev; +Cc: Ferruh Yigit

Not cleaning the rings prevents creating the devices again, which breaks
running some unit tests multiple times.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 4d6ce302a563..17f28c5a304c 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -34,7 +34,7 @@ struct virtual_ethdev_queue {
 };
 
 static int
-virtual_ethdev_start_success(struct rte_eth_dev *eth_dev __rte_unused)
+virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
 {
 	eth_dev->data->dev_started = 1;
 
@@ -42,13 +42,13 @@ virtual_ethdev_start_success(struct rte_eth_dev *eth_dev __rte_unused)
 }
 
 static int
-virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev __rte_unused)
+virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
 {
 	eth_dev->data->dev_started = 0;
 
 	return -1;
 }
-static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
+static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
 {
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
@@ -65,8 +65,13 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 }
 
 static int
-virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
+virtual_ethdev_close(struct rte_eth_dev *eth_dev)
 {
+	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
+
+	rte_ring_free(prv->rx_queue);
+	rte_ring_free(prv->tx_queue);
+
 	return 0;
 }
 
-- 
2.31.1


* [dpdk-dev] [RFC 3/4] test/virtual_pmd: enable updating device flags
  2021-06-17  8:14 [dpdk-dev] [RFC 1/4] test/virtual_pmd: enable getting device operations Ferruh Yigit
  2021-06-17  8:14 ` [dpdk-dev] [RFC 2/4] test/virtual_pmd: clean rings on close Ferruh Yigit
@ 2021-06-17  8:14 ` Ferruh Yigit
  2021-06-17  8:14 ` [dpdk-dev] [RFC 4/4] test: support ethdev Ferruh Yigit
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
  3 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-06-17  8:14 UTC (permalink / raw)
  To: dev; +Cc: Ferruh Yigit

To be able to test various dev_flags.
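
For example, a test can pretend the virtual device supports the LSC and
RMV interrupts before configuring with those interrupts requested. A
minimal sketch (illustrative only, not part of this patch):

static int
configure_with_intr_flags(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* advertise LSC and RMV support on the virtual device */
	virtual_ethdev_set_dev_flags(port_id,
			RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_INTR_RMV);
	conf.intr_conf.lsc = 1;
	conf.intr_conf.rmv = 1;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}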

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 10 ++++++++++
 app/test/virtual_pmd.h |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 17f28c5a304c..615243e19aed 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -629,3 +629,13 @@ virtual_ethdev_ops_get(uint16_t port_id)
 
 	return dev_ops;
 }
+
+int
+virtual_ethdev_set_dev_flags(uint16_t port_id, uint32_t dev_flags)
+{
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+
+	eth_dev->data->dev_flags = dev_flags;
+
+	return 0;
+}
diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h
index 517dd0d2efa6..80d5d343579a 100644
--- a/app/test/virtual_pmd.h
+++ b/app/test/virtual_pmd.h
@@ -76,6 +76,10 @@ virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
 struct eth_dev_ops *
 virtual_ethdev_ops_get(uint16_t port_id);
 
+/* For application to be able to alter the device flags */
+int
+virtual_ethdev_set_dev_flags(uint16_t port_id, uint32_t dev_flags);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.31.1


* [dpdk-dev] [RFC 4/4] test: support ethdev
  2021-06-17  8:14 [dpdk-dev] [RFC 1/4] test/virtual_pmd: enable getting device operations Ferruh Yigit
  2021-06-17  8:14 ` [dpdk-dev] [RFC 2/4] test/virtual_pmd: clean rings on close Ferruh Yigit
  2021-06-17  8:14 ` [dpdk-dev] [RFC 3/4] test/virtual_pmd: enable updating device flags Ferruh Yigit
@ 2021-06-17  8:14 ` Ferruh Yigit
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
  3 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-06-17  8:14 UTC (permalink / raw)
  To: dev, Thomas Monjalon, Andrew Rybchenko; +Cc: Ferruh Yigit

Add a unit test for ethdev APIs. This unit test, 'ethdev_api_autotest',
can run without a physical device. If there are physical devices probed,
they will be ignored by the unit test.

A few issues are fixed and some clarifications are added in the ethdev
library within this unit test patch.
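
The test can be run without any device attached, for example via the
existing DPDK_TEST mechanism, e.g. 'DPDK_TEST=ethdev_api_autotest
./dpdk-test' (the exact binary path depends on the build directory; this
is only a usage note, not part of the patch).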

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
Notes:
* 'rte_eth_dev_owner_unset()' error message is misleading:
  "Cannot set owner to port 1 already owned by ..."
  The unset API error message is about setting.

* 'rte_eth_dev_owner_delete()' crashes; it is fixed here, but it seems it is
  not used at all

* 'rte_eth_dev_configure()' is too complex; there are still many more things
  to test in that API.

* Is there a way to get the start/stop status of a port? Should we add a
  new API, 'rte_eth_dev_is_started()'? (A rough sketch follows these notes.)

* Need a way to get the bus from ethdev. The current API requires "rte_device",
  which is internal information from the ethdev perspective.

* Clarification added that PMD should implement 'dev_infos_get' for
  'rte_eth_dev_configure()' support.

* Tried to clarify dev_flags with more comments

* In configure, for the default config, providing only the Rx or only the
  Tx queue number passes the test, but it should fail; more checks are
  added to 'rte_eth_dev_configure()' for this.

* Do we need a way to get device 'dev_conf.rxmode.max_rx_pkt_len' value?
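
As a rough illustration of the 'rte_eth_dev_is_started()' idea above
(hypothetical only, not implemented by this series), such a helper could
simply expose the existing 'dev_started' flag, e.g.:

int
rte_eth_dev_is_started(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	/* hypothetical sketch: validate the port and report dev_started */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return dev->data->dev_started;
}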
---
 app/test/meson.build         |    1 +
 app/test/test.c              |    1 +
 app/test/test_ethdev.c       | 1160 ++++++++++++++++++++++++++++++++++
 lib/ethdev/ethdev_driver.h   |    6 +-
 lib/ethdev/rte_ethdev.c      |   19 +-
 lib/ethdev/rte_ethdev.h      |   14 +-
 lib/ethdev/rte_ethdev_core.h |    2 +-
 7 files changed, 1197 insertions(+), 6 deletions(-)
 create mode 100644 app/test/test_ethdev.c

diff --git a/app/test/meson.build b/app/test/meson.build
index 08c82d3d23a0..c55a7ac82bd8 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -41,6 +41,7 @@ test_sources = files(
         'test_efd.c',
         'test_efd_perf.c',
         'test_errno.c',
+        'test_ethdev.c',
         'test_ethdev_link.c',
         'test_event_crypto_adapter.c',
         'test_event_eth_rx_adapter.c',
diff --git a/app/test/test.c b/app/test/test.c
index 173d202e4774..82727e10b2be 100644
--- a/app/test/test.c
+++ b/app/test/test.c
@@ -222,6 +222,7 @@ main(int argc, char **argv)
 				break;
 		}
 		cmdline_free(cl);
+		printf("\n");
 		goto out;
 	} else {
 		/* if no DPDK_TEST env variable, go interactive */
diff --git a/app/test/test_ethdev.c b/app/test/test_ethdev.c
new file mode 100644
index 000000000000..69a2eaede1c3
--- /dev/null
+++ b/app/test/test_ethdev.c
@@ -0,0 +1,1160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev.h>
+#include <ethdev_driver.h>
+
+#include "test.h"
+#include "virtual_pmd.h"
+
+#define MAX_PORT_NUMBER	2
+
+static uint16_t port_id[MAX_PORT_NUMBER];
+static struct eth_dev_ops *dev_ops[MAX_PORT_NUMBER];
+static uint16_t initial_port_number;
+static uint16_t port_number;
+static uint64_t port_owner_id;
+static uint16_t invalid_port_id = 999;
+
+#define TEST_PMD_NAME	"net_test"
+
+#define MAX_RX_PKTLEN	2048
+
+static int
+ethdev_api_setup(void)
+{
+	struct rte_ether_addr mac_addr = {
+		{ 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0x00 },
+	};
+	char name[RTE_ETH_NAME_MAX_LEN];
+	uint16_t local_port_id;
+	int ret;
+
+	if (port_number != 0)
+		return TEST_SUCCESS;
+
+	initial_port_number = rte_eth_dev_count_total();
+
+	snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s%d", TEST_PMD_NAME, port_number);
+	ret = virtual_ethdev_create(name, &mac_addr, rte_socket_id(), 1);
+	TEST_ASSERT(ret >= 0, "Failed to create test PMD %s\n", name);
+	local_port_id = (uint16_t)ret;
+	dev_ops[port_number] = virtual_ethdev_ops_get(local_port_id);
+	port_id[port_number++] = local_port_id;
+
+	snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s%d", TEST_PMD_NAME, port_number);
+	ret = virtual_ethdev_create(name, &mac_addr, rte_socket_id(), 1);
+	TEST_ASSERT(ret >= 0, "Failed to create test PMD %s\n", name);
+	local_port_id = (uint16_t)ret;
+	dev_ops[port_number] = virtual_ethdev_ops_get(local_port_id);
+	port_id[port_number++] = local_port_id;
+
+	return TEST_SUCCESS;
+}
+
+static void
+ethdev_api_teardown(void)
+{
+	int local_port_number = port_number;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int i;
+
+	for (i = 0; i < local_port_number; i++) {
+		rte_eth_dev_close(port_id[i]);
+		snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s%d", TEST_PMD_NAME, i);
+		/* TODO: get bus from eth_dev */
+		rte_eal_hotplug_remove("pci", name);
+		port_number--;
+	}
+
+	/* reset global variables */
+	memset(port_id, 0, MAX_PORT_NUMBER * sizeof(port_id[0]));
+	memset(dev_ops, 0, MAX_PORT_NUMBER * sizeof(dev_ops[0]));
+	port_owner_id = RTE_ETH_DEV_NO_OWNER;
+}
+
+static int
+ethdev_count_avail(void)
+{
+	uint16_t count;
+
+	count = rte_eth_dev_count_avail();
+	TEST_ASSERT_EQUAL(count, port_number + initial_port_number,
+		"Failed to get available ethdev device count\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_get(void)
+{
+	char no_name[RTE_ETH_MAX_OWNER_NAME_LEN] = "";
+	struct rte_eth_dev_owner owner;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		ret = rte_eth_dev_owner_get(invalid_port_id, &owner);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner get accepted invalid port id %u\n",
+			invalid_port_id);
+
+		ret = rte_eth_dev_owner_get(port_id[i], NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner get accepted null owner for port id %u\n",
+			port_id[i]);
+
+		ret = rte_eth_dev_owner_get(port_id[i], &owner);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to get owner for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(owner.id, RTE_ETH_DEV_NO_OWNER,
+			"Received owner id doesn't match with no owner id port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_BUFFERS_ARE_EQUAL(owner.name, no_name,
+			RTE_ETH_MAX_OWNER_NAME_LEN,
+			"Received owner name doesn't match with no owner name port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_new(void)
+{
+	uint64_t local_port_owner_id;
+	int ret;
+
+	/* null owner id pointer */
+	ret = rte_eth_dev_owner_new(NULL);
+	RTE_TEST_ASSERT_FAIL(ret, "NULL owner argument accepted\n");
+
+	ret = rte_eth_dev_owner_new(&port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get new owner id\n");
+
+	/* Check not same owner ID received twice */
+	local_port_owner_id = port_owner_id;
+	ret = rte_eth_dev_owner_new(&port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get new owner id\n");
+	TEST_ASSERT_NOT_EQUAL(port_owner_id, local_port_owner_id,
+		"Existing owner id returned\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_set(void)
+{
+	struct rte_eth_dev_owner owner = {
+		.id = RTE_ETH_DEV_NO_OWNER,
+		.name = "TEST",
+	};
+	struct rte_eth_dev_owner owner_get;
+	uint16_t local_port_id = port_id[1];
+	const char *alternate_name = "TEST2";
+	int ret;
+
+	/* invalid port id */
+	ret = rte_eth_dev_owner_set(invalid_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Owner set accepted invalid port id %u\n",
+		invalid_port_id);
+
+	/* null owner */
+	ret = rte_eth_dev_owner_set(local_port_id, NULL);
+	RTE_TEST_ASSERT_FAIL(ret, "Owner set accepted null owner for port id %u\n",
+		local_port_id);
+
+	/* no owner id */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted no owner id for port id %u\n",
+		local_port_id);
+
+	/* invalid owner id */
+	owner.id = port_owner_id + 1; /* 'rte_eth_dev_owner_new()' called twice */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted invalid owner id for port id %u\n",
+		local_port_id);
+
+	/* set owner */
+	owner.id = port_owner_id;
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to set owner for port id %u\n",
+		local_port_id);
+
+	/* get the owner back and verify */
+	ret = rte_eth_dev_owner_get(local_port_id, &owner_get);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get owner for port id %u\n",
+		local_port_id);
+	TEST_ASSERT_EQUAL(owner.id, owner_get.id,
+		"Received owner id doesn't match with set owner id port id %u\n",
+		local_port_id);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(owner.name, owner_get.name,
+		RTE_ETH_MAX_OWNER_NAME_LEN,
+		"Received owner name doesn't match with set owner name port id %u\n",
+		local_port_id);
+
+	/* set same owner */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted same owner for port id %u\n",
+		local_port_id);
+
+	/* no owner id after owner set */
+	owner.id = RTE_ETH_DEV_NO_OWNER;
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted no owner id for port id %u\n",
+		local_port_id);
+
+	/* set owner with same owner id different owner name */
+	owner.id = port_owner_id;
+	strlcpy(owner.name, alternate_name, RTE_ETH_MAX_OWNER_NAME_LEN);
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Accepted same owner id different owner name for port id %u\n",
+		local_port_id);
+
+	/* set owner with same owner name different owner id */
+	owner.id = port_owner_id - 1; /* Two owner ids received */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Accepted different owner id with same owner name for port id %u\n",
+		local_port_id);
+
+	/* Set owner with very large name */
+	ret = rte_eth_dev_owner_unset(local_port_id, port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to unset owner for port id %u\n",
+		local_port_id);
+
+	owner.id = port_owner_id;
+	memset(owner.name, 'x', RTE_ETH_MAX_OWNER_NAME_LEN);
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_SUCCESS(ret,
+		"Failed to set owner with large name for port id %u\n",
+		local_port_id);
+
+	/* Force printing the previously set large name */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Accepted same owner with large name for port id %u\n",
+		local_port_id);
+
+	return TEST_SUCCESS;
+}
+
+/* There must be two ethdev devices created at this point,
+ * but one of them has an owner, so the available and total device counts
+ * should differ.
+ */
+static int
+ethdev_count_total(void)
+{
+	uint16_t total_count;
+	uint16_t available_count;
+	uint16_t count;
+
+	total_count = rte_eth_dev_count_total();
+	TEST_ASSERT_EQUAL(total_count, initial_port_number + port_number,
+		"Failed to get total ethdev device count\n");
+
+	available_count = initial_port_number + port_number - 1; /* One has owner */
+	count = rte_eth_dev_count_avail();
+	TEST_ASSERT_EQUAL(count, available_count,
+		"Failed to get available ethdev device count after ownership\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_unset(void)
+{
+	char no_name[RTE_ETH_MAX_OWNER_NAME_LEN] = "";
+	uint16_t local_port_id = port_id[1];
+	struct rte_eth_dev_owner owner;
+	uint64_t invalid_owner_id;
+	int ret;
+
+	/* unset owner with invalid port id */
+	ret = rte_eth_dev_owner_unset(invalid_port_id, port_owner_id);
+	RTE_TEST_ASSERT_FAIL(ret, "Owner unset accepted invalid port id %u\n",
+		invalid_port_id);
+
+	/* unset owner with invalid owner id */
+	invalid_owner_id = port_owner_id - 1;
+	ret = rte_eth_dev_owner_unset(local_port_id, invalid_owner_id);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Owner unset accepted invalid owner id %" PRIu64 " for port id %u\n",
+		invalid_owner_id, local_port_id);
+
+	invalid_owner_id = port_owner_id + 1;
+	ret = rte_eth_dev_owner_unset(local_port_id, invalid_owner_id);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Owner unset accepted invalid owner id %" PRIu64 " for port id %u\n",
+		invalid_owner_id, local_port_id);
+
+	/* unset owner */
+	ret = rte_eth_dev_owner_unset(local_port_id, port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to unset owner for port id %u\n",
+		local_port_id);
+
+	/* verify owner unset */
+	ret = rte_eth_dev_owner_get(local_port_id, &owner);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get owner for port id %u\n",
+		local_port_id);
+	TEST_ASSERT_EQUAL(owner.id, RTE_ETH_DEV_NO_OWNER,
+		"Received owner id doesn't match with no owner id port id %u\n",
+		local_port_id);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(owner.name, no_name,
+		RTE_ETH_MAX_OWNER_NAME_LEN,
+		"Received owner name doesn't match with no owner name port id %u\n",
+		local_port_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_delete(void)
+{
+	struct rte_eth_dev_owner owner = {
+		.id = port_owner_id,
+		.name = "TEST",
+	};
+	uint64_t invalid_owner_id;
+	int count;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* set owner */
+		ret = rte_eth_dev_owner_set(port_id[i], &owner);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to set owner for port id %u\n",
+			port_id[i]);
+
+		/* delete owner with invalid owner id */
+		invalid_owner_id = port_owner_id - 1;
+		ret = rte_eth_dev_owner_unset(port_id[i], invalid_owner_id);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner delete accepted invalid owner id %" PRIu64 " for port id %u\n",
+			invalid_owner_id, port_id[i]);
+
+		invalid_owner_id = port_owner_id + 1;
+		ret = rte_eth_dev_owner_unset(port_id[i], invalid_owner_id);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner delete accepted invalid owner id %" PRIu64 " for port id %u\n",
+			invalid_owner_id, port_id[i]);
+	}
+
+	ret = rte_eth_dev_owner_delete(port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to delete owner id %" PRIu64 "\n",
+		port_owner_id);
+
+	count = rte_eth_dev_count_avail();
+	TEST_ASSERT_EQUAL(count, initial_port_number + port_number,
+		"Failed to delete owner id %" PRIu64 " from some ethdev devices\n",
+		port_owner_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+configure_fail(struct rte_eth_dev *dev __rte_unused)
+{
+	return -1;
+}
+
+static int
+info_get_default_config(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+#define DEFAULT_BURST_SIZE	99
+#define DEFAULT_RING_SIZE	129
+#define DEFAULT_QUEUE_NUMBER	333
+	struct rte_eth_dev_portconf portconfig = {
+		.burst_size = DEFAULT_BURST_SIZE,
+		.ring_size = DEFAULT_RING_SIZE,
+		.nb_queues = DEFAULT_QUEUE_NUMBER,
+	};
+	dev_info->default_rxportconf = portconfig;
+	dev_info->default_txportconf = portconfig;
+
+	dev_info->max_rx_queues = DEFAULT_QUEUE_NUMBER + 1;
+	dev_info->max_tx_queues = DEFAULT_QUEUE_NUMBER + 1;
+
+	return 0;
+}
+
+static int
+info_get_offload_jumbo(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_pktlen = MAX_RX_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	return 0;
+}
+
+static int
+info_get_min_max_mtu(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_pktlen = MAX_RX_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->max_mtu = MAX_RX_PKTLEN - 100;
+
+	return 0;
+}
+
+static int
+info_get_lro(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TCP_LRO;
+
+	return 0;
+}
+
+static int
+info_get_lro_pkt_size(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+#define MAX_LRO_PKTLEN (MAX_RX_PKTLEN * 2)
+	dev_info->max_lro_pkt_size = MAX_LRO_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TCP_LRO;
+
+	return 0;
+}
+
+static int
+info_get_rss_hash_offload(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_RSS_HASH;
+
+	return 0;
+}
+
+static int
+ethdev_configure(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_conf dev_conf;
+	uint16_t nb_rx_q = 0;
+	uint16_t nb_tx_q = 0;
+	int ret;
+	int i;
+
+	memset(&dev_conf, 0, sizeof(dev_conf));
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_configure(invalid_port_id, nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Configure accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* set NULL config */
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL configuration for port id %u\n",
+			port_id[i]);
+
+		/* no configure dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_configure = NULL;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL configuration for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* no infos_get dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = NULL;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL info get dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_configure = configure_fail;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device configuration for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* configure after start */
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Configuring an already started port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop port id %u\n",
+			port_id[i]);
+
+		/* get device info for various tests below */
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+
+		/* set big Rx queue number */
+		nb_rx_q = RTE_MAX_QUEUES_PER_PORT + 1;
+		nb_tx_q = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue number > RTE_MAX_QUEUES configuration for port id %u\n",
+			port_id[i]);
+
+		nb_rx_q = dev_info.max_rx_queues + 1;
+		nb_tx_q = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue number > max_rx_queues configuration for port id %u\n",
+			port_id[i]);
+
+		/* set big Tx queue number */
+		nb_rx_q = 1;
+		nb_tx_q = RTE_MAX_QUEUES_PER_PORT + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Tx queue number > RTE_MAX_QUEUES configuration for port id %u\n",
+			port_id[i]);
+
+		nb_rx_q = 1;
+		nb_tx_q = dev_info.max_tx_queues + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Tx queue number > max_tx_queues configuration for port id %u\n",
+			port_id[i]);
+		nb_rx_q = 1;
+		nb_tx_q = 1;
+
+		/* request default queue number only for Rx or Tx */
+		nb_rx_q = 1;
+		nb_tx_q = 0;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted only Tx default queue number for port id %u\n",
+			port_id[i]);
+
+		nb_rx_q = 0;
+		nb_tx_q = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted only Rx default queue number for port id %u\n",
+			port_id[i]);
+		nb_rx_q = 1;
+		nb_tx_q = 1;
+
+		/* request not supported LSC */
+		dev_conf.intr_conf.lsc = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted LSC interrupt config port id %u\n",
+			port_id[i]);
+		dev_conf.intr_conf.lsc = 0;
+
+		/* request not supported RMV */
+		dev_conf.intr_conf.rmv = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted LSC interrupt config port id %u\n",
+			port_id[i]);
+		dev_conf.intr_conf.rmv = 0;
+
+		/* configure device */
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+
+		/* requested supported device features */
+		virtual_ethdev_set_dev_flags(port_id[i],
+			RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_INTR_RMV);
+		dev_conf.intr_conf.lsc = 1;
+		dev_conf.intr_conf.rmv = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to configure with device flags for port id %u\n",
+			port_id[i]);
+		dev_conf.intr_conf.lsc = 0;
+		dev_conf.intr_conf.rmv = 0;
+
+		/* Use default Rx/Tx queue numbers */
+		nb_rx_q = 0;
+		nb_tx_q = 0;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_rx_queues,
+				RTE_ETH_DEV_FALLBACK_RX_NBQUEUES,
+			"Default Rx queue number is wrong for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_tx_queues,
+				RTE_ETH_DEV_FALLBACK_TX_NBQUEUES,
+			"Default Tx queue number is wrong for port id %u\n",
+			port_id[i]);
+
+		/* Use PMD provided Rx/Tx queue numbers */
+		nb_rx_q = 0;
+		nb_tx_q = 0;
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_default_config;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_rx_queues, DEFAULT_QUEUE_NUMBER,
+			"Default driver Rx queue number is wrong for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_tx_queues, DEFAULT_QUEUE_NUMBER,
+			"Default driver Tx queue number is wrong for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+		nb_rx_q = 1;
+		nb_tx_q = 1;
+
+		/* check max_rx_pkt_len without jumbo frame support */
+		uint16_t overhead_len;
+		struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id[i]];
+		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+		dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + overhead_len + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				dev_conf.rxmode.max_rx_pkt_len,
+			"Accepted Rx packet length bigger than max MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				(uint32_t)(RTE_ETHER_MTU + overhead_len),
+			"Max Rx packet length calculated wrong for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+
+		dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MIN_MTU + overhead_len - 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				dev_conf.rxmode.max_rx_pkt_len,
+			"Accepted Rx packet length less than min MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				(uint32_t)(RTE_ETHER_MTU + overhead_len),
+			"Max Rx packet length calculated wrong for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+
+		/* check max_rx_pkt_len with jumbo frame support */
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN + 1;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted Rx packet length bigger than supported by device for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+
+		dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MIN_LEN - 1;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted Rx packet length less than min MTU for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+
+		uint16_t mtu;
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_offload_jumbo;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_get_mtu(port_id[i], &mtu);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_conf.rxmode.max_rx_pkt_len - overhead_len,
+				mtu,
+			"MTU calculated wrong on configure for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_offload_jumbo;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_get_mtu(port_id[i], &mtu);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_conf.rxmode.max_rx_pkt_len - overhead_len,
+				mtu,
+			"MTU calculated wrong on configure for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* max_rx_pkt_len with jumbo frame with min/max MTU */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_min_max_mtu;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
+		ret = rte_eth_dev_get_mtu(port_id[i], &mtu);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_conf.rxmode.max_rx_pkt_len - overhead_len,
+				mtu,
+			"MTU calculated wrong on configure for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* LRO */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_lro;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
+		dev_conf.rxmode.max_lro_pkt_size = MAX_RX_PKTLEN * 2;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted different LRO packet size when driver limit is missing for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.offloads = 0;
+		dev_conf.rxmode.max_lro_pkt_size = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_lro_pkt_size;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
+		dev_conf.rxmode.max_lro_pkt_size = MAX_LRO_PKTLEN + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted LRO packet size bigger than what device supports for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.offloads = 0;
+		dev_conf.rxmode.max_lro_pkt_size = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* offloads */
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted offload that is not in the capability for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+
+		/* RSS hash function */
+		dev_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_ETH;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted RSS hash function that is not in the capability for port id %u\n",
+			port_id[i]);
+		dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
+
+		/* RSS hash offload */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_rss_hash_offload;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_RSS_HASH;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted RSS hash offload without RSS for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+	}
+
+	// rss_hf src_only and dst_only
+	// eth_dev_tx_queue_config
+	// eth_dev_rx_queue_config
+	// RTE_ETHDEV_PROFILE_WITH_VTUNE
+	// eth_dev_validate_offloads
+	// restore config
+	// restore mtu
+
+	return TEST_SUCCESS;
+}
+
+
+static const char *virtual_ethdev_driver_name = "Virtual PMD";
+static int
+info_get_success(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+
+	dev_info->driver_name = virtual_ethdev_driver_name;
+	dev_info->max_mac_addrs = 1;
+
+	dev_info->max_rx_pktlen = MAX_RX_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->min_rx_bufsize = 0;
+
+	return 0;
+}
+
+static int
+info_get_fail(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info __rte_unused)
+{
+	return -1;
+}
+
+static int
+info_get_max_queues(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = RTE_MAX_QUEUES_PER_PORT + 1;
+	dev_info->max_tx_queues = RTE_MAX_QUEUES_PER_PORT + 1;
+
+	return 0;
+}
+
+static int
+info_get_mtu(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+#define MIN_MTU	256
+#define MAX_MTU 512
+	dev_info->min_mtu = MIN_MTU;
+	dev_info->max_mtu = MAX_MTU;
+
+	return 0;
+}
+
+static int
+ethdev_info_get(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	struct rte_eth_dev_info dev_info;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_info_get(invalid_port_id, &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Getting info accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* NULL info */
+		ret = rte_eth_dev_info_get(port_id[i], NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL info struct for port id %u\n",
+			port_id[i]);
+
+		/* no infos_get dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = NULL;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL info get dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_fail;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device info get for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* get info */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_success;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* big max queues number */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_max_queues;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(dev_info.nb_rx_queues, RTE_MAX_QUEUES_PER_PORT + 1,
+			"Accepted big Rx queue number for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(dev_info.nb_tx_queues, RTE_MAX_QUEUES_PER_PORT + 1,
+			"Accepted big Tx queue number for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* min/max MTU */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_mtu;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.min_mtu, MIN_MTU,
+			"Received min MTU is wrong for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.max_mtu, MAX_MTU,
+			"Received max MTU is wrong for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* verify dev_flags */
+#define DEV_FLAG 0xABCD
+		uint32_t local_dev_flag = DEV_FLAG;
+		virtual_ethdev_set_dev_flags(port_id[i], local_dev_flag);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(*dev_info.dev_flags, local_dev_flag,
+			"Received device flags is wrong for port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_rx_queue_setup(void)
+{
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_tx_queue_setup(void)
+{
+	return TEST_SUCCESS;
+}
+
+static int
+start_fail(struct rte_eth_dev *dev __rte_unused)
+{
+	return -1;
+}
+
+static int
+ethdev_start(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_start(invalid_port_id);
+		RTE_TEST_ASSERT_FAIL(ret, "Start accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* no dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_start = NULL;
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL start dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_start = start_fail;
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device start for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start port id %u\n",
+			port_id[i]);
+
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to start already started port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+stop_fail(struct rte_eth_dev *dev __rte_unused)
+{
+	return -1;
+}
+
+static int
+ethdev_stop(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_stop(invalid_port_id);
+		RTE_TEST_ASSERT_FAIL(ret, "Stop accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* no dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_stop = NULL;
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL stop dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_stop = stop_fail;
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device stop for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop port id %u\n",
+			port_id[i]);
+
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to stop already stopped port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite ethdev_api_testsuite = {
+	.suite_name = "ethdev API unit test suite",
+	.setup = ethdev_api_setup,
+	.teardown = ethdev_api_teardown,
+	.unit_test_cases = {
+		TEST_CASE(ethdev_count_avail),
+		TEST_CASE(ethdev_owner_get),
+		TEST_CASE(ethdev_owner_new),
+		TEST_CASE(ethdev_owner_set),
+		TEST_CASE(ethdev_count_total),
+		TEST_CASE(ethdev_owner_unset),
+		TEST_CASE(ethdev_owner_delete),
+		TEST_CASE(ethdev_configure),
+		TEST_CASE(ethdev_info_get),
+		TEST_CASE(ethdev_rx_queue_setup),
+		TEST_CASE(ethdev_tx_queue_setup),
+		TEST_CASE(ethdev_start),
+		TEST_CASE(ethdev_stop),
+		TEST_CASES_END(),
+	},
+};
+
+static int
+test_ethdev_api(void)
+{
+	return unit_test_suite_runner(&ethdev_api_testsuite);
+}
+
+REGISTER_TEST_COMMAND(ethdev_api_autotest, test_ethdev_api);
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 170a4e22a7c1..26e247f160d4 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -31,7 +31,11 @@ struct rte_hairpin_peer_info;
  */
 
 typedef int  (*eth_dev_configure_t)(struct rte_eth_dev *dev);
-/**< @internal Ethernet device configuration. */
+/**< @internal Ethernet device configuration.
+ *
+ * For ``rte_eth_dev_configure()`` API both ``eth_dev_configure_t`` and
+ * ``eth_dev_infos_get_t`` need to be implemented by the PMD.
+ */
 
 typedef int  (*eth_dev_start_t)(struct rte_eth_dev *dev);
 /**< @internal Function used to start a configured Ethernet device. */
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index c607eabb5b0c..8e6e632dec9c 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -694,6 +694,7 @@ eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
 	}
 
 	/* can not truncate (same structure) */
+	memset(port_owner->name, 0, RTE_ETH_MAX_OWNER_NAME_LEN);
 	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
 
 	port_owner->id = new_owner->id;
@@ -748,10 +749,13 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
 
 	if (eth_is_valid_owner_id(owner_id)) {
-		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
-			if (rte_eth_devices[port_id].data->owner.id == owner_id)
-				memset(&rte_eth_devices[port_id].data->owner, 0,
+		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+			struct rte_eth_dev_data *data =
+				rte_eth_devices[port_id].data;
+			if (data != NULL && data->owner.id == owner_id)
+				memset(&data->owner, 0,
 				       sizeof(struct rte_eth_dev_owner));
+		}
 		RTE_ETHDEV_LOG(NOTICE,
 			"All port owners owned by %016"PRIx64" identifier have removed\n",
 			owner_id);
@@ -1387,6 +1391,15 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If driver does not provide any preferred valued, fall back on
 	 * EAL defaults.
 	 */
+	if ((nb_rx_q & nb_tx_q) == 0 && (nb_rx_q | nb_tx_q) != 0) {
+		RTE_ETHDEV_LOG(ERR,
+			"Ethdev port_id (%u), Rx queue number (%u) and Tx queue number (%u) "
+			"should be both zero or both non-zero\n",
+			port_id, nb_rx_q, nb_tx_q);
+		ret = -EINVAL;
+		goto rollback;
+	}
+
 	if (nb_rx_q == 0 && nb_tx_q == 0) {
 		nb_rx_q = dev_info.default_rxportconf.nb_queues;
 		if (nb_rx_q == 0)
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index faf3bd901d75..a6ab64abf1df 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1837,6 +1837,10 @@ struct rte_eth_dev_owner {
 	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
 };
 
+/**
+ * Device flags set on ``eth_dev->data->dev_flags`` by drivers.
+ * These values can be received via ``rte_eth_dev_info_get()``
+ */
 /** PMD supports thread-safe flow operations */
 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  0x0001
 /** Device supports link state interrupt */
@@ -1980,6 +1984,10 @@ int rte_eth_dev_owner_new(uint64_t *owner_id);
  *
  * Set an Ethernet device owner.
  *
+ * Once an owner is set for an Ethernet device, setting owner again will fail,
+ * even if it is the exact same owner.
+ * Owner ids not obtained by ``rte_eth_dev_owner_new()`` are rejected.
+ *
  * @param	port_id
  *  The identifier of the port to own.
  * @param	owner
@@ -2524,6 +2532,8 @@ int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
  * On success, all basic functions exported by the Ethernet API (link status,
  * receive/transmit, and so on) can be invoked.
  *
+ * Starting an already started port returns success.
+ *
  * @param port_id
  *   The port identifier of the Ethernet device.
  * @return
@@ -2536,6 +2546,8 @@ int rte_eth_dev_start(uint16_t port_id);
  * Stop an Ethernet device. The device can be restarted with a call to
  * rte_eth_dev_start()
  *
+ * Stopping an already stopped port returns success.
+ *
  * @param port_id
  *   The port identifier of the Ethernet device.
  * @return
@@ -3036,7 +3048,7 @@ int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
  * min_mtu = RTE_ETHER_MIN_MTU
  * max_mtu = UINT16_MAX
  *
- * The following fields will be populated if support for dev_infos_get()
+ *ops The following fields will be populated if support for dev_infos_get()
  * exists for the device and the rte_eth_dev 'dev' has been populated
  * successfully with a call to it:
  *
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index 4679d948fa5e..43ab76760691 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -172,7 +172,7 @@ struct rte_eth_dev_data {
 		/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
 	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
 		/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
-	uint32_t dev_flags;             /**< Capabilities. */
+	uint32_t dev_flags;		/**< Device flags */
 	int numa_node;                  /**< NUMA node connection. */
 	struct rte_vlan_filter_conf vlan_filter_conf;
 			/**< VLAN filter configuration. */
-- 
2.31.1


* [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close
  2021-06-17  8:14 [dpdk-dev] [RFC 1/4] test/virtual_pmd: enable getting device operations Ferruh Yigit
                   ` (2 preceding siblings ...)
  2021-06-17  8:14 ` [dpdk-dev] [RFC 4/4] test: support ethdev Ferruh Yigit
@ 2021-07-16 14:27 ` Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 2/8] test/virtual_pmd: enable getting device operations Ferruh Yigit
                     ` (7 more replies)
  3 siblings, 8 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:27 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

Not cleaning the rings prevents creating the devices again, which breaks
running some unit tests multiple times.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 7036f401ed95..6098e633f35a 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -34,7 +34,7 @@ struct virtual_ethdev_queue {
 };
 
 static int
-virtual_ethdev_start_success(struct rte_eth_dev *eth_dev __rte_unused)
+virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
 {
 	eth_dev->data->dev_started = 1;
 
@@ -42,13 +42,13 @@ virtual_ethdev_start_success(struct rte_eth_dev *eth_dev __rte_unused)
 }
 
 static int
-virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev __rte_unused)
+virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
 {
 	eth_dev->data->dev_started = 0;
 
 	return -1;
 }
-static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
+static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
 {
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
@@ -65,8 +65,13 @@ static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
 }
 
 static int
-virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
+virtual_ethdev_close(struct rte_eth_dev *eth_dev)
 {
+	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
+
+	rte_ring_free(prv->rx_queue);
+	rte_ring_free(prv->tx_queue);
+
 	return 0;
 }
 
-- 
2.31.1


* [dpdk-dev] [RFC v2 2/8] test/virtual_pmd: enable getting device operations
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
@ 2021-07-16 14:27   ` Ferruh Yigit
  2023-08-22 21:10     ` Stephen Hemminger
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 3/8] test/virtual_pmd: enable updating device flags Ferruh Yigit
                     ` (6 subsequent siblings)
  7 siblings, 1 reply; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:27 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

This will be used to overwrite the dev_ops for various tests.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 14 ++++++++++++--
 app/test/virtual_pmd.h |  6 ++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 6098e633f35a..17f28c5a304c 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -355,8 +355,8 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
 
 static uint16_t
 virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
-							 struct rte_mbuf **bufs __rte_unused,
-							 uint16_t nb_pkts __rte_unused)
+		struct rte_mbuf **bufs __rte_unused,
+		uint16_t nb_pkts __rte_unused)
 {
 	return 0;
 }
@@ -619,3 +619,13 @@ virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
 
 	return -1;
 }
+
+struct eth_dev_ops *
+virtual_ethdev_ops_get(uint16_t port_id)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
+	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;
+
+	return dev_ops;
+}
diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h
index 120b58b27395..517dd0d2efa6 100644
--- a/app/test/virtual_pmd.h
+++ b/app/test/virtual_pmd.h
@@ -70,6 +70,12 @@ void
 virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
 		uint8_t packet_fail_count);
 
+/* Let application get dev_ops to be able to overwrite some operations
+ * per the specific test needs.
+ */
+struct eth_dev_ops *
+virtual_ethdev_ops_get(uint16_t port_id);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.31.1


* [dpdk-dev] [RFC v2 3/8] test/virtual_pmd: enable updating device flags
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 2/8] test/virtual_pmd: enable getting device operations Ferruh Yigit
@ 2021-07-16 14:27   ` Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 4/8] test/virtual_pmd: enable getting device data Ferruh Yigit
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:27 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

To be able to test various dev_flags.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 10 ++++++++++
 app/test/virtual_pmd.h |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 17f28c5a304c..615243e19aed 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -629,3 +629,13 @@ virtual_ethdev_ops_get(uint16_t port_id)
 
 	return dev_ops;
 }
+
+int
+virtual_ethdev_set_dev_flags(uint16_t port_id, uint32_t dev_flags)
+{
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+
+	eth_dev->data->dev_flags = dev_flags;
+
+	return 0;
+}
diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h
index 517dd0d2efa6..80d5d343579a 100644
--- a/app/test/virtual_pmd.h
+++ b/app/test/virtual_pmd.h
@@ -76,6 +76,10 @@ virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
 struct eth_dev_ops *
 virtual_ethdev_ops_get(uint16_t port_id);
 
+/* For application to be able to alter the device flags */
+int
+virtual_ethdev_set_dev_flags(uint16_t port_id, uint32_t dev_flags);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.31.1


* [dpdk-dev] [RFC v2 4/8] test/virtual_pmd: enable getting device data
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 2/8] test/virtual_pmd: enable getting device operations Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 3/8] test/virtual_pmd: enable updating device flags Ferruh Yigit
@ 2021-07-16 14:27   ` Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 5/8] test/virtual_pmd: support get queue info device ops Ferruh Yigit
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:27 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 10 ++++++++++
 app/test/virtual_pmd.h |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 615243e19aed..f2d807de8d89 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -639,3 +639,13 @@ virtual_ethdev_set_dev_flags(uint16_t port_id, uint32_t dev_flags)
 
 	return 0;
 }
+
+int
+virtual_ethdev_get_dev_data(uint16_t port_id, struct rte_eth_dev_data **data)
+{
+	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+
+	*data = eth_dev->data;
+
+	return 0;
+}
diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h
index 80d5d343579a..374bb4148f96 100644
--- a/app/test/virtual_pmd.h
+++ b/app/test/virtual_pmd.h
@@ -80,6 +80,10 @@ virtual_ethdev_ops_get(uint16_t port_id);
 int
 virtual_ethdev_set_dev_flags(uint16_t port_id, uint32_t dev_flags);
 
+/* Get device data for various checks */
+int
+virtual_ethdev_get_dev_data(uint16_t port_id, struct rte_eth_dev_data **data);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [dpdk-dev] [RFC v2 5/8] test/virtual_pmd: support get queue info device ops
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
                     ` (2 preceding siblings ...)
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 4/8] test/virtual_pmd: enable getting device data Ferruh Yigit
@ 2021-07-16 14:27   ` Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 6/8] test/virtual_pmd: provide descriptor limit info Ferruh Yigit
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:27 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index f2d807de8d89..e0ea213ae231 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -31,6 +31,8 @@ struct virtual_ethdev_private {
 struct virtual_ethdev_queue {
 	int port_id;
 	int queue_id;
+	uint16_t nb_desc;
+	struct rte_eth_rxconf rx_conf;
 };
 
 static int
@@ -106,9 +108,9 @@ virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
 
 static int
 virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
-		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
+		uint16_t rx_queue_id, uint16_t nb_rx_desc,
 		unsigned int socket_id,
-		const struct rte_eth_rxconf *rx_conf __rte_unused,
+		const struct rte_eth_rxconf *rx_conf,
 		struct rte_mempool *mb_pool __rte_unused)
 {
 	struct virtual_ethdev_queue *rx_q;
@@ -121,6 +123,8 @@ virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
 
 	rx_q->port_id = dev->data->port_id;
 	rx_q->queue_id = rx_queue_id;
+	rx_q->nb_desc = nb_rx_desc;
+	rx_q->rx_conf = *rx_conf;
 
 	dev->data->rx_queues[rx_queue_id] = rx_q;
 
@@ -159,6 +163,23 @@ virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static void
+virtual_ethdev_rx_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+		struct rte_eth_rxq_info *qinfo)
+{
+	struct virtual_ethdev_queue *rx_q = dev->data->rx_queues[rx_queue_id];
+
+	qinfo->nb_desc = rx_q->nb_desc;
+	qinfo->conf = rx_q->rx_conf;
+}
+
+static void
+virtual_ethdev_tx_info_get(struct rte_eth_dev *dev __rte_unused,
+		uint16_t tx_queue_id __rte_unused,
+		struct rte_eth_txq_info *qinfo __rte_unused)
+{
+}
+
 static int
 virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
 		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
@@ -248,6 +269,8 @@ static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
 	.dev_infos_get = virtual_ethdev_info_get,
 	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
 	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
+	.rxq_info_get = virtual_ethdev_rx_info_get,
+	.txq_info_get = virtual_ethdev_tx_info_get,
 	.rx_queue_release = virtual_ethdev_rx_queue_release,
 	.tx_queue_release = virtual_ethdev_tx_queue_release,
 	.link_update = virtual_ethdev_link_update_success,
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [dpdk-dev] [RFC v2 6/8] test/virtual_pmd: provide descriptor limit info
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
                     ` (3 preceding siblings ...)
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 5/8] test/virtual_pmd: support get queue info device ops Ferruh Yigit
@ 2021-07-16 14:27   ` Ferruh Yigit
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 7/8] test/virtual_pmd: support queue start/stop Ferruh Yigit
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:27 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 4 ++++
 app/test/virtual_pmd.h | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index e0ea213ae231..27c8501b96a7 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -103,6 +103,10 @@ virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
 
 	dev_info->min_rx_bufsize = 0;
 
+	dev_info->rx_desc_lim.nb_max = VIRTUAL_ETHDEV_MAX_DESC_NUM;
+	dev_info->rx_desc_lim.nb_min = VIRTUAL_ETHDEV_MIN_DESC_NUM;
+	dev_info->rx_desc_lim.nb_align = VIRTUAL_ETHDEV_DESC_ALIGN;
+
 	return 0;
 }
 
diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h
index 374bb4148f96..7e11d23f598e 100644
--- a/app/test/virtual_pmd.h
+++ b/app/test/virtual_pmd.h
@@ -11,6 +11,10 @@ extern "C" {
 
 #include <rte_ether.h>
 
+#define VIRTUAL_ETHDEV_MAX_DESC_NUM	2048
+#define VIRTUAL_ETHDEV_MIN_DESC_NUM	32
+#define VIRTUAL_ETHDEV_DESC_ALIGN	8
+
 int
 virtual_ethdev_init(void);
 
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [dpdk-dev] [RFC v2 7/8] test/virtual_pmd: support queue start/stop
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
                     ` (4 preceding siblings ...)
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 6/8] test/virtual_pmd: provide descriptor limit info Ferruh Yigit
@ 2021-07-16 14:27   ` Ferruh Yigit
  2023-08-22 21:11     ` Stephen Hemminger
  2021-07-16 14:28   ` [dpdk-dev] [RFC v2 8/8] test: support ethdev Ferruh Yigit
  2023-08-22 21:15   ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Stephen Hemminger
  7 siblings, 1 reply; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:27 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 app/test/virtual_pmd.c | 52 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
index 27c8501b96a7..4b8318cf39dd 100644
--- a/app/test/virtual_pmd.c
+++ b/app/test/virtual_pmd.c
@@ -35,11 +35,35 @@ struct virtual_ethdev_queue {
 	struct rte_eth_rxconf rx_conf;
 };
 
+static int
+virtual_ethdev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+	eth_dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
+static int
+virtual_ethdev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+	eth_dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+	return 0;
+}
+
 static int
 virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
 {
+	int i;
+
 	eth_dev->data->dev_started = 1;
 
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		virtual_ethdev_rx_queue_start(eth_dev, i);
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		virtual_ethdev_tx_queue_start(eth_dev, i);
+
 	return 0;
 }
 
@@ -50,10 +74,34 @@ virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
 
 	return -1;
 }
+
+static int
+virtual_ethdev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+	eth_dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+static int
+virtual_ethdev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+	eth_dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
 static int  virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
 {
 	void *pkt = NULL;
 	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;
+	int i;
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		virtual_ethdev_rx_queue_stop(eth_dev, i);
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		virtual_ethdev_tx_queue_stop(eth_dev, i);
 
 	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
@@ -271,6 +319,10 @@ static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
 	.dev_stop = virtual_ethdev_stop,
 	.dev_close = virtual_ethdev_close,
 	.dev_infos_get = virtual_ethdev_info_get,
+	.rx_queue_start = virtual_ethdev_rx_queue_start,
+	.tx_queue_start = virtual_ethdev_tx_queue_start,
+	.rx_queue_stop = virtual_ethdev_rx_queue_stop,
+	.tx_queue_stop = virtual_ethdev_tx_queue_stop,
 	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
 	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
 	.rxq_info_get = virtual_ethdev_rx_info_get,
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [dpdk-dev] [RFC v2 8/8] test: support ethdev
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
                     ` (5 preceding siblings ...)
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 7/8] test/virtual_pmd: support queue start/stop Ferruh Yigit
@ 2021-07-16 14:28   ` Ferruh Yigit
  2023-08-22 21:14     ` Stephen Hemminger
  2023-08-22 21:15   ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Stephen Hemminger
  7 siblings, 1 reply; 16+ messages in thread
From: Ferruh Yigit @ 2021-07-16 14:28 UTC (permalink / raw)
  To: Andrew Rybchenko, Thomas Monjalon; +Cc: Ferruh Yigit, dev

Add a unit test for the ethdev APIs. This unit test, 'ethdev_api_autotest',
can run without a physical device; if any physical devices are probed, they
are ignored by the unit test.

A few issues are fixed and some clarifications are added in the ethdev
library within this unit test patch.
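
For reviewers, a minimal sketch (not part of the patch) of the pattern the
new unit test relies on: get the virtual device's dev_ops via
virtual_ethdev_ops_get(), back them up, override a callback, call the ethdev
API under test, then restore the original ops:

#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "virtual_pmd.h"

static int
check_missing_infos_get(uint16_t port)
{
	struct eth_dev_ops *ops = virtual_ethdev_ops_get(port);
	struct eth_dev_ops backup = *ops;
	struct rte_eth_dev_info dev_info;
	int ret;

	ops->dev_infos_get = NULL; /* simulate a PMD missing the callback */
	ret = rte_eth_dev_info_get(port, &dev_info);
	*ops = backup; /* restore so later cases see a clean device */

	return ret == 0 ? -1 : 0; /* the info get is expected to fail */
}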

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
Notes:
* 'rte_eth_dev_owner_unset()' error message is misleading:
  "Cannot set owner to port 1 already owned by ..."
  The error message of the unset API talks about setting.

* 'rte_eth_dev_owner_delete()' crashes; it is fixed here, but it seems the
  API is not used at all.

* 'rte_eth_dev_configure()' is too complex; there are still many more things
  to test in that API.

* Is there a way to get the start/stop status of a port? Should we add a new
  API, 'rte_eth_dev_is_started()'? A rough sketch follows these notes.

* Need a way to get the bus from an ethdev. The current API requires
  "rte_device", which is internal information from the ethdev perspective.

* Clarification added that a PMD should implement 'dev_infos_get' to support
  'rte_eth_dev_configure()'.

* Tried to clarify dev_flags with more comments.

* In configure, with the default config, requesting the default queue number
  for only Rx or only Tx passes the test but should fail; more checks are
  added to 'rte_eth_dev_configure()' for this.

* Do we need a way to get the device's 'dev_conf.rxmode.max_rx_pkt_len' value?

* 'rte_eth_rx_queue_setup()' fails for multiple Rx mempools when the segment
  length is not configured (default 0).
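
A rough sketch of the 'rte_eth_dev_is_started()' idea mentioned above; no
such function exists today, the name, return convention and placement in
rte_ethdev.c are only a suggestion:

/* Hypothetical helper, would live next to the other ethdev APIs in
 * rte_ethdev.c where 'rte_eth_devices' is accessible.
 */
int
rte_eth_dev_is_started(uint16_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id))
		return -ENODEV;

	return rte_eth_devices[port_id].data->dev_started;
}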

Changelog:
v2:
* Because of the 'rte_eth_dev_configure()' change, the port can't be started
  before configure. And if the latest 'rte_eth_dev_configure()' fails, the
  port can't be started anymore, so the last 'rte_eth_dev_configure()' in the
  test must succeed.

* More 'ethdev_rx_queue_setup' tests added.
---
 app/test/meson.build         |    2 +
 app/test/test.c              |    1 +
 app/test/test_ethdev.c       | 1582 ++++++++++++++++++++++++++++++++++
 lib/ethdev/ethdev_driver.h   |    6 +-
 lib/ethdev/rte_ethdev.c      |   19 +-
 lib/ethdev/rte_ethdev.h      |   16 +-
 lib/ethdev/rte_ethdev_core.h |    2 +-
 7 files changed, 1622 insertions(+), 6 deletions(-)
 create mode 100644 app/test/test_ethdev.c

diff --git a/app/test/meson.build b/app/test/meson.build
index a7611686adcb..7f41f2cf5ec0 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -48,6 +48,7 @@ test_sources = files(
         'test_efd.c',
         'test_efd_perf.c',
         'test_errno.c',
+        'test_ethdev.c',
         'test_ethdev_link.c',
         'test_event_crypto_adapter.c',
         'test_event_eth_rx_adapter.c',
@@ -215,6 +216,7 @@ fast_tests = [
         ['eal_fs_autotest', true],
         ['errno_autotest', true],
         ['ethdev_link_status', true],
+        ['ethdev_api_autotest', true],
         ['event_ring_autotest', true],
         ['fib_autotest', true],
         ['fib6_autotest', true],
diff --git a/app/test/test.c b/app/test/test.c
index 173d202e4774..82727e10b2be 100644
--- a/app/test/test.c
+++ b/app/test/test.c
@@ -222,6 +222,7 @@ main(int argc, char **argv)
 				break;
 		}
 		cmdline_free(cl);
+		printf("\n");
 		goto out;
 	} else {
 		/* if no DPDK_TEST env variable, go interactive */
diff --git a/app/test/test_ethdev.c b/app/test/test_ethdev.c
new file mode 100644
index 000000000000..3100d8bbc9b5
--- /dev/null
+++ b/app/test/test_ethdev.c
@@ -0,0 +1,1582 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev.h>
+#include <ethdev_driver.h>
+
+#include "test.h"
+#include "virtual_pmd.h"
+
+#define MAX_PORT_NUMBER	2
+
+static uint16_t port_id[MAX_PORT_NUMBER];
+static struct eth_dev_ops *dev_ops[MAX_PORT_NUMBER];
+static uint16_t initial_port_number;
+static uint16_t port_number;
+static uint64_t port_owner_id;
+static uint16_t invalid_port_id = 999;
+static uint16_t default_nb_rx_q = 2;
+static uint16_t default_nb_tx_q = 2;
+
+#define TEST_PMD_NAME	"net_test"
+
+#define MAX_RX_PKTLEN	2048
+
+static int
+ethdev_api_setup(void)
+{
+	struct rte_ether_addr mac_addr = {
+		{ 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0x00 },
+	};
+	char name[RTE_ETH_NAME_MAX_LEN];
+	uint16_t local_port_id;
+	int ret;
+
+	if (port_number != 0)
+		return TEST_SUCCESS;
+
+	initial_port_number = rte_eth_dev_count_total();
+
+	snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s%d", TEST_PMD_NAME, port_number);
+	ret = virtual_ethdev_create(name, &mac_addr, rte_socket_id(), 1);
+	TEST_ASSERT(ret >= 0, "Failed to create test PMD %s\n", name);
+	local_port_id = (uint16_t)ret;
+	dev_ops[port_number] = virtual_ethdev_ops_get(local_port_id);
+	port_id[port_number++] = local_port_id;
+
+	snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s%d", TEST_PMD_NAME, port_number);
+	ret = virtual_ethdev_create(name, &mac_addr, rte_socket_id(), 1);
+	TEST_ASSERT(ret >= 0, "Failed to create test PMD %s\n", name);
+	local_port_id = (uint16_t)ret;
+	dev_ops[port_number] = virtual_ethdev_ops_get(local_port_id);
+	port_id[port_number++] = local_port_id;
+
+	return TEST_SUCCESS;
+}
+
+static void
+ethdev_api_teardown(void)
+{
+	int local_port_number = port_number;
+	char name[RTE_ETH_NAME_MAX_LEN];
+	int i;
+
+	for (i = 0; i < local_port_number; i++) {
+		rte_eth_dev_close(port_id[i]);
+		snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s%d", TEST_PMD_NAME, i);
+		/* TODO: get bus from eth_dev */
+		rte_eal_hotplug_remove("pci", name);
+		port_number--;
+	}
+
+	/* reset global variables */
+	memset(port_id, 0, MAX_PORT_NUMBER * sizeof(port_id[0]));
+	memset(dev_ops, 0, MAX_PORT_NUMBER * sizeof(dev_ops[0]));
+	port_owner_id = RTE_ETH_DEV_NO_OWNER;
+}
+
+static int
+ethdev_count_avail(void)
+{
+	uint16_t count;
+
+	count = rte_eth_dev_count_avail();
+	TEST_ASSERT_EQUAL(count, port_number + initial_port_number,
+		"Failed to get available ethdev device count\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_get(void)
+{
+	char no_name[RTE_ETH_MAX_OWNER_NAME_LEN] = "";
+	struct rte_eth_dev_owner owner;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		ret = rte_eth_dev_owner_get(invalid_port_id, &owner);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner get accepted invalid port id %u\n",
+			invalid_port_id);
+
+		ret = rte_eth_dev_owner_get(port_id[i], NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner get accepted null owner for port id %u\n",
+			port_id[i]);
+
+		ret = rte_eth_dev_owner_get(port_id[i], &owner);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to get owner for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(owner.id, RTE_ETH_DEV_NO_OWNER,
+			"Received owner id doesn't match with no owner id port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_BUFFERS_ARE_EQUAL(owner.name, no_name,
+			RTE_ETH_MAX_OWNER_NAME_LEN,
+			"Received owner name doesn't match with no owner name port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_new(void)
+{
+	uint64_t local_port_owner_id;
+	int ret;
+
+	/* null owner id pointer */
+	ret = rte_eth_dev_owner_new(NULL);
+	RTE_TEST_ASSERT_FAIL(ret, "NULL owner argument accepted\n");
+
+	ret = rte_eth_dev_owner_new(&port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get new owner id\n");
+
+	/* Check not same owner ID received twice */
+	local_port_owner_id = port_owner_id;
+	ret = rte_eth_dev_owner_new(&port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get new owner id\n");
+	TEST_ASSERT_NOT_EQUAL(port_owner_id, local_port_owner_id,
+		"Existing owner id returned\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_set(void)
+{
+	struct rte_eth_dev_owner owner = {
+		.id = RTE_ETH_DEV_NO_OWNER,
+		.name = "TEST",
+	};
+	struct rte_eth_dev_owner owner_get;
+	uint16_t local_port_id = port_id[1];
+	const char *alternate_name = "TEST2";
+	int ret;
+
+	/* invalid port id */
+	ret = rte_eth_dev_owner_set(invalid_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Owner set accepted invalid port id %u\n",
+		invalid_port_id);
+
+	/* null owner */
+	ret = rte_eth_dev_owner_set(local_port_id, NULL);
+	RTE_TEST_ASSERT_FAIL(ret, "Owner set accepted null owner for port id %u\n",
+		local_port_id);
+
+	/* no owner id */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted no owner id for port id %u\n",
+		local_port_id);
+
+	/* invalid owner id */
+	owner.id = port_owner_id + 1; /* 'rte_eth_dev_owner_new()' called twice */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted invalid owner id for port id %u\n",
+		local_port_id);
+
+	/* set owner */
+	owner.id = port_owner_id;
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to set owner for port id %u\n",
+		local_port_id);
+
+	/* get the owner back and verify */
+	ret = rte_eth_dev_owner_get(local_port_id, &owner_get);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get owner for port id %u\n",
+		local_port_id);
+	TEST_ASSERT_EQUAL(owner.id, owner_get.id,
+		"Received owner id doesn't match with set owner id port id %u\n",
+		local_port_id);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(owner.name, owner_get.name,
+		RTE_ETH_MAX_OWNER_NAME_LEN,
+		"Received owner name doesn't match with set owner name port id %u\n",
+		local_port_id);
+
+	/* set same owner */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted same owner for port id %u\n",
+		local_port_id);
+
+	/* no owner id after owner set */
+	owner.id = RTE_ETH_DEV_NO_OWNER;
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret, "Accepted no owner id for port id %u\n",
+		local_port_id);
+
+	/* set owner with same owner id different owner name */
+	owner.id = port_owner_id;
+	strlcpy(owner.name, alternate_name, RTE_ETH_MAX_OWNER_NAME_LEN);
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Accepted same owner id different owner name for port id %u\n",
+		local_port_id);
+
+	/* set owner with same owner name different owner id */
+	owner.id = port_owner_id - 1; /* Two owner ids received */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Accepted different owner id with same owner name for port id %u\n",
+		local_port_id);
+
+	/* Set owner with very large name */
+	ret = rte_eth_dev_owner_unset(local_port_id, port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to unset owner for port id %u\n",
+		local_port_id);
+
+	owner.id = port_owner_id;
+	memset(owner.name, 'x', RTE_ETH_MAX_OWNER_NAME_LEN);
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_SUCCESS(ret,
+		"Failed to set owner with large name for port id %u\n",
+		local_port_id);
+
+	/* Force printing the previously set large name */
+	ret = rte_eth_dev_owner_set(local_port_id, &owner);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Accepted same owner with large name for port id %u\n",
+		local_port_id);
+
+	return TEST_SUCCESS;
+}
+
+/* There must be two ethdev devices created at this point,
+ * but one of them has an owner, so the available and total device
+ * counts should differ.
+ */
+static int
+ethdev_count_total(void)
+{
+	uint16_t total_count;
+	uint16_t available_count;
+	uint16_t count;
+
+	total_count = rte_eth_dev_count_total();
+	TEST_ASSERT_EQUAL(total_count, initial_port_number + port_number,
+		"Failed to get total ethdev device count\n");
+
+	available_count = initial_port_number + port_number - 1; /* One has owner */
+	count = rte_eth_dev_count_avail();
+	TEST_ASSERT_EQUAL(count, available_count,
+		"Failed to get available ethdev device count after ownership\n");
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_unset(void)
+{
+	char no_name[RTE_ETH_MAX_OWNER_NAME_LEN] = "";
+	uint16_t local_port_id = port_id[1];
+	struct rte_eth_dev_owner owner;
+	uint64_t invalid_owner_id;
+	int ret;
+
+	/* unset owner with invalid port id */
+	ret = rte_eth_dev_owner_unset(invalid_port_id, port_owner_id);
+	RTE_TEST_ASSERT_FAIL(ret, "Owner unset accepted invalid port id %u\n",
+		invalid_port_id);
+
+	/* unset owner with invalid owner id */
+	invalid_owner_id = port_owner_id - 1;
+	ret = rte_eth_dev_owner_unset(local_port_id, invalid_owner_id);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Owner unset accepted invalid owner id %" PRIu64 " for port id %u\n",
+		invalid_owner_id, local_port_id);
+
+	invalid_owner_id = port_owner_id + 1;
+	ret = rte_eth_dev_owner_unset(local_port_id, invalid_owner_id);
+	RTE_TEST_ASSERT_FAIL(ret,
+		"Owner unset accepted invalid owner id %" PRIu64 " for port id %u\n",
+		invalid_owner_id, local_port_id);
+
+	/* unset owner */
+	ret = rte_eth_dev_owner_unset(local_port_id, port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to unset owner for port id %u\n",
+		local_port_id);
+
+	/* verify owner unset */
+	ret = rte_eth_dev_owner_get(local_port_id, &owner);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get owner for port id %u\n",
+		local_port_id);
+	TEST_ASSERT_EQUAL(owner.id, RTE_ETH_DEV_NO_OWNER,
+		"Received owner id doesn't match with no owner id port id %u\n",
+		local_port_id);
+	TEST_ASSERT_BUFFERS_ARE_EQUAL(owner.name, no_name,
+		RTE_ETH_MAX_OWNER_NAME_LEN,
+		"Received owner name doesn't match with no owner name port id %u\n",
+		local_port_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_owner_delete(void)
+{
+	struct rte_eth_dev_owner owner = {
+		.id = port_owner_id,
+		.name = "TEST",
+	};
+	uint64_t invalid_owner_id;
+	int count;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* set owner */
+		ret = rte_eth_dev_owner_set(port_id[i], &owner);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to set owner for port id %u\n",
+			port_id[i]);
+
+		/* delete owner with invalid owner id */
+		invalid_owner_id = port_owner_id - 1;
+		ret = rte_eth_dev_owner_unset(port_id[i], invalid_owner_id);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner delete accepted invalid owner id %" PRIu64 " for port id %u\n",
+			invalid_owner_id, port_id[i]);
+
+		invalid_owner_id = port_owner_id + 1;
+		ret = rte_eth_dev_owner_unset(port_id[i], invalid_owner_id);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Owner delete accepted invalid owner id %" PRIu64 " for port id %u\n",
+			invalid_owner_id, port_id[i]);
+	}
+
+	ret = rte_eth_dev_owner_delete(port_owner_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to delete owner id %" PRIu64 "\n",
+		port_owner_id);
+
+	count = rte_eth_dev_count_avail();
+	TEST_ASSERT_EQUAL(count, initial_port_number + port_number,
+		"Failed to delete owner id %" PRIu64 " from some ethdev devices\n",
+		port_owner_id);
+
+	return TEST_SUCCESS;
+}
+
+static int
+configure_fail(struct rte_eth_dev *dev __rte_unused)
+{
+	return -1;
+}
+
+static int
+info_get_default_config(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+#define DEFAULT_BURST_SIZE	99
+#define DEFAULT_RING_SIZE	129
+#define DEFAULT_QUEUE_NUMBER	333
+	struct rte_eth_dev_portconf portconfig = {
+		.burst_size = DEFAULT_BURST_SIZE,
+		.ring_size = DEFAULT_RING_SIZE,
+		.nb_queues = DEFAULT_QUEUE_NUMBER,
+	};
+	dev_info->default_rxportconf = portconfig;
+	dev_info->default_txportconf = portconfig;
+
+#define DEFAULT_RX_FREE_THRESH	48
+	dev_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
+	};
+
+#define DEFAULT_TX_FREE_THRESH	54
+	dev_info->default_txconf = (struct rte_eth_txconf) {
+		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
+	};
+
+	dev_info->max_rx_queues = DEFAULT_QUEUE_NUMBER + 1;
+	dev_info->max_tx_queues = DEFAULT_QUEUE_NUMBER + 1;
+
+	return 0;
+}
+
+static int
+info_get_offload_jumbo(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_pktlen = MAX_RX_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	return 0;
+}
+
+static int
+info_get_min_max_mtu(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_pktlen = MAX_RX_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->max_mtu = MAX_RX_PKTLEN - 100;
+
+	return 0;
+}
+
+static int
+info_get_lro(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TCP_LRO;
+
+	return 0;
+}
+
+static int
+info_get_lro_pkt_size(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+#define MAX_LRO_PKTLEN (MAX_RX_PKTLEN * 2)
+	dev_info->max_lro_pkt_size = MAX_LRO_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_TCP_LRO;
+
+	return 0;
+}
+
+static int
+info_get_rss_hash_offload(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_RSS_HASH;
+
+	return 0;
+}
+
+static int
+ethdev_configure(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_conf dev_conf;
+	uint16_t nb_rx_q = 0;
+	uint16_t nb_tx_q = 0;
+	int ret;
+	int i;
+
+	memset(&dev_conf, 0, sizeof(dev_conf));
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_configure(invalid_port_id, nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Configure accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* set NULL config */
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL configuration for port id %u\n",
+			port_id[i]);
+
+		/* no configure dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_configure = NULL;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL configuration for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* no infos_get dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = NULL;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL info get dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_configure = configure_fail;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device configuration for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* start before configure */
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Started before configure for port id %u\n",
+			port_id[i]);
+
+		/* get device info for various tests below */
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+
+		/* set big Rx queue number */
+		nb_rx_q = RTE_MAX_QUEUES_PER_PORT + 1;
+		nb_tx_q = default_nb_tx_q;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue number > RTE_MAX_QUEUES configuration for port id %u\n",
+			port_id[i]);
+
+		nb_rx_q = dev_info.max_rx_queues + 1;
+		nb_tx_q = default_nb_tx_q;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue number > max_rx_queues configuration for port id %u\n",
+			port_id[i]);
+
+		/* set big Tx queue number */
+		nb_rx_q = default_nb_rx_q;
+		nb_tx_q = RTE_MAX_QUEUES_PER_PORT + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Tx queue number > RTE_MAX_QUEUES configuration for port id %u\n",
+			port_id[i]);
+
+		nb_rx_q = default_nb_rx_q;
+		nb_tx_q = dev_info.max_tx_queues + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Tx queue number > max_tx_queues configuration for port id %u\n",
+			port_id[i]);
+		nb_rx_q = default_nb_rx_q;
+		nb_tx_q = default_nb_tx_q;
+
+		/* request default queue number only for Rx or Tx */
+		nb_rx_q = default_nb_rx_q;
+		nb_tx_q = 0;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted only Tx default queue number for port id %u\n",
+			port_id[i]);
+
+		nb_rx_q = 0;
+		nb_tx_q = default_nb_tx_q;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted only Rx default queue number for port id %u\n",
+			port_id[i]);
+		nb_rx_q = default_nb_rx_q;
+		nb_tx_q = default_nb_tx_q;
+
+		/* request not supported LSC */
+		dev_conf.intr_conf.lsc = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted LSC interrupt config port id %u\n",
+			port_id[i]);
+		dev_conf.intr_conf.lsc = 0;
+
+		/* request not supported RMV */
+		dev_conf.intr_conf.rmv = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted LSC interrupt config port id %u\n",
+			port_id[i]);
+		dev_conf.intr_conf.rmv = 0;
+
+		/* configure device */
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+
+		/* configure after start */
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Configuring an already started port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop port id %u\n",
+			port_id[i]);
+
+		/* requested supported device features */
+		virtual_ethdev_set_dev_flags(port_id[i],
+			RTE_ETH_DEV_INTR_LSC | RTE_ETH_DEV_INTR_RMV);
+		dev_conf.intr_conf.lsc = 1;
+		dev_conf.intr_conf.rmv = 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to configure with device flags for port id %u\n",
+			port_id[i]);
+		dev_conf.intr_conf.lsc = 0;
+		dev_conf.intr_conf.rmv = 0;
+
+		/* Use default Rx/Tx queue numbers */
+		nb_rx_q = 0;
+		nb_tx_q = 0;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_rx_queues,
+				RTE_ETH_DEV_FALLBACK_RX_NBQUEUES,
+			"Default Rx queue number is wrong for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_tx_queues,
+				RTE_ETH_DEV_FALLBACK_TX_NBQUEUES,
+			"Default Tx queue number is wrong for port id %u\n",
+			port_id[i]);
+		nb_rx_q = default_nb_rx_q;
+		nb_tx_q = default_nb_tx_q;
+
+		/* Use PMD provided Rx/Tx queue numbers */
+		nb_rx_q = 0;
+		nb_tx_q = 0;
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_default_config;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_rx_queues, DEFAULT_QUEUE_NUMBER,
+			"Default driver Rx queue number is wrong for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.nb_tx_queues, DEFAULT_QUEUE_NUMBER,
+			"Default driver Tx queue number is wrong for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+		nb_rx_q = default_nb_rx_q;
+		nb_tx_q = default_nb_tx_q;
+
+		/* check max_rx_pkt_len without jumbo frame support */
+		uint16_t overhead_len;
+		struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id[i]];
+		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+		dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + overhead_len + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				dev_conf.rxmode.max_rx_pkt_len,
+			"Accepted Rx packet length bigger than max MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				(uint32_t)(RTE_ETHER_MTU + overhead_len),
+			"Max Rx packet length calculated wrong for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+
+		dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MIN_MTU + overhead_len - 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				dev_conf.rxmode.max_rx_pkt_len,
+			"Accepted Rx packet length less than min MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				(uint32_t)(RTE_ETHER_MTU + overhead_len),
+			"Max Rx packet length calculated wrong for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+
+		/* check max_rx_pkt_len with jumbo frame support */
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN + 1;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted Rx packet length bigger than supported by device for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+
+		dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MIN_LEN - 1;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted Rx packet length less than min MTU for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+
+		uint16_t mtu;
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_offload_jumbo;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_get_mtu(port_id[i], &mtu);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_conf.rxmode.max_rx_pkt_len - overhead_len,
+				mtu,
+			"MTU calculated wrong on configure for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_offload_jumbo;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_get_mtu(port_id[i], &mtu);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_conf.rxmode.max_rx_pkt_len - overhead_len,
+				mtu,
+			"MTU calculated wrong on configure for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* max_rx_pkt_len with jumbo frame with min/max MTU */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_min_max_mtu;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
+		ret = rte_eth_dev_get_mtu(port_id[i], &mtu);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get MTU for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_conf.rxmode.max_rx_pkt_len - overhead_len,
+				mtu,
+			"MTU calculated wrong on configure for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* LRO */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_lro;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
+		dev_conf.rxmode.max_lro_pkt_size = MAX_RX_PKTLEN * 2;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted different LRO packet size when driver limit is missing for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.offloads = 0;
+		dev_conf.rxmode.max_lro_pkt_size = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_lro_pkt_size;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
+		dev_conf.rxmode.max_lro_pkt_size = MAX_LRO_PKTLEN + 1;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted LRO packet size bigger than what device supports for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.offloads = 0;
+		dev_conf.rxmode.max_lro_pkt_size = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* offloads */
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME;
+		dev_conf.rxmode.max_rx_pkt_len = MAX_RX_PKTLEN;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted offload that is not in the capability for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.max_rx_pkt_len = 0;
+		dev_conf.rxmode.offloads = 0;
+
+		/* RSS hash function */
+		dev_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_ETH;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted RSS hash function that is not in the capability for port id %u\n",
+			port_id[i]);
+		dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
+
+		/* RSS hash offload */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_rss_hash_offload;
+		dev_conf.rxmode.offloads = DEV_RX_OFFLOAD_RSS_HASH;
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted RSS hash offload without RSS for port id %u\n",
+			port_id[i]);
+		dev_conf.rxmode.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* start after failed configure */
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Started after failed configure for port id %u\n",
+			port_id[i]);
+
+		/* Need successful configure for start */
+		ret = rte_eth_dev_configure(port_id[i], nb_rx_q, nb_tx_q,
+			&dev_conf);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure port id %u\n",
+			port_id[i]);
+	}
+
+	// rss_hf src_only and dst_only
+	// eth_dev_tx_queue_config
+	// eth_dev_rx_queue_config
+	// RTE_ETHDEV_PROFILE_WITH_VTUNE
+	// eth_dev_validate_offloads
+	// restore config
+	// restore mtu
+
+	return TEST_SUCCESS;
+}
+
+
+static const char *virtual_ethdev_driver_name = "Virtual PMD";
+static int
+info_get_success(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+
+	dev_info->driver_name = virtual_ethdev_driver_name;
+	dev_info->max_mac_addrs = 1;
+
+	dev_info->max_rx_pktlen = MAX_RX_PKTLEN;
+
+	dev_info->max_rx_queues = (uint16_t)128;
+	dev_info->max_tx_queues = (uint16_t)512;
+
+	dev_info->min_rx_bufsize = 0;
+
+	return 0;
+}
+
+static int
+info_get_fail(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info __rte_unused)
+{
+	return -1;
+}
+
+static int
+info_get_max_queues(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->max_rx_queues = RTE_MAX_QUEUES_PER_PORT + 1;
+	dev_info->max_tx_queues = RTE_MAX_QUEUES_PER_PORT + 1;
+
+	return 0;
+}
+
+static int
+info_get_mtu(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+#define MIN_MTU	256
+#define MAX_MTU 512
+	dev_info->min_mtu = MIN_MTU;
+	dev_info->max_mtu = MAX_MTU;
+
+	return 0;
+}
+
+static int
+ethdev_info_get(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	struct rte_eth_dev_info dev_info;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_info_get(invalid_port_id, &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Getting info accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* NULL info */
+		ret = rte_eth_dev_info_get(port_id[i], NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL info struct for port id %u\n",
+			port_id[i]);
+
+		/* no infos_get dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = NULL;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL info get dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_fail;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device info get for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* get info */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_success;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* big max queues number */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_max_queues;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(dev_info.nb_rx_queues, RTE_MAX_QUEUES_PER_PORT + 1,
+			"Accepted big Rx queue number for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_NOT_EQUAL(dev_info.nb_tx_queues, RTE_MAX_QUEUES_PER_PORT + 1,
+			"Accepted big Tx queue number for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* min/max MTU */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_mtu;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.min_mtu, MIN_MTU,
+			"Received min MTU is wrong for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(dev_info.max_mtu, MAX_MTU,
+			"Received max MTU is wrong for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* verify dev_flags */
+#define DEV_FLAG 0xABCD
+		uint32_t local_dev_flag = DEV_FLAG;
+		virtual_ethdev_set_dev_flags(port_id[i], local_dev_flag);
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get info for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(*dev_info.dev_flags, local_dev_flag,
+			"Received device flags is wrong for port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+info_get_multi_rx_segment(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->rx_seg_capa.max_nseg = 3;
+	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
+	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
+	dev_info->rx_seg_capa.multi_pools = 0;
+
+	return 0;
+}
+
+static int
+info_get_large_min_rx_bufsize(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->min_rx_bufsize = RTE_MBUF_DEFAULT_BUF_SIZE;
+
+	return 0;
+}
+
+static int
+info_get_runtime_rx_setup(struct rte_eth_dev *dev __rte_unused,
+		struct rte_eth_dev_info *dev_info)
+{
+	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP;
+
+	return 0;
+}
+
+static int
+ethdev_rx_queue_setup(void)
+{
+	struct rte_eth_rxconf rx_conf;
+	uint16_t rx_queue_id = 0;
+	uint16_t nb_rx_desc = 256;
+	unsigned int socket_id = SOCKET_ID_ANY;
+	struct rte_mempool *mp = NULL;
+	struct rte_mempool *first_seg_mp = NULL;
+	struct rte_mempool *second_seg_mp = NULL;
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	struct rte_eth_dev_info dev_info;
+	int ret;
+	int i;
+
+	memset(&rx_conf, 0, sizeof(struct rte_eth_rxconf));
+	mp = rte_pktmbuf_pool_create("test_ethdev", 128, 0, 0,
+			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_rx_queue_setup(invalid_port_id, rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* invalid queue id */
+		rx_queue_id = default_nb_rx_q;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted invalid port id %u\n",
+			port_id[i]);
+		rx_queue_id = 0;
+
+		/* no rx_queue_setup dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->rx_queue_setup = NULL;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL Rx queue setup dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* no infos_get dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = NULL;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL info get dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing infos_get dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_fail;
+		ret = rte_eth_dev_info_get(port_id[i], &dev_info);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device info get for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* null mp with null rx_conf */
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, NULL, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null mempool with null config for port id %u\n",
+			port_id[i]);
+
+		/* null mp with null rx_seg */
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null mempool with null rx_seg for port id %u\n",
+			port_id[i]);
+
+		union rte_eth_rxseg rxseg[2];
+		memset(&rxseg, 0, sizeof(union rte_eth_rxseg) * 2);
+		/* null mp with zero rx_nseg */
+		rx_conf.rx_seg = &rxseg[0];
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null mempool with zero rx_nseg for port id %u\n",
+			port_id[i]);
+		rx_conf.rx_seg = NULL;
+
+		/* null mp without RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT offload */
+		rx_conf.rx_seg = &rxseg[0];
+		rx_conf.rx_nseg = 1;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null mempool without split offload for port id %u\n",
+			port_id[i]);
+		rx_conf.rx_seg = NULL;
+		rx_conf.rx_nseg = 0;
+
+		/* null mp with rx_nseg > seg_capa->max_nseg */
+		rx_conf.rx_seg = &rxseg[0];
+		rx_conf.rx_nseg = 1;
+		rx_conf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null mempool with segment "
+			"number bigger than supported for port id %u\n",
+			port_id[i]);
+		rx_conf.rx_seg = NULL;
+		rx_conf.rx_nseg = 0;
+		rx_conf.offloads = 0;
+
+		/* null mp with null segment mp */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_multi_rx_segment;
+		rx_conf.rx_seg = &rxseg[0];
+		rx_conf.rx_nseg = 1;
+		rx_conf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null mempool with null segment mempool for port id %u\n",
+			port_id[i]);
+		rx_conf.rx_seg = NULL;
+		rx_conf.rx_nseg = 0;
+		rx_conf.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+
+		/* null mp with segment mp are different when not supported */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_multi_rx_segment;
+		first_seg_mp = rte_pktmbuf_pool_create("test_ethdev1", 128, 0, 0,
+				RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+		second_seg_mp = rte_pktmbuf_pool_create("test_ethdev2", 128, 0, 0,
+				RTE_MBUF_DEFAULT_BUF_SIZE - 512, rte_socket_id());
+		rx_conf.rx_seg = rxseg;
+		rxseg[0].split.mp = first_seg_mp;
+		rxseg[0].split.length = 512;
+		//TODO: when rxseg.split.length is 0, API fails, check it
+		rxseg[1].split.mp = second_seg_mp;
+		rxseg[1].split.length = 512;
+		rx_conf.rx_nseg = 2;
+		rx_conf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, NULL);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null mempool with segment mp "
+			"are different when not supported for port id %u\n",
+			port_id[i]);
+		rx_conf.rx_seg = NULL;
+		rx_conf.rx_nseg = 0;
+		rx_conf.offloads = 0;
+		*local_dev_ops = backup_dev_ops;
+		memset(&rxseg, 0, sizeof(union rte_eth_rxseg) * 2);
+		rte_mempool_free(first_seg_mp);
+		first_seg_mp = NULL;
+		rte_mempool_free(second_seg_mp);
+		second_seg_mp = NULL;
+
+		//TODO: Add more segment Rx tests based on other capabilities
+
+		/* mp with non zero Rx segment number */
+		rx_conf.rx_seg = &rxseg[0];
+		rx_conf.rx_nseg = 1;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted mempool with non zero rx_nseg for port id %u\n",
+			port_id[i]);
+		rx_conf.rx_seg = NULL;
+		rx_conf.rx_nseg = 0;
+
+		/* mp with buffer size < min buffer size */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_large_min_rx_bufsize;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted mempool with buffer size < min_rx_bufsize for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* Use driver provided default Rx descriptor number */
+		struct rte_eth_rxq_info rxq_info;
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_default_config;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				0, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Rx queue setup failed for port id %u\n",
+			port_id[i]);
+		ret = rte_eth_rx_queue_info_get(port_id[i], rx_queue_id, &rxq_info);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Rx queue info get failed for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(rxq_info.nb_desc, DEFAULT_RING_SIZE,
+			"Not using default Rx desc number on Rx queue setup for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* Use library default Rx descriptor number */
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				0, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Rx queue setup failed for port id %u\n",
+			port_id[i]);
+		ret = rte_eth_rx_queue_info_get(port_id[i], rx_queue_id, &rxq_info);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Rx queue info get failed for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(rxq_info.nb_desc, RTE_ETH_DEV_FALLBACK_RX_RINGSIZE,
+			"Not using default Rx desc number on Rx queue setup for port id %u\n",
+			port_id[i]);
+
+		/* Rx descriptor less than min supported by driver */
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				VIRTUAL_ETHDEV_MIN_DESC_NUM - 1,
+				socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted Rx descriptor less than supported for port id %u\n",
+			port_id[i]);
+
+		/* Rx descriptor more than max supported by driver */
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				VIRTUAL_ETHDEV_MAX_DESC_NUM + 1,
+				socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted Rx descriptor more than supported for port id %u\n",
+			port_id[i]);
+
+		/* Rx descriptor number unaligned */
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc + VIRTUAL_ETHDEV_DESC_ALIGN + 1,
+				socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted unaligned number of Rx descriptor for port id %u\n",
+			port_id[i]);
+
+		/* Setup after port start */
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start port id %u\n",
+			port_id[i]);
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted after start for port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop port id %u\n",
+			port_id[i]);
+
+		/* Setup with runtime setup capability but queue is not stopped */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_runtime_rx_setup;
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start port id %u\n",
+			port_id[i]);
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup with runtime setup capability accepted "
+			"started queue for port id %u\n",
+			port_id[i]);
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* Setup Rx queue when the queue is already allocated but there
+		 * is no rx_queue_release dev_ops.
+		 */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->rx_queue_release = NULL;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Rx queue setup accepted null queue release dev_ops when "
+			"queue is already allocated for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* Use driver provided Rx configuration */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_infos_get = info_get_default_config;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, NULL, mp);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Rx queue setup failed for port id %u\n",
+			port_id[i]);
+		ret = rte_eth_rx_queue_info_get(port_id[i], rx_queue_id, &rxq_info);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Rx queue info get failed for port id %u\n",
+			port_id[i]);
+		TEST_ASSERT_EQUAL(rxq_info.conf.rx_free_thresh, DEFAULT_RX_FREE_THRESH,
+			"Not using default Rx config on Rx queue setup for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* Request unsupported Rx queue offload.
+		 * The expectation is that no Rx queue specific offload
+		 * capability is reported for the device and the following
+		 * offload is not enabled in the port configuration.
+		 */
+		rx_conf.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+		ret = rte_eth_rx_queue_setup(port_id[i], rx_queue_id,
+				nb_rx_desc, socket_id, &rx_conf, mp);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted unsupported Rx queue offload for port id %u\n",
+			port_id[i]);
+		rx_conf.offloads = 0;
+
+		/* TODO: LRO */
+
+		/* Check min Rx buffer size */
+		struct rte_eth_dev_data *data;
+		uint32_t size = RTE_MBUF_DEFAULT_BUF_SIZE;
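+		/* Each queue gets a pool with a progressively smaller data room;
+		 * min_rx_buf_size is expected to end up equal to the smallest
+		 * buffer size configured across the Rx queues.
+		 */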
+		for (int j = 0; j < default_nb_rx_q; j++) {
+			size -= 256;
+			first_seg_mp = rte_pktmbuf_pool_create("test_ethdev1",
+					128, 0, 0, size, rte_socket_id());
+			ret = rte_eth_rx_queue_setup(port_id[i], j, nb_rx_desc,
+					socket_id, &rx_conf, first_seg_mp);
+			RTE_TEST_ASSERT_SUCCESS(ret,
+				"Rx queue setup failed for port id %u\n",
+				port_id[i]);
+			rte_mempool_free(first_seg_mp);
+			first_seg_mp = NULL;
+		}
+		virtual_ethdev_get_dev_data(port_id[i], &data);
+		TEST_ASSERT_EQUAL(data->min_rx_buf_size, size,
+			"Rx queue setup set minimum Rx buffer size wrong for port id %u\n",
+			port_id[i]);
+		data = NULL;
+		size = 0;
+	}
+
+	rte_mempool_free(mp);
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_tx_queue_setup(void)
+{
+	return TEST_SUCCESS;
+}
+
+static int
+start_fail(struct rte_eth_dev *dev __rte_unused)
+{
+	return -1;
+}
+
+static int
+ethdev_start(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_start(invalid_port_id);
+		RTE_TEST_ASSERT_FAIL(ret, "Start accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* no dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_start = NULL;
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL start dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_start = start_fail;
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device start for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start port id %u\n",
+			port_id[i]);
+
+		ret = rte_eth_dev_start(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to start already started port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+stop_fail(struct rte_eth_dev *dev __rte_unused)
+{
+	return -1;
+}
+
+static int
+ethdev_stop(void)
+{
+	struct eth_dev_ops *local_dev_ops;
+	struct eth_dev_ops backup_dev_ops;
+	int ret;
+	int i;
+
+	for (i = 0; i < port_number; i++) {
+		/* invalid port id */
+		ret = rte_eth_dev_stop(invalid_port_id);
+		RTE_TEST_ASSERT_FAIL(ret, "Stop accepted invalid port id %u\n",
+			invalid_port_id);
+
+		/* no dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_stop = NULL;
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted NULL stop dev_ops for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		/* failing dev_ops */
+		local_dev_ops = dev_ops[i];
+		backup_dev_ops = *local_dev_ops;
+		local_dev_ops->dev_stop = stop_fail;
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_FAIL(ret,
+			"Accepted failing device stop for port id %u\n",
+			port_id[i]);
+		*local_dev_ops = backup_dev_ops;
+
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop port id %u\n",
+			port_id[i]);
+
+		ret = rte_eth_dev_stop(port_id[i]);
+		RTE_TEST_ASSERT_SUCCESS(ret,
+			"Failed to stop already stopped port id %u\n",
+			port_id[i]);
+	}
+
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_rx_queue_info_get(void)
+{
+	return TEST_SUCCESS;
+}
+
+static int
+ethdev_tx_queue_info_get(void)
+{
+	return TEST_SUCCESS;
+}
+
+static struct unit_test_suite ethdev_api_testsuite = {
+	.suite_name = "ethdev API unit test suite",
+	.setup = ethdev_api_setup,
+	.teardown = ethdev_api_teardown,
+	.unit_test_cases = {
+		TEST_CASE(ethdev_count_avail),
+		TEST_CASE(ethdev_owner_get),
+		TEST_CASE(ethdev_owner_new),
+		TEST_CASE(ethdev_owner_set),
+		TEST_CASE(ethdev_count_total),
+		TEST_CASE(ethdev_owner_unset),
+		TEST_CASE(ethdev_owner_delete),
+		TEST_CASE(ethdev_configure),
+		TEST_CASE(ethdev_info_get),
+		TEST_CASE(ethdev_rx_queue_setup),
+		TEST_CASE(ethdev_tx_queue_setup),
+		TEST_CASE(ethdev_rx_queue_info_get),
+		TEST_CASE(ethdev_tx_queue_info_get),
+		TEST_CASE(ethdev_start),
+		TEST_CASE(ethdev_stop),
+		TEST_CASES_END(),
+	},
+};
+
+static int
+test_ethdev_api(void)
+{
+	return unit_test_suite_runner(&ethdev_api_testsuite);
+}
+
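+/* Register the suite so it can be run from the dpdk-test prompt,
+ * e.g. "RTE>> ethdev_api_autotest".
+ */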
+REGISTER_TEST_COMMAND(ethdev_api_autotest, test_ethdev_api);
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 40e474aa7e7f..926bf96b719f 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -31,7 +31,11 @@ struct rte_hairpin_peer_info;
  */
 
 typedef int  (*eth_dev_configure_t)(struct rte_eth_dev *dev);
-/**< @internal Ethernet device configuration. */
+/**< @internal Ethernet device configuration.
+ *
+ * For the ``rte_eth_dev_configure()`` API, both ``eth_dev_configure_t`` and
+ * ``eth_dev_infos_get_t`` need to be implemented by the PMD.
+ */
 
 typedef int  (*eth_dev_start_t)(struct rte_eth_dev *dev);
 /**< @internal Function used to start a configured Ethernet device. */
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9d95cd11e1b5..972d332e94d2 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -694,6 +694,7 @@ eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
 	}
 
 	/* can not truncate (same structure) */
+	memset(port_owner->name, 0, RTE_ETH_MAX_OWNER_NAME_LEN);
 	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
 
 	port_owner->id = new_owner->id;
@@ -748,10 +749,13 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
 
 	if (eth_is_valid_owner_id(owner_id)) {
-		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
-			if (rte_eth_devices[port_id].data->owner.id == owner_id)
-				memset(&rte_eth_devices[port_id].data->owner, 0,
+		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+			struct rte_eth_dev_data *data =
+				rte_eth_devices[port_id].data;
+			if (data != NULL && data->owner.id == owner_id)
+				memset(&data->owner, 0,
 				       sizeof(struct rte_eth_dev_owner));
+		}
 		RTE_ETHDEV_LOG(NOTICE,
 			"All port owners owned by %016"PRIx64" identifier have removed\n",
 			owner_id);
@@ -1394,6 +1398,15 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * If driver does not provide any preferred valued, fall back on
 	 * EAL defaults.
 	 */
+	if ((nb_rx_q & nb_tx_q) == 0 && (nb_rx_q | nb_tx_q) != 0) {
+		RTE_ETHDEV_LOG(ERR,
+			"Ethdev port_id (%u), Rx queue number (%u) and Tx queue number (%u) "
+			"should be both zero or both non-zero\n",
+			port_id, nb_rx_q, nb_tx_q);
+		ret = -EINVAL;
+		goto rollback;
+	}
+
 	if (nb_rx_q == 0 && nb_tx_q == 0) {
 		nb_rx_q = dev_info.default_rxportconf.nb_queues;
 		if (nb_rx_q == 0)
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index d2b27c351fdb..6ab818b59f66 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1837,6 +1837,10 @@ struct rte_eth_dev_owner {
 	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
 };
 
+/**
+ * Device flags set on ``eth_dev->data->dev_flags`` by drivers.
+ * These values can be received via ``rte_eth_dev_info_get()``
+ */
 /** PMD supports thread-safe flow operations */
 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  0x0001
 /** Device supports link state interrupt */
@@ -1980,6 +1984,10 @@ int rte_eth_dev_owner_new(uint64_t *owner_id);
  *
  * Set an Ethernet device owner.
  *
+ * Once an owner is set for an Ethernet device, setting the owner again will
+ * fail, even if it is the exact same owner.
+ * Owner IDs not obtained via ``rte_eth_dev_owner_new()`` are rejected.
+ *
  * @param	port_id
  *  The identifier of the port to own.
  * @param	owner
@@ -2212,6 +2220,8 @@ rte_eth_dev_is_removed(uint16_t port_id);
  *   - -ENOMEM: Unable to allocate the receive ring descriptors or to
  *      allocate network memory buffers from the memory pool when
  *      initializing receive descriptors.
+ *   - -ENOTSUP: The function or ``rte_eth_dev_info_get()`` is not supported
+ *      by the driver.
  */
 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
@@ -2524,6 +2534,8 @@ int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
  * On success, all basic functions exported by the Ethernet API (link status,
  * receive/transmit, and so on) can be invoked.
  *
+ * Starting an already started port returns success.
+ *
  * @param port_id
  *   The port identifier of the Ethernet device.
  * @return
@@ -2536,6 +2548,8 @@ int rte_eth_dev_start(uint16_t port_id);
  * Stop an Ethernet device. The device can be restarted with a call to
  * rte_eth_dev_start()
  *
+ * Stopping an already stopped port returns success.
+ *
  * @param port_id
  *   The port identifier of the Ethernet device.
  * @return
@@ -3036,7 +3050,7 @@ int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
  * min_mtu = RTE_ETHER_MIN_MTU
  * max_mtu = UINT16_MAX
  *
- * The following fields will be populated if support for dev_infos_get()
+ *ops The following fields will be populated if support for dev_infos_get()
  * exists for the device and the rte_eth_dev 'dev' has been populated
  * successfully with a call to it:
  *
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index edf96de2dc2e..291b70a8cfc4 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -176,7 +176,7 @@ struct rte_eth_dev_data {
 		/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
 	uint8_t tx_queue_state[RTE_MAX_QUEUES_PER_PORT];
 		/**< Queues state: HAIRPIN(2) / STARTED(1) / STOPPED(0). */
-	uint32_t dev_flags;             /**< Capabilities. */
+	uint32_t dev_flags;		/**< Device flags */
 	int numa_node;                  /**< NUMA node connection. */
 	struct rte_vlan_filter_conf vlan_filter_conf;
 			/**< VLAN filter configuration. */
-- 
2.31.1


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [RFC v2 2/8] test/virtual_pmd: enable getting device operations
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 2/8] test/virtual_pmd: enable getting device operations Ferruh Yigit
@ 2023-08-22 21:10     ` Stephen Hemminger
  0 siblings, 0 replies; 16+ messages in thread
From: Stephen Hemminger @ 2023-08-22 21:10 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Andrew Rybchenko, Thomas Monjalon, dev

Looking at this old patch.

On Fri, 16 Jul 2021 15:27:54 +0100
Ferruh Yigit <ferruh.yigit@intel.com> wrote:

> diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c
> index 6098e633f35a..17f28c5a304c 100644
> --- a/app/test/virtual_pmd.c
> +++ b/app/test/virtual_pmd.c
> @@ -355,8 +355,8 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
>  
>  static uint16_t
>  virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
> -							 struct rte_mbuf **bufs __rte_unused,
> -							 uint16_t nb_pkts __rte_unused)
> +		struct rte_mbuf **bufs __rte_unused,
> +		uint16_t nb_pkts __rte_unused)

No need for just reindenting if code didn't change here.


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [RFC v2 7/8] test/virtual_pmd: support queue start/stop
  2021-07-16 14:27   ` [dpdk-dev] [RFC v2 7/8] test/virtual_pmd: support queue start/stop Ferruh Yigit
@ 2023-08-22 21:11     ` Stephen Hemminger
  0 siblings, 0 replies; 16+ messages in thread
From: Stephen Hemminger @ 2023-08-22 21:11 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Andrew Rybchenko, Thomas Monjalon, dev

On Fri, 16 Jul 2021 15:27:59 +0100
Ferruh Yigit <ferruh.yigit@intel.com> wrote:

> @@ -271,6 +319,10 @@ static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
>  	.dev_stop = virtual_ethdev_stop,
>  	.dev_close = virtual_ethdev_close,
>  	.dev_infos_get = virtual_ethdev_info_get,
> +	.rx_queue_start = virtual_ethdev_rx_queue_start,
> +	.tx_queue_start = virtual_ethdev_tx_queue_start,
> +	.rx_queue_stop = virtual_ethdev_rx_queue_stop,
> +	.tx_queue_stop = virtual_ethdev_tx_queue_stop,

This part of the patch needs to be rebased, driver now has other bits.


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [RFC v2 8/8] test: support ethdev
  2021-07-16 14:28   ` [dpdk-dev] [RFC v2 8/8] test: support ethdev Ferruh Yigit
@ 2023-08-22 21:14     ` Stephen Hemminger
  0 siblings, 0 replies; 16+ messages in thread
From: Stephen Hemminger @ 2023-08-22 21:14 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Andrew Rybchenko, Thomas Monjalon, dev

On Fri, 16 Jul 2021 15:28:00 +0100
Ferruh Yigit <ferruh.yigit@intel.com> wrote:

> +/**
> + * Device flags set on ``eth_dev->data->dev_flags`` by drivers.
> + * These values can be received via ``rte_eth_dev_info_get()``
> + */
Already done in later patches.

> @@ -3036,7 +3050,7 @@ int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
>   * min_mtu = RTE_ETHER_MIN_MTU
>   * max_mtu = UINT16_MAX
>   *
> - * The following fields will be populated if support for dev_infos_get()
> + *ops The following fields will be populated if support for dev_infos_get()

Typo, just don't change this.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close
  2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
                     ` (6 preceding siblings ...)
  2021-07-16 14:28   ` [dpdk-dev] [RFC v2 8/8] test: support ethdev Ferruh Yigit
@ 2023-08-22 21:15   ` Stephen Hemminger
  7 siblings, 0 replies; 16+ messages in thread
From: Stephen Hemminger @ 2023-08-22 21:15 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Andrew Rybchenko, Thomas Monjalon, dev

On Fri, 16 Jul 2021 15:27:53 +0100
Ferruh Yigit <ferruh.yigit@intel.com> wrote:

> Not cleaning the rings prevents creating devices again, which breaks to
> run some unit tests multiple times.
> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>

Is this patch set still worth doing?
If so it needs to be rebased, updated, and retested.
Marking it as changes requested.


^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2023-08-22 21:15 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-06-17  8:14 [dpdk-dev] [RFC 1/4] test/virtual_pmd: enable getting device operations Ferruh Yigit
2021-06-17  8:14 ` [dpdk-dev] [RFC 2/4] test/virtual_pmd: clean rings on close Ferruh Yigit
2021-06-17  8:14 ` [dpdk-dev] [RFC 3/4] test/virtual_pmd: enable updating device flags Ferruh Yigit
2021-06-17  8:14 ` [dpdk-dev] [RFC 4/4] test: support ethdev Ferruh Yigit
2021-07-16 14:27 ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Ferruh Yigit
2021-07-16 14:27   ` [dpdk-dev] [RFC v2 2/8] test/virtual_pmd: enable getting device operations Ferruh Yigit
2023-08-22 21:10     ` Stephen Hemminger
2021-07-16 14:27   ` [dpdk-dev] [RFC v2 3/8] test/virtual_pmd: enable updating device flags Ferruh Yigit
2021-07-16 14:27   ` [dpdk-dev] [RFC v2 4/8] test/virtual_pmd: enable getting device data Ferruh Yigit
2021-07-16 14:27   ` [dpdk-dev] [RFC v2 5/8] test/virtual_pmd: support get queue info device ops Ferruh Yigit
2021-07-16 14:27   ` [dpdk-dev] [RFC v2 6/8] test/virtual_pmd: provide descriptor limit info Ferruh Yigit
2021-07-16 14:27   ` [dpdk-dev] [RFC v2 7/8] test/virtual_pmd: support queue start/stop Ferruh Yigit
2023-08-22 21:11     ` Stephen Hemminger
2021-07-16 14:28   ` [dpdk-dev] [RFC v2 8/8] test: support ethdev Ferruh Yigit
2023-08-22 21:14     ` Stephen Hemminger
2023-08-22 21:15   ` [dpdk-dev] [RFC v2 1/8] test/virtual_pmd: clean rings on close Stephen Hemminger

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.