From: Jerin Jacob <jerin.jacob@caviumnetworks.com>
To: <dev@dpdk.org>
Cc: <thomas.monjalon@6wind.com>, <bruce.richardson@intel.com>,
	<hemant.agrawal@nxp.com>, <gage.eads@intel.com>,
	<harry.van.haaren@intel.com>,
	Jerin Jacob <jerin.jacob@caviumnetworks.com>
Subject: [PATCH v2 3/6] eventdev: implement the northbound APIs
Date: Tue, 6 Dec 2016 09:22:17 +0530	[thread overview]
Message-ID: <1480996340-29871-4-git-send-email-jerin.jacob@caviumnetworks.com> (raw)
In-Reply-To: <1480996340-29871-1-git-send-email-jerin.jacob@caviumnetworks.com>

This patch implements the northbound eventdev API interface using the
southbound driver interface.
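
As a rough illustration of the intended call flow (not part of this
patch), a minimal application sketch follows. The device id 0, the
single queue/port layout and the configuration values derived from
struct rte_event_dev_info are assumptions, and error checking is
trimmed for brevity:

    #include <string.h>
    #include <rte_eventdev.h>

    /* Illustrative sketch: configure one queue and one port on device 0,
     * link them, then move a single event through the device.
     */
    static void
    eventdev_usage_sketch(void)
    {
            struct rte_event_dev_info info;
            struct rte_event_dev_config config;
            struct rte_event ev;
            uint64_t ticks;
            const uint8_t dev_id = 0;

            if (rte_event_dev_count() == 0)
                    return;

            rte_event_dev_info_get(dev_id, &info);

            /* Stay within the limits advertised by the PMD */
            memset(&config, 0, sizeof(config));
            config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
            config.nb_events_limit = info.max_num_events;
            config.nb_event_queues = 1;
            config.nb_event_ports = 1;
            config.nb_event_queue_flows = info.max_event_queue_flows;
            config.nb_event_port_dequeue_depth =
                            info.max_event_port_dequeue_depth;
            config.nb_event_port_enqueue_depth =
                            info.max_event_port_enqueue_depth;
            rte_event_dev_configure(dev_id, &config);

            /* NULL selects the PMD default queue/port configuration */
            rte_event_queue_setup(dev_id, 0, NULL);
            rte_event_port_setup(dev_id, 0, NULL);

            /* NULL link list connects the port to all configured queues */
            rte_event_port_link(dev_id, 0, NULL, 0);
            rte_event_dev_start(dev_id);

            /* PMDs without an internal scheduler would also need
             * rte_event_schedule() called in a loop; omitted here.
             */
            rte_event_dequeue_timeout_ticks(dev_id, 100000, &ticks);
            if (rte_event_dequeue_burst(dev_id, 0, &ev, 1, ticks) == 1) {
                    /* ... process the event, then forward it ... */
                    rte_event_enqueue_burst(dev_id, 0, &ev, 1);
            }

            rte_event_dev_stop(dev_id);
            rte_event_dev_close(dev_id);
    }

The enqueue and dequeue calls above resolve to the inline wrappers
around the per-PMD function pointers added to rte_eventdev.h by this
patch.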

Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
 config/common_base                           |    6 +
 lib/Makefile                                 |    1 +
 lib/librte_eal/common/include/rte_log.h      |    1 +
 lib/librte_eventdev/Makefile                 |   57 ++
 lib/librte_eventdev/rte_eventdev.c           | 1001 ++++++++++++++++++++++++++
 lib/librte_eventdev/rte_eventdev.h           |  108 ++-
 lib/librte_eventdev/rte_eventdev_pmd.h       |  109 +++
 lib/librte_eventdev/rte_eventdev_version.map |   33 +
 mk/rte.app.mk                                |    1 +
 9 files changed, 1311 insertions(+), 6 deletions(-)
 create mode 100644 lib/librte_eventdev/Makefile
 create mode 100644 lib/librte_eventdev/rte_eventdev.c
 create mode 100644 lib/librte_eventdev/rte_eventdev_version.map

diff --git a/config/common_base b/config/common_base
index 4bff83a..7a8814e 100644
--- a/config/common_base
+++ b/config/common_base
@@ -411,6 +411,12 @@ CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n
 CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
 
 #
+# Compile generic event device library
+#
+CONFIG_RTE_LIBRTE_EVENTDEV=y
+CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
+CONFIG_RTE_EVENT_MAX_DEVS=16
+CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
 # Compile librte_ring
 #
 CONFIG_RTE_LIBRTE_RING=y
diff --git a/lib/Makefile b/lib/Makefile
index 990f23a..1a067bf 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,6 +41,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
 DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
 DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
 DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
+DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev
 DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
 DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
 DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 29f7d19..9a07d92 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -79,6 +79,7 @@ extern struct rte_logs rte_logs;
 #define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */
 #define RTE_LOGTYPE_MBUF    0x00010000 /**< Log related to mbuf. */
 #define RTE_LOGTYPE_CRYPTODEV 0x00020000 /**< Log related to cryptodev. */
+#define RTE_LOGTYPE_EVENTDEV 0x00040000 /**< Log related to eventdev. */
 
 /* these log types can be used in an application */
 #define RTE_LOGTYPE_USER1   0x01000000 /**< User-defined log type 1. */
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
new file mode 100644
index 0000000..dac0663
--- /dev/null
+++ b/lib/librte_eventdev/Makefile
@@ -0,0 +1,57 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 Cavium networks. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Cavium networks nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_eventdev.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library source files
+SRCS-y += rte_eventdev.c
+
+# export include files
+SYMLINK-y-include += rte_eventdev.h
+SYMLINK-y-include += rte_eventdev_pmd.h
+
+# versioning export map
+EXPORT_MAP := rte_eventdev_version.map
+
+# library dependencies
+DEPDIRS-y += lib/librte_eal
+DEPDIRS-y += lib/librte_mbuf
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
new file mode 100644
index 0000000..0a1d2d6
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -0,0 +1,1001 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Cavium networks. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+
+struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
+
+struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
+
+static struct rte_eventdev_global eventdev_globals = {
+	.nb_devs		= 0
+};
+
+struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
+
+/* Event dev north bound API implementation */
+
+uint8_t
+rte_event_dev_count(void)
+{
+	return rte_eventdev_globals->nb_devs;
+}
+
+int
+rte_event_dev_get_dev_id(const char *name)
+{
+	int i;
+
+	if (!name)
+		return -EINVAL;
+
+	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
+		if ((strcmp(rte_event_devices[i].data->name, name)
+				== 0) &&
+				(rte_event_devices[i].attached ==
+						RTE_EVENTDEV_ATTACHED))
+			return i;
+	return -ENODEV;
+}
+
+int
+rte_event_dev_socket_id(uint8_t dev_id)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	return dev->data->socket_id;
+}
+
+int
+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+	dev_info->pci_dev = dev->pci_dev;
+	return 0;
+}
+
+static inline int
+rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+{
+	uint8_t old_nb_queues = dev->data->nb_queues;
+	void **queues;
+	uint8_t *queues_prio;
+	unsigned int i;
+
+	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
+			 dev->data->dev_id);
+
+	/* First time configuration */
+	if (dev->data->queues == NULL && nb_queues != 0) {
+		dev->data->queues = rte_zmalloc_socket("eventdev->data->queues",
+				sizeof(dev->data->queues[0]) * nb_queues,
+				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+		if (dev->data->queues == NULL) {
+			dev->data->nb_queues = 0;
+			RTE_EDEV_LOG_ERR("failed to get memory for queue meta,"
+					"nb_queues %u", nb_queues);
+			return -(ENOMEM);
+		}
+		/* Allocate memory to store queue priority */
+		dev->data->queues_prio = rte_zmalloc_socket(
+				"eventdev->data->queues_prio",
+				sizeof(dev->data->queues_prio[0]) * nb_queues,
+				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+		if (dev->data->queues_prio == NULL) {
+			dev->data->nb_queues = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
+					"nb_queues %u", nb_queues);
+			return -(ENOMEM);
+		}
+
+	} else if (dev->data->queues != NULL && nb_queues != 0) {/* re-config */
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+		queues = dev->data->queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->queue_release)(queues[i]);
+
+		queues = rte_realloc(queues, sizeof(queues[0]) * nb_queues,
+				RTE_CACHE_LINE_SIZE);
+		if (queues == NULL) {
+			RTE_EDEV_LOG_ERR("failed to realloc queue meta data,"
+						" nb_queues %u", nb_queues);
+			return -(ENOMEM);
+		}
+		dev->data->queues = queues;
+
+		/* Reallocate memory to store queue priority */
+		queues_prio = dev->data->queues_prio;
+		queues_prio = rte_realloc(queues_prio,
+				sizeof(queues_prio[0]) * nb_queues,
+				RTE_CACHE_LINE_SIZE);
+		if (queues_prio == NULL) {
+			RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
+						" nb_queues %u", nb_queues);
+			return -(ENOMEM);
+		}
+		dev->data->queues_prio = queues_prio;
+
+		if (nb_queues > old_nb_queues) {
+			uint8_t new_qs = nb_queues - old_nb_queues;
+
+			memset(queues + old_nb_queues, 0,
+				sizeof(queues[0]) * new_qs);
+			memset(queues_prio + old_nb_queues, 0,
+				sizeof(queues_prio[0]) * new_qs);
+		}
+	} else if (dev->data->queues != NULL && nb_queues == 0) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+		queues = dev->data->queues;
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->queue_release)(queues[i]);
+	}
+
+	dev->data->nb_queues = nb_queues;
+	return 0;
+}
+
+static inline int
+rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+{
+	uint8_t old_nb_ports = dev->data->nb_ports;
+	void **ports;
+	uint16_t *links_map;
+	uint8_t *ports_dequeue_depth;
+	uint8_t *ports_enqueue_depth;
+	unsigned int i;
+
+	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
+			 dev->data->dev_id);
+
+	/* First time configuration */
+	if (dev->data->ports == NULL && nb_ports != 0) {
+		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
+				sizeof(dev->data->ports[0]) * nb_ports,
+				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+		if (dev->data->ports == NULL) {
+			dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
+					"nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+
+		/* Allocate memory to store ports dequeue depth */
+		dev->data->ports_dequeue_depth =
+			rte_zmalloc_socket("eventdev->ports_dequeue_depth",
+			sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
+			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+		if (dev->data->ports_dequeue_depth == NULL) {
+			dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
+					"nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+
+		/* Allocate memory to store ports enqueue depth */
+		dev->data->ports_enqueue_depth =
+			rte_zmalloc_socket("eventdev->ports_enqueue_depth",
+			sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
+			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+		if (dev->data->ports_enqueue_depth == NULL) {
+			dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
+					"nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+
+		/* Allocate memory to store queue to port link connection */
+		dev->data->links_map =
+			rte_zmalloc_socket("eventdev->links_map",
+			sizeof(dev->data->links_map[0]) * nb_ports *
+			RTE_EVENT_MAX_QUEUES_PER_DEV,
+			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+		if (dev->data->links_map == NULL) {
+			dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
+					"nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+		ports = dev->data->ports;
+		ports_dequeue_depth = dev->data->ports_dequeue_depth;
+		ports_enqueue_depth = dev->data->ports_enqueue_depth;
+		links_map = dev->data->links_map;
+
+		for (i = nb_ports; i < old_nb_ports; i++)
+			(*dev->dev_ops->port_release)(ports[i]);
+
+		/* Realloc memory for ports */
+		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
+				RTE_CACHE_LINE_SIZE);
+		if (ports == NULL) {
+			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
+						" nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+
+		/* Realloc memory for ports_dequeue_depth */
+		ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
+			sizeof(ports_dequeue_depth[0]) * nb_ports,
+			RTE_CACHE_LINE_SIZE);
+		if (ports_dequeue_depth == NULL) {
+			RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
+						" nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+
+		/* Realloc memory for ports_enqueue_depth */
+		ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
+			sizeof(ports_enqueue_depth[0]) * nb_ports,
+			RTE_CACHE_LINE_SIZE);
+		if (ports_enqueue_depth == NULL) {
+			RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
+						" nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+
+		/* Realloc memory to store queue to port link connection */
+		links_map = rte_realloc(links_map,
+			sizeof(dev->data->links_map[0]) * nb_ports *
+			RTE_EVENT_MAX_QUEUES_PER_DEV,
+			RTE_CACHE_LINE_SIZE);
+		if (links_map == NULL) {
+			dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
+					"nb_ports %u", nb_ports);
+			return -(ENOMEM);
+		}
+
+		if (nb_ports > old_nb_ports) {
+			uint8_t new_ps = nb_ports - old_nb_ports;
+
+			memset(ports + old_nb_ports, 0,
+				sizeof(ports[0]) * new_ps);
+			memset(ports_dequeue_depth + old_nb_ports, 0,
+				sizeof(ports_dequeue_depth[0]) * new_ps);
+			memset(ports_enqueue_depth + old_nb_ports, 0,
+				sizeof(ports_enqueue_depth[0]) * new_ps);
+			memset(links_map +
+				(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
+				0, sizeof(links_map[0]) * new_ps * RTE_EVENT_MAX_QUEUES_PER_DEV);
+		}
+
+		dev->data->ports = ports;
+		dev->data->ports_dequeue_depth = ports_dequeue_depth;
+		dev->data->ports_enqueue_depth = ports_enqueue_depth;
+		dev->data->links_map = links_map;
+	} else if (dev->data->ports != NULL && nb_ports == 0) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+		ports = dev->data->ports;
+		for (i = nb_ports; i < old_nb_ports; i++)
+			(*dev->dev_ops->port_release)(ports[i]);
+	}
+
+	dev->data->nb_ports = nb_ports;
+	return 0;
+}
+
+int
+rte_event_dev_configure(uint8_t dev_id,
+			const struct rte_event_dev_config *dev_conf)
+{
+	struct rte_eventdev *dev;
+	struct rte_event_dev_info info;
+	int diag;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+	if (dev->data->dev_started) {
+		RTE_EDEV_LOG_ERR(
+		    "device %d must be stopped to allow configuration", dev_id);
+		return -EBUSY;
+	}
+
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	(*dev->dev_ops->dev_infos_get)(dev, &info);
+
+	/* Check dequeue_timeout_ns value is in limit */
+	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+		if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+			|| dev_conf->dequeue_timeout_ns >
+				 info.max_dequeue_timeout_ns) {
+			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
+			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
+			dev_id, dev_conf->dequeue_timeout_ns,
+			info.min_dequeue_timeout_ns,
+			info.max_dequeue_timeout_ns);
+			return -EINVAL;
+		}
+	}
+
+	/* Check nb_events_limit is in limit */
+	if (dev_conf->nb_events_limit > info.max_num_events) {
+		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
+		dev_id, dev_conf->nb_events_limit, info.max_num_events);
+		return -EINVAL;
+	}
+
+	/* Check nb_event_queues is in limit */
+	if (!dev_conf->nb_event_queues) {
+		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
+					dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_event_queues > info.max_event_queues) {
+		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
+		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+		return -EINVAL;
+	}
+
+	/* Check nb_event_ports is in limit */
+	if (!dev_conf->nb_event_ports) {
+		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_event_ports > info.max_event_ports) {
+		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
+		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+		return -EINVAL;
+	}
+
+	/* Check nb_event_queue_flows is in limit */
+	if (!dev_conf->nb_event_queue_flows) {
+		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
+		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
+		dev_id, dev_conf->nb_event_queue_flows,
+		info.max_event_queue_flows);
+		return -EINVAL;
+	}
+
+	/* Check nb_event_port_dequeue_depth is in limit */
+	if (!dev_conf->nb_event_port_dequeue_depth) {
+		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
+					dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_event_port_dequeue_depth >
+			 info.max_event_port_dequeue_depth) {
+		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
+		dev_id, dev_conf->nb_event_port_dequeue_depth,
+		info.max_event_port_dequeue_depth);
+		return -EINVAL;
+	}
+
+	/* Check nb_event_port_enqueue_depth is in limit */
+	if (!dev_conf->nb_event_port_enqueue_depth) {
+		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
+					dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_event_port_enqueue_depth >
+			 info.max_event_port_enqueue_depth) {
+		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
+		dev_id, dev_conf->nb_event_port_enqueue_depth,
+		info.max_event_port_enqueue_depth);
+		return -EINVAL;
+	}
+
+	/* Copy the dev_conf parameter into the dev structure */
+	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+	/* Setup new number of queues and reconfigure device. */
+	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+	if (diag != 0) {
+		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
+				dev_id, diag);
+		return diag;
+	}
+
+	/* Setup new number of ports and reconfigure device. */
+	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+	if (diag != 0) {
+		rte_event_dev_queue_config(dev, 0);
+		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
+				dev_id, diag);
+		return diag;
+	}
+
+	/* Configure the device */
+	diag = (*dev->dev_ops->dev_configure)(dev);
+	if (diag != 0) {
+		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+		rte_event_dev_queue_config(dev, 0);
+		rte_event_dev_port_config(dev, 0);
+	}
+
+	dev->data->event_dev_cap = info.event_dev_cap;
+	return diag;
+}
+
+static inline int
+is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
+{
+	if (queue_id < dev->data->nb_queues && queue_id <
+				RTE_EVENT_MAX_QUEUES_PER_DEV)
+		return 1;
+	else
+		return 0;
+}
+
+int
+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
+				 struct rte_event_queue_conf *queue_conf)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	if (queue_conf == NULL)
+		return -EINVAL;
+
+	if (!is_valid_queue(dev, queue_id)) {
+		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
+	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
+	return 0;
+}
+
+static inline int
+is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+	if (queue_conf && (
+		((queue_conf->event_queue_cfg &
+			RTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)
+			== RTE_EVENT_QUEUE_CFG_FLAG_ALL_TYPES) ||
+		((queue_conf->event_queue_cfg &
+			RTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)
+			== RTE_EVENT_QUEUE_CFG_FLAG_ATOMIC_ONLY)
+		))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int
+is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+	if (queue_conf && (
+		((queue_conf->event_queue_cfg &
+			RTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)
+			== RTE_EVENT_QUEUE_CFG_FLAG_ALL_TYPES) ||
+		((queue_conf->event_queue_cfg &
+			RTE_EVENT_QUEUE_CFG_FLAG_TYPE_MASK)
+			== RTE_EVENT_QUEUE_CFG_FLAG_ORDERED_ONLY)
+		))
+		return 1;
+	else
+		return 0;
+}
+
+
+int
+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
+		      const struct rte_event_queue_conf *queue_conf)
+{
+	struct rte_eventdev *dev;
+	struct rte_event_queue_conf def_conf;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	if (!is_valid_queue(dev, queue_id)) {
+		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+		return -EINVAL;
+	}
+
+	/* Check nb_atomic_flows limit */
+	if (is_valid_atomic_queue_conf(queue_conf)) {
+		if (queue_conf->nb_atomic_flows == 0 ||
+		    queue_conf->nb_atomic_flows >
+			dev->data->dev_conf.nb_event_queue_flows) {
+			RTE_EDEV_LOG_ERR(
+		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
+			dev_id, queue_id, queue_conf->nb_atomic_flows,
+			dev->data->dev_conf.nb_event_queue_flows);
+			return -EINVAL;
+		}
+	}
+
+	/* Check nb_atomic_order_sequences limit */
+	if (is_valid_ordered_queue_conf(queue_conf)) {
+		if (queue_conf->nb_atomic_order_sequences == 0 ||
+		    queue_conf->nb_atomic_order_sequences >
+			dev->data->dev_conf.nb_event_queue_flows) {
+			RTE_EDEV_LOG_ERR(
+		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
+			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
+			dev->data->dev_conf.nb_event_queue_flows);
+			return -EINVAL;
+		}
+	}
+
+	if (dev->data->dev_started) {
+		RTE_EDEV_LOG_ERR(
+		    "device %d must be stopped to allow queue setup", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
+
+	if (queue_conf == NULL) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
+					-ENOTSUP);
+		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
+		def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_FLAG_DEFAULT;
+		queue_conf = &def_conf;
+	}
+
+	dev->data->queues_prio[queue_id] = queue_conf->priority;
+	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
+}
+
+uint8_t
+rte_event_queue_count(uint8_t dev_id)
+{
+	struct rte_eventdev *dev;
+
+	dev = &rte_eventdevs[dev_id];
+	return dev->data->nb_queues;
+}
+
+uint8_t
+rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
+{
+	struct rte_eventdev *dev;
+
+	dev = &rte_eventdevs[dev_id];
+	if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_FLAG_QUEUE_QOS)
+		return dev->data->queues_prio[queue_id];
+	else
+		return RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static inline int
+is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
+{
+	if (port_id < dev->data->nb_ports)
+		return 1;
+	else
+		return 0;
+}
+
+int
+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
+				 struct rte_event_port_conf *port_conf)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	if (port_conf == NULL)
+		return -EINVAL;
+
+	if (!is_valid_port(dev, port_id)) {
+		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
+	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
+	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
+	return 0;
+}
+
+int
+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
+		     const struct rte_event_port_conf *port_conf)
+{
+	struct rte_eventdev *dev;
+	struct rte_event_port_conf def_conf;
+	int diag;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+
+	if (!is_valid_port(dev, port_id)) {
+		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+		return -EINVAL;
+	}
+
+	/* Check new_event_threshold limit */
+	if ((port_conf && !port_conf->new_event_threshold) ||
+			(port_conf && port_conf->new_event_threshold >
+				 dev->data->dev_conf.nb_events_limit)) {
+		RTE_EDEV_LOG_ERR(
+		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
+			dev_id, port_id, port_conf->new_event_threshold,
+			dev->data->dev_conf.nb_events_limit);
+		return -EINVAL;
+	}
+
+	/* Check dequeue_depth limit */
+	if ((port_conf && !port_conf->dequeue_depth) ||
+			(port_conf && port_conf->dequeue_depth >
+		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
+		RTE_EDEV_LOG_ERR(
+		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
+			dev_id, port_id, port_conf->dequeue_depth,
+			dev->data->dev_conf.nb_event_port_dequeue_depth);
+		return -EINVAL;
+	}
+
+	/* Check enqueue_depth limit */
+	if ((port_conf && !port_conf->enqueue_depth) ||
+			(port_conf && port_conf->enqueue_depth >
+		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
+		RTE_EDEV_LOG_ERR(
+		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
+			dev_id, port_id, port_conf->enqueue_depth,
+			dev->data->dev_conf.nb_event_port_enqueue_depth);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started) {
+		RTE_EDEV_LOG_ERR(
+		    "device %d must be stopped to allow port setup", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
+
+	if (port_conf == NULL) {
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
+					-ENOTSUP);
+		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
+		port_conf = &def_conf;
+	}
+
+	dev->data->ports_dequeue_depth[port_id] =
+			port_conf->dequeue_depth;
+	dev->data->ports_enqueue_depth[port_id] =
+			port_conf->enqueue_depth;
+
+	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
+
+	/* Unlink all the queues from this port (default state after setup) */
+	if (!diag)
+		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
+
+	if (diag < 0)
+		return diag;
+
+	return 0;
+}
+
+uint8_t
+rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
+{
+	struct rte_eventdev *dev;
+
+	dev = &rte_eventdevs[dev_id];
+	return dev->data->ports_dequeue_depth[port_id];
+}
+
+uint8_t
+rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
+{
+	struct rte_eventdev *dev;
+
+	dev = &rte_eventdevs[dev_id];
+	return dev->data->ports_enqueue_depth[port_id];
+}
+
+uint8_t
+rte_event_port_count(uint8_t dev_id)
+{
+	struct rte_eventdev *dev;
+
+	dev = &rte_eventdevs[dev_id];
+	return dev->data->nb_ports;
+}
+
+int
+rte_event_port_link(uint8_t dev_id, uint8_t port_id,
+		    const struct rte_event_queue_link link[],
+		    uint16_t nb_links)
+{
+	struct rte_eventdev *dev;
+	struct rte_event_queue_link all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	uint16_t *links_map;
+	int i, diag;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);
+
+	if (!is_valid_port(dev, port_id)) {
+		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+		return -EINVAL;
+	}
+
+	if (link == NULL) {
+		for (i = 0; i < dev->data->nb_queues; i++) {
+			all_queues[i].queue_id = i;
+			all_queues[i].priority =
+				RTE_EVENT_DEV_PRIORITY_NORMAL;
+		}
+		link = all_queues;
+		nb_links = dev->data->nb_queues;
+	}
+
+	for (i = 0; i < nb_links; i++)
+		if (link[i].queue_id >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+			return -EINVAL;
+
+	diag = (*dev->dev_ops->port_link)(dev->data->ports[port_id], link,
+						 nb_links);
+	if (diag < 0)
+		return diag;
+
+	links_map = dev->data->links_map;
+	/* Point links_map to this port specific area */
+	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+	for (i = 0; i < diag; i++)
+		links_map[link[i].queue_id] = (uint8_t)link[i].priority;
+
+	return diag;
+}
+
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
+int
+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
+		      uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct rte_eventdev *dev;
+	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+	int i, diag;
+	uint16_t *links_map;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);
+
+	if (!is_valid_port(dev, port_id)) {
+		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+		return -EINVAL;
+	}
+
+	if (queues == NULL) {
+		for (i = 0; i < dev->data->nb_queues; i++)
+			all_queues[i] = i;
+		queues = all_queues;
+		nb_unlinks = dev->data->nb_queues;
+	}
+
+	for (i = 0; i < nb_unlinks; i++)
+		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+			return -EINVAL;
+
+	diag = (*dev->dev_ops->port_unlink)(dev->data->ports[port_id], queues,
+					nb_unlinks);
+
+	if (diag < 0)
+		return diag;
+
+	links_map = dev->data->links_map;
+	/* Point links_map to this port specific area */
+	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+	for (i = 0; i < diag; i++)
+		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+
+	return diag;
+}
+
+int
+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
+			struct rte_event_queue_link link[])
+{
+	struct rte_eventdev *dev;
+	uint16_t *links_map;
+	int i, count = 0;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+	if (!is_valid_port(dev, port_id)) {
+		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+		return -EINVAL;
+	}
+
+	links_map = dev->data->links_map;
+	/* Point links_map to this port specific area */
+	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+			link[count].queue_id = i;
+			link[count].priority = (uint8_t)links_map[i];
+			++count;
+		}
+	}
+	return count;
+}
+
+int
+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
+				 uint64_t *timeout_ticks)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
+
+	if (timeout_ticks == NULL)
+		return -EINVAL;
+
+	(*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
+	return 0;
+}
+
+int
+rte_event_dev_dump(uint8_t dev_id, FILE *f)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
+
+	(*dev->dev_ops->dump)(dev, f);
+	return 0;
+
+}
+
+int
+rte_event_dev_start(uint8_t dev_id)
+{
+	struct rte_eventdev *dev;
+	int diag;
+
+	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+	if (dev->data->dev_started != 0) {
+		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
+			dev_id);
+		return 0;
+	}
+
+	diag = (*dev->dev_ops->dev_start)(dev);
+	if (diag == 0)
+		dev->data->dev_started = 1;
+	else
+		return diag;
+
+	return 0;
+}
+
+void
+rte_event_dev_stop(uint8_t dev_id)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
+
+	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
+	dev = &rte_eventdevs[dev_id];
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+	if (dev->data->dev_started == 0) {
+		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
+			dev_id);
+		return;
+	}
+
+	dev->data->dev_started = 0;
+	(*dev->dev_ops->dev_stop)(dev);
+}
+
+int
+rte_event_dev_close(uint8_t dev_id)
+{
+	struct rte_eventdev *dev;
+
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_eventdevs[dev_id];
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
+				dev_id);
+		return -EBUSY;
+	}
+
+	return (*dev->dev_ops->dev_close)(dev);
+}
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index 451bb5d..cefca98 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -970,6 +970,8 @@ struct rte_event {
 	};
 };
 
+
+struct rte_eventdev_driver;
 struct rte_eventdev_ops;
 struct rte_eventdev;
 
@@ -991,6 +993,51 @@ typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
 /**< @internal Dequeue burst of events from port of a device */
 
+#define RTE_EVENTDEV_NAME_MAX_LEN	(64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+	int socket_id;
+	/**< Socket ID where memory is allocated */
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t nb_queues;
+	/**< Number of event queues. */
+	uint8_t nb_ports;
+	/**< Number of event ports. */
+	void **ports;
+	/**< Array of pointers to ports. */
+	uint8_t *ports_dequeue_depth;
+	/**< Array of port dequeue depth. */
+	uint8_t *ports_enqueue_depth;
+	/**< Array of port enqueue depth. */
+	void **queues;
+	/**< Array of pointers to queues. */
+	uint8_t *queues_prio;
+	/**< Array of queue priority. */
+	uint16_t *links_map;
+	/**< Memory to store queues to port connections. */
+	void *dev_private;
+	/**< PMD-specific private data */
+	uint32_t event_dev_cap;
+	/**< Event device capabilities (RTE_EVENT_DEV_CAP_FLAG) */
+	struct rte_event_dev_config dev_conf;
+	/**< Configuration applied to device. */
+
+	RTE_STD_C11
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	char name[RTE_EVENTDEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+} __rte_cache_aligned;
 
 /** @internal The data structure associated with each event device. */
 struct rte_eventdev {
@@ -1005,8 +1052,23 @@ struct rte_eventdev {
 	event_dequeue_burst_t dequeue_burst;
 	/**< Pointer to PMD dequeue burst function. */
 
+	struct rte_eventdev_data *data;
+	/**< Pointer to device data */
+	const struct rte_eventdev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	struct rte_pci_device *pci_dev;
+	/**< PCI info. supplied by probing */
+	const struct rte_eventdev_driver *driver;
+	/**< Driver for this device */
+
+	RTE_STD_C11
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
 } __rte_cache_aligned;
 
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
 
 /**
  * Schedule one or more events in the event dev.
@@ -1017,8 +1079,13 @@ struct rte_eventdev {
  * @param dev_id
  *   The identifier of the device.
  */
-void
-rte_event_schedule(uint8_t dev_id);
+static inline void
+rte_event_schedule(uint8_t dev_id)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	if (*dev->schedule)
+		(*dev->schedule)(dev);
+}
 
 /**
  * Enqueue a burst of events objects or an event object supplied in *rte_event*
@@ -1053,9 +1120,23 @@ rte_event_schedule(uint8_t dev_id);
  *
  * @see rte_event_port_enqueue_depth()
  */
-uint16_t
+static inline uint16_t
 rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events);
+			uint16_t nb_events)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	/*
+	 * Allow zero-cost invocation of the non-burst routine when the
+	 * application passes nb_events as a compile-time constant of one.
+	 */
+	if (nb_events == 1)
+		return (*dev->enqueue)(
+			dev->data->ports[port_id], ev);
+	else
+		return (*dev->enqueue_burst)(
+			dev->data->ports[port_id], ev, nb_events);
+}
 
 /**
  * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
@@ -1147,9 +1228,24 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
  *
  * @see rte_event_port_dequeue_depth()
  */
-uint16_t
+static inline uint16_t
 rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
-			uint16_t nb_events, uint64_t timeout_ticks);
+			uint16_t nb_events, uint64_t timeout_ticks)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	/*
+	 * Allow zero-cost invocation of the non-burst routine when the
+	 * application passes nb_events as a compile-time constant of one.
+	 */
+	if (nb_events == 1)
+		return (*dev->dequeue)(
+			dev->data->ports[port_id], ev, timeout_ticks);
+	else
+		return (*dev->dequeue_burst)(
+			dev->data->ports[port_id], ev, nb_events,
+				timeout_ticks);
+}
 
 /** Structure to hold the queue to port link establishment attributes */
 struct rte_event_queue_link {
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 0b04ab7..7d94031 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -44,8 +44,117 @@
 extern "C" {
 #endif
 
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
 #include "rte_eventdev.h"
 
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define RTE_PMD_DEBUG_TRACE(...) \
+	rte_pmd_debug_trace(__func__, __VA_ARGS__)
+#else
+#define RTE_PMD_DEBUG_TRACE(...)
+#endif
+
+/* Logging Macros */
+#define RTE_EDEV_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, EVENTDEV, "%s() line %u: " fmt "\n",  \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define RTE_EDEV_LOG_DEBUG(fmt, args...) \
+	RTE_LOG(DEBUG, EVENTDEV, "%s() line %u: " fmt "\n",  \
+			__func__, __LINE__, ## args)
+#else
+#define RTE_EDEV_LOG_DEBUG(fmt, args...) (void)0
+#endif
+
+/* Macros to check for valid device */
+#define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+		RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+#define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \
+	if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+		RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
+		return; \
+	} \
+} while (0)
+
+#define RTE_EVENTDEV_DETACHED  (0)
+#define RTE_EVENTDEV_ATTACHED  (1)
+
+/** Global structure used for maintaining state of allocated event devices */
+struct rte_eventdev_global {
+	uint8_t nb_devs;	/**< Number of devices found */
+	uint8_t max_devs;	/**< Max number of devices */
+};
+
+extern struct rte_eventdev_global *rte_eventdev_globals;
+/** Pointer to global event devices data structure. */
+extern struct rte_eventdev *rte_eventdevs;
+/** The pool of rte_eventdev structures. */
+
+/**
+ * Get the rte_eventdev structure device pointer for the named device.
+ *
+ * @param name
+ *   device name to select the device structure.
+ *
+ * @return
+ *   - The rte_eventdev structure pointer for the named device, or NULL.
+ */
+static inline struct rte_eventdev *
+rte_event_pmd_get_named_dev(const char *name)
+{
+	struct rte_eventdev *dev;
+	unsigned int i;
+
+	if (name == NULL)
+		return NULL;
+
+	for (i = 0; i < rte_eventdev_globals->max_devs; i++) {
+		dev = &rte_eventdevs[i];
+		if ((dev->attached == RTE_EVENTDEV_ATTACHED) &&
+				(strcmp(dev->data->name, name) == 0))
+			return dev;
+	}
+
+	return NULL;
+}
+
+/**
+ * Validate whether the event device index refers to an attached event device.
+ *
+ * @param dev_id
+ *   Event device index.
+ *
+ * @return
+ *   - If the device index is valid (1) or not (0).
+ */
+static inline unsigned
+rte_event_pmd_is_valid_dev(uint8_t dev_id)
+{
+	struct rte_eventdev *dev;
+
+	if (dev_id >= rte_eventdev_globals->nb_devs)
+		return 0;
+
+	dev = &rte_eventdevs[dev_id];
+	if (dev->attached != RTE_EVENTDEV_ATTACHED)
+		return 0;
+	else
+		return 1;
+}
+
 /**
  * Definitions of all functions exported by a driver through the
  * the generic structure of type *event_dev_ops* supplied in the
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
new file mode 100644
index 0000000..3cae03d
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -0,0 +1,33 @@
+DPDK_17.02 {
+	global:
+
+	rte_eventdevs;
+
+	rte_event_dev_count;
+	rte_event_dev_get_dev_id;
+	rte_event_dev_socket_id;
+	rte_event_dev_info_get;
+	rte_event_dev_configure;
+	rte_event_dev_start;
+	rte_event_dev_stop;
+	rte_event_dev_close;
+	rte_event_dev_dump;
+
+	rte_event_port_default_conf_get;
+	rte_event_port_setup;
+	rte_event_port_dequeue_depth;
+	rte_event_port_enqueue_depth;
+	rte_event_port_count;
+	rte_event_port_link;
+	rte_event_port_unlink;
+	rte_event_port_links_get;
+
+	rte_event_queue_default_conf_get;
+	rte_event_queue_setup;
+	rte_event_queue_count;
+	rte_event_queue_priority;
+
+	rte_event_dequeue_timeout_ticks;
+
+	local: *;
+};
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index f75f0e2..716725a 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -93,6 +93,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF)           += -lrte_mbuf
 _LDLIBS-$(CONFIG_RTE_LIBRTE_NET)            += -lrte_net
 _LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER)          += -lrte_ethdev
 _LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV)      += -lrte_cryptodev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV)       += -lrte_eventdev
 _LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL)        += -lrte_mempool
 _LDLIBS-$(CONFIG_RTE_LIBRTE_RING)           += -lrte_ring
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrte_eal
-- 
2.5.5
