From: Jasvinder Singh <jasvinder.singh@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com, ferruh.yigit@intel.com,
	hemant.agrawal@nxp.com, Jerin.JacobKollanukkaran@cavium.com,
	wenzhuo.lu@intel.com
Subject: [PATCH v2 2/2] net/softnic: add traffic management ops
Date: Mon, 26 Jun 2017 17:43:34 +0100	[thread overview]
Message-ID: <20170626164334.50621-3-jasvinder.singh@intel.com> (raw)
In-Reply-To: <20170626164334.50621-1-jasvinder.singh@intel.com>

The traffic management specific functions of the softnic driver are
supplied through a set of function pointers contained in the generic
structure of type 'rte_tm_ops'. These functions are used to build and
manage the hierarchical QoS scheduler for traffic management.

Signed-off-by: Jasvinder Singh <jasvinder.singh@intel.com>
Signed-off-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
---
v2 changes:
- add TM functions for hierarchical QoS scheduler
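
As a reference for reviewers, here is a minimal usage sketch of how an
application would exercise these ops through the generic ethdev traffic
management API (rte_tm.h). The function name app_tm_setup, the port id
and all hierarchy parameter values are hypothetical, error handling is
abbreviated, and the port id type (uint8_t) follows this DPDK revision;
this sketch is illustrative only and not part of the patch:

	#include <rte_tm.h>

	static int
	app_tm_setup(uint8_t port_id)
	{
		struct rte_tm_error error;
		/* Shaper profile 0: 10 Gbps peak rate (bytes/sec),
		 * 1 MB token bucket.
		 */
		struct rte_tm_shaper_params sp = {
			.peak = { .rate = 1250000000, .size = 1000000 },
		};
		struct rte_tm_node_params np = {
			.shaper_profile_id = 0,
			.nonleaf = { .n_sp_priorities = 1 },
		};
		int status;

		status = rte_tm_shaper_profile_add(port_id, 0, &sp, &error);
		if (status)
			return status;

		/* Root (port level) node: node id 0, no parent,
		 * priority 1, weight 1, level 0.
		 */
		status = rte_tm_node_add(port_id, 0, RTE_TM_NODE_ID_NULL,
			1, 1, 0, &np, &error);
		if (status)
			return status;

		/* Subport/pipe/TC/queue nodes are added similarly, each
		 * referencing its parent's node id, before the hierarchy
		 * is frozen (clear it on failure).
		 */
		return rte_tm_hierarchy_commit(port_id, 1, &error);
	}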

 drivers/net/softnic/Makefile                    |    1 +
 drivers/net/softnic/rte_eth_softnic.c           |   48 +-
 drivers/net/softnic/rte_eth_softnic_internals.h |   81 ++
 drivers/net/softnic/rte_eth_softnic_tm.c        | 1145 +++++++++++++++++++++++
 4 files changed, 1274 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/softnic/rte_eth_softnic_tm.c

diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
index 809112c..e59766d 100644
--- a/drivers/net/softnic/Makefile
+++ b/drivers/net/softnic/Makefile
@@ -47,6 +47,7 @@ LIBABIVER := 1
 # all source are stored in SRCS-y
 #
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_default.c
 
 #
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index d4ac100..24abb8e 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -41,6 +41,8 @@
 #include <rte_vdev.h>
 #include <rte_kvargs.h>
 #include <rte_errno.h>
+#include <rte_tm_driver.h>
+#include <rte_sched.h>
 
 #include "rte_eth_softnic.h"
 #include "rte_eth_softnic_internals.h"
@@ -59,6 +61,10 @@ static const char *pmd_valid_args[] = {
 static struct rte_vdev_driver pmd_drv;
 static struct rte_device *device;
 
+#ifndef TM
+#define TM						0
+#endif
+
 static int
 pmd_eth_dev_configure(struct rte_eth_dev *dev)
 {
@@ -114,6 +120,14 @@ pmd_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct pmd_internals *p = dev->data->dev_private;
 
+#if TM
+	/* Initialize the Traffic Manager for the overlay device */
+	int status = tm_init(p);
+
+	if (status)
+		return status;
+#endif
+
 	/* Clone dev->data from underlay to overlay */
 	memcpy(dev->data->mac_pool_sel,
 		p->udev->data->mac_pool_sel,
@@ -132,6 +146,11 @@ pmd_eth_dev_stop(struct rte_eth_dev *dev)
 
 	/* Call the current function for the underlay device */
 	rte_eth_dev_stop(p->uport_id);
+
+#if TM
+	/* Free the Traffic Manager for the overlay device */
+	tm_free(p);
+#endif
 }
 
 static void
@@ -247,6 +266,14 @@ pmd_eth_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 	rte_eth_dev_mac_addr_remove(p->uport_id, &dev->data->mac_addrs[index]);
 }
 
+static int
+pmd_eth_dev_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+	*(const struct rte_tm_ops **)arg = &pmd_tm_ops;
+
+	return 0;
+}
+
 static uint16_t
 pmd_eth_dev_tx_burst(void *txq,
 	struct rte_mbuf **tx_pkts,
@@ -254,12 +281,30 @@ pmd_eth_dev_tx_burst(void *txq,
 {
 	struct pmd_internals *p = txq;
 
+#if TM
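+	/* TM mode: enqueue into the QoS scheduler, then run the scheduler
+	 * in-line to dequeue and transmit on the underlay device.
+	 */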
+	rte_sched_port_enqueue(p->sched, tx_pkts, nb_pkts);
+	rte_eth_softnic_run(p->oport_id);
+	return nb_pkts;
+#else
 	return rte_eth_tx_burst(p->uport_id, p->txq_id, tx_pkts, nb_pkts);
+#endif
 }
 
 int
-rte_eth_softnic_run(uint8_t port_id __rte_unused)
+rte_eth_softnic_run(uint8_t port_id)
 {
+	struct rte_eth_dev *odev = &rte_eth_devices[port_id];
+	struct pmd_internals *p = odev->data->dev_private;
+	uint32_t n_pkts, n_pkts_deq;
+
+	n_pkts_deq = rte_sched_port_dequeue(p->sched, p->pkts, p->deq_bsz);
+
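+	/* Retry until the underlay TX queue accepts all dequeued packets */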
+	for (n_pkts = 0; n_pkts < n_pkts_deq;)
+		n_pkts += rte_eth_tx_burst(p->uport_id,
+			p->txq_id,
+			&p->pkts[n_pkts],
+			(uint16_t)(n_pkts_deq - n_pkts));
+
 	return 0;
 }
 
@@ -284,6 +329,7 @@ pmd_ops_build(struct eth_dev_ops *o, const struct eth_dev_ops *u)
 	o->mac_addr_set = pmd_eth_dev_mac_addr_set;
 	o->mac_addr_add = pmd_eth_dev_mac_addr_add;
 	o->mac_addr_remove = pmd_eth_dev_mac_addr_remove;
+	o->tm_ops_get = pmd_eth_dev_tm_ops_get;
 }
 
 int
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index d456a54..5ca5121 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -38,9 +38,73 @@
 
 #include <rte_mbuf.h>
 #include <rte_ethdev.h>
+#include <rte_sched.h>
 
+#include <rte_tm_driver.h>
 #include "rte_eth_softnic.h"
 
+#ifndef TM_MAX_SUBPORTS
+#define TM_MAX_SUBPORTS					8
+#endif
+
+#ifndef TM_MAX_PIPES_PER_SUBPORT
+#define TM_MAX_PIPES_PER_SUBPORT		4096
+#endif
+
+#ifndef TM_MAX_QUEUE_SIZE
+#define TM_MAX_QUEUE_SIZE				64
+#endif
+
+/* TM Shaper Profile. */
+struct tm_shaper_profile {
+	TAILQ_ENTRY(tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t shared_shaper_id;
+	uint32_t n_users;
+	struct rte_tm_shaper_params params;
+};
+
+/* TM Node */
+struct tm_node {
+	TAILQ_ENTRY(tm_node) node;
+	uint32_t id;
+	uint32_t priority;
+	uint32_t weight;
+	uint32_t level;
+	uint32_t n_child;
+	struct tm_node *parent_node;
+	struct tm_shaper_profile *shaper_profile;
+	struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(tm_nodes, tm_node);
+TAILQ_HEAD(tm_shaper_profiles, tm_shaper_profile);
+
+/* TM node levels */
+enum tm_node_level {
+	TM_NODE_LEVEL_PORT = 0,
+	TM_NODE_LEVEL_SUBPORT,
+	TM_NODE_LEVEL_PIPE,
+	TM_NODE_LEVEL_TC,
+	TM_NODE_LEVEL_QUEUE,
+	TM_NODE_LEVEL_MAX,
+};
+
+/* TM Configuration */
+struct tm_conf {
+	struct tm_shaper_profiles shaper_profiles;	/**< TM shaper profiles */
+	struct tm_nodes tm_nodes;	/**< TM nodes */
+	uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];	/**< TM nodes per level */
+};
+
+struct tm_params {
+	struct rte_sched_port_params port_params;
+	struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];
+	struct rte_sched_pipe_params
+		pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+	int pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
+};
+
 struct pmd_internals {
 	/* Devices */
 	struct rte_eth_dev *odev;
@@ -54,10 +118,27 @@ struct pmd_internals {
 
 	/* Operation */
 	struct rte_mbuf *pkts[RTE_ETH_SOFTNIC_DEQ_BSZ_MAX];
+	struct tm_params tm_params;
+	struct rte_sched_port *sched;
+	struct tm_conf tm_conf;
 	uint32_t deq_bsz;
 	uint32_t txq_id;
 };
 
+extern const struct rte_tm_ops pmd_tm_ops;
+
+void
+tm_conf_init(struct rte_eth_dev *dev);
+
+void
+tm_conf_uninit(struct rte_eth_dev *dev);
+
+int
+tm_init(struct pmd_internals *p);
+
+void
+tm_free(struct pmd_internals *p);
+
 void
 pmd_ops_inherit(struct eth_dev_ops *o, const struct eth_dev_ops *u);
 
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
new file mode 100644
index 0000000..7c55cfd
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -0,0 +1,1145 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+#include <rte_sched.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+void
+tm_conf_init(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+
+	/* Initialize shaper profiles list */
+	TAILQ_INIT(&p->tm_conf.shaper_profiles);
+
+	/* Initialize TM nodes */
+	TAILQ_INIT(&p->tm_conf.tm_nodes);
+
+	memset(p->tm_conf.n_tm_nodes, 0, sizeof(p->tm_conf.n_tm_nodes));
+}
+
+void
+tm_conf_uninit(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profile *shaper_profile;
+	struct tm_node *tm_node;
+
+	/* Remove all tm shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&p->tm_conf.shaper_profiles))) {
+		TAILQ_REMOVE(&p->tm_conf.shaper_profiles,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
+
+	/* Remove all tm nodes */
+	while ((tm_node =
+	       TAILQ_FIRST(&p->tm_conf.tm_nodes))) {
+		TAILQ_REMOVE(&p->tm_conf.tm_nodes,
+			     tm_node, node);
+		rte_free(tm_node);
+	}
+
+	memset(p->tm_conf.n_tm_nodes, 0, sizeof(p->tm_conf.n_tm_nodes));
+}
+
+static struct tm_shaper_profile *
+tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profiles *shaper_profile_list =
+		&p->tm_conf.shaper_profiles;
+	struct tm_shaper_profile *sp;
+
+	TAILQ_FOREACH(sp, shaper_profile_list, node) {
+		if (shaper_profile_id == sp->shaper_profile_id)
+			return sp;
+	}
+
+	return NULL;
+}
+
+static int
+tm_shaper_profile_count(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profiles *shaper_profile_list =
+		&p->tm_conf.shaper_profiles;
+	struct tm_shaper_profile *sp;
+	int n_shapers = 0;
+
+	/* Private Shaper Profile */
+	TAILQ_FOREACH(sp, shaper_profile_list, node) {
+		if (sp->shared_shaper_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
+			n_shapers++;
+	}
+
+	return n_shapers;
+}
+
+static int
+tm_shared_shaper_count(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profiles *shaper_profile_list =
+		&p->tm_conf.shaper_profiles;
+	struct tm_shaper_profile *sp;
+	int n_shapers = 0;
+
+	/* Shared Shaper */
+	TAILQ_FOREACH(sp, shaper_profile_list, node) {
+		if (sp->shared_shaper_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
+			n_shapers++;
+	}
+
+	return n_shapers;
+}
+
+static struct tm_shaper_profile *
+tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profiles *shaper_profile_list =
+		&p->tm_conf.shaper_profiles;
+	struct tm_shaper_profile *sp;
+
+	TAILQ_FOREACH(sp, shaper_profile_list, node) {
+		if (shared_shaper_id == sp->shared_shaper_id)
+			return sp;
+	}
+
+	return NULL;
+}
+
+static struct tm_node *
+tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_nodes *tm_nodes_list = &p->tm_conf.tm_nodes;
+	struct tm_node *tm_node;
+
+	TAILQ_FOREACH(tm_node, tm_nodes_list, node) {
+		if (tm_node->id == node_id)
+			return tm_node;
+	}
+
+	return NULL;
+}
+
+static int
+tm_node_get_child(struct rte_eth_dev *dev, uint32_t parent_id)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_nodes *tm_nodes_list = &p->tm_conf.tm_nodes;
+	struct tm_node *tm_node;
+	int n_child = 0;
+
+	TAILQ_FOREACH(tm_node, tm_nodes_list, node) {
+		if (tm_node->parent_node &&
+			tm_node->parent_node->id == parent_id)
+			n_child++;
+	}
+
+	return n_child;
+}
+
+static struct tm_node *
+tm_root_node_present(struct rte_eth_dev *dev)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_nodes *tm_nodes_list = &p->tm_conf.tm_nodes;
+	struct tm_node *tm_node;
+
+	TAILQ_FOREACH(tm_node, tm_nodes_list, node) {
+		if (!tm_node->parent_node)
+			return tm_node;
+	}
+	return NULL;
+}
+
+int
+tm_init(struct pmd_internals *p)
+{
+	struct tm_params *t = &p->tm_params;
+	struct tm_nodes *tm_nodes_list = &p->tm_conf.tm_nodes;
+	uint32_t n_subports, subport_id, n_pipes;
+	struct tm_node *tm_node;
+	int status;
+
+	/* Port */
+	t->port_params.name = p->odev->data->name;
+	t->port_params.socket = p->udev->data->numa_node;
+	t->port_params.rate = p->udev->data->dev_link.link_speed;
+	t->port_params.mtu = p->udev->data->mtu;
+
+	p->sched = rte_sched_port_config(&t->port_params);
+	if (!p->sched)
+		return -EINVAL;
+
+	/* Subport */
+	n_subports = t->port_params.n_subports_per_port;
+	for (subport_id = 0; subport_id < n_subports; subport_id++) {
+		uint32_t n_pipes_per_subport
+			= t->port_params.n_pipes_per_subport;
+		uint32_t pipe_id;
+
+		status = rte_sched_subport_config(p->sched,
+				subport_id,
+				&t->subport_params[subport_id]);
+		if (status) {
+			rte_sched_port_free(p->sched);
+			return -EINVAL;
+		}
+
+		/* Pipe */
+		n_pipes = 0;
+		pipe_id = n_subports + 1;
+		for (; pipe_id < n_pipes_per_subport; pipe_id++) {
+			TAILQ_FOREACH(tm_node, tm_nodes_list, node) {
+				if (tm_node->parent_node &&
+					tm_node->parent_node->id == subport_id)
+					n_pipes++;
+			}
+
+			uint32_t pos = subport_id * n_pipes + pipe_id;
+			int profile_id = t->pipe_to_profile[pos];
+
+			if (profile_id < 0)
+				continue;
+
+			status = rte_sched_pipe_config(p->sched,
+				subport_id,
+				pipe_id,
+				profile_id);
+			if (status) {
+				rte_sched_port_free(p->sched);
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void
+tm_free(struct pmd_internals *p)
+{
+	if (p->sched)
+		rte_sched_port_free(p->sched);
+}
+
+/* Traffic manager node type get */
+static int
+pmd_tm_node_type_get(struct rte_eth_dev *dev,
+	uint32_t node_id,
+	int *is_leaf,
+	struct rte_tm_error *error)
+{
+	struct tm_node *tm_node;
+
+	if (!is_leaf || !error)
+		return -EINVAL;
+
+	/* Check: node id */
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id invalid!";
+		return -EINVAL;
+	}
+
+	tm_node = tm_node_search(dev, node_id);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node doesn't exist!";
+		return -EINVAL;
+	}
+
+	if (tm_node->n_child)
+		*is_leaf = 0;
+	else
+		*is_leaf = 1;
+
+	return 0;
+}
+
+/* Traffic manager capabilities get */
+static int
+pmd_tm_capabilities_get(struct rte_eth_dev *dev,
+	struct rte_tm_capabilities *cap,
+	struct rte_tm_error *error)
+{
+	uint64_t n_nodes_level1 = TM_MAX_SUBPORTS;
+	uint64_t n_nodes_level2 = n_nodes_level1 * TM_MAX_PIPES_PER_SUBPORT;
+	uint64_t n_nodes_level3 = n_nodes_level2 * RTE_SCHED_QUEUES_PER_PIPE;
+	struct pmd_internals *p = dev->data->dev_private;
+	uint64_t ls = p->udev->data->dev_link.link_speed;
+	int i;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+	/* TM capabilities */
+	cap->n_nodes_max = n_nodes_level1 + n_nodes_level2 + n_nodes_level3 + 1;
+	cap->n_levels_max = TM_NODE_LEVEL_MAX;
+	cap->non_leaf_nodes_identical = 0;
+	cap->leaf_nodes_identical = 1;
+	cap->shaper_n_max = n_nodes_level1 + n_nodes_level2;
+	cap->shaper_private_n_max = n_nodes_level2;
+	cap->shaper_private_dual_rate_n_max = 0;
+	cap->shaper_private_rate_min = 0;
+	cap->shaper_private_rate_max = (ls * 1000000) / 8;
+	cap->shaper_shared_n_max = n_nodes_level1;
+	cap->shaper_shared_n_nodes_per_shaper_max = TM_MAX_PIPES_PER_SUBPORT;
+	cap->shaper_shared_n_shapers_per_node_max = n_nodes_level1;
+	cap->shaper_shared_dual_rate_n_max = 0;
+	cap->shaper_shared_rate_min = 0;
+	cap->shaper_shared_rate_max = (ls * 1000000) / 8;
+	cap->shaper_pkt_length_adjust_min = 0;
+	cap->shaper_pkt_length_adjust_max = RTE_SCHED_FRAME_OVERHEAD_DEFAULT;
+	cap->sched_n_children_max = TM_MAX_PIPES_PER_SUBPORT;
+	cap->sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+	cap->sched_wfq_n_children_per_group_max = TM_MAX_PIPES_PER_SUBPORT;
+	cap->sched_wfq_n_groups_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+	cap->sched_wfq_weight_max = 1;
+	cap->cman_head_drop_supported = 0;
+	cap->cman_wred_context_n_max = n_nodes_level3;
+	cap->cman_wred_context_private_n_max = n_nodes_level3;
+	cap->cman_wred_context_shared_n_max = 0;
+	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+
+	for (i = 0; i < RTE_TM_COLORS; i++) {
+		cap->mark_vlan_dei_supported[i] = 0;
+		cap->mark_ip_ecn_tcp_supported[i] = 0;
+		cap->mark_ip_ecn_sctp_supported[i] = 0;
+		cap->mark_ip_dscp_supported[i] = 0;
+	}
+
+	cap->dynamic_update_mask = 0;
+	cap->stats_mask = 0;
+
+	return 0;
+}
+
+/* Traffic manager level capabilities get */
+static int
+pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
+	uint32_t level_id,
+	struct rte_tm_level_capabilities *cap,
+	struct rte_tm_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	uint64_t ls = p->udev->data->dev_link.link_speed;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (level_id >= TM_NODE_LEVEL_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "level id invalid!";
+		return -EINVAL;
+	}
+
+	memset(cap, 0, sizeof(struct rte_tm_level_capabilities));
+
+	if (level_id == TM_NODE_LEVEL_PORT) {
+		/* Root node */
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+		cap->non_leaf_nodes_identical = 1;
+		cap->leaf_nodes_identical = 0;
+		cap->nonleaf.shaper_private_supported = 1;
+		cap->nonleaf.shaper_private_dual_rate_supported = 0;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		cap->nonleaf.shaper_private_rate_max = (ls * 1000000) / 8;
+		cap->nonleaf.shaper_shared_n_max = 0;
+		cap->nonleaf.sched_n_children_max = TM_MAX_SUBPORTS;
+		cap->nonleaf.sched_sp_n_priorities_max = 0;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 0;
+		cap->nonleaf.stats_mask = 0;
+
+	} else if (level_id == TM_NODE_LEVEL_SUBPORT) {
+		/* Subport */
+		cap->n_nodes_max = TM_MAX_SUBPORTS;
+		cap->n_nodes_nonleaf_max = TM_MAX_SUBPORTS;
+		cap->n_nodes_leaf_max = 0;
+		cap->non_leaf_nodes_identical = 1;
+		cap->leaf_nodes_identical = 0;
+		cap->nonleaf.shaper_private_supported = 0;
+		cap->nonleaf.shaper_private_dual_rate_supported = 0;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		cap->nonleaf.shaper_private_rate_max = (ls * 1000000) / 8;
+		cap->nonleaf.shaper_shared_n_max = 1;
+		cap->nonleaf.sched_n_children_max = TM_MAX_PIPES_PER_SUBPORT;
+		cap->nonleaf.sched_sp_n_priorities_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_n_children_per_group_max
+			= TM_MAX_PIPES_PER_SUBPORT;
+		cap->nonleaf.sched_wfq_n_groups_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.stats_mask = 0;
+
+	} else if (level_id == TM_NODE_LEVEL_PIPE) {
+		/* Pipe */
+		cap->n_nodes_max
+			= TM_MAX_PIPES_PER_SUBPORT * TM_MAX_SUBPORTS;
+		cap->n_nodes_nonleaf_max
+			= TM_MAX_PIPES_PER_SUBPORT * TM_MAX_SUBPORTS;
+		cap->n_nodes_leaf_max = 0;
+		cap->non_leaf_nodes_identical = 1;
+		cap->leaf_nodes_identical = 0;
+		cap->nonleaf.shaper_private_supported = 1;
+		cap->nonleaf.shaper_private_dual_rate_supported = 0;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		cap->nonleaf.shaper_private_rate_max
+			= (ls * 1000000) / (8 * TM_MAX_PIPES_PER_SUBPORT);
+		cap->nonleaf.shaper_shared_n_max = 1;
+		cap->nonleaf.sched_n_children_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_sp_n_priorities_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+		cap->nonleaf.sched_wfq_n_groups_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.stats_mask = 0;
+
+	} else if (level_id == TM_NODE_LEVEL_TC) {
+		/* Traffic Class */
+		cap->n_nodes_max
+			= TM_MAX_SUBPORTS
+			* TM_MAX_PIPES_PER_SUBPORT
+			* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->n_nodes_nonleaf_max
+			= TM_MAX_SUBPORTS
+			* TM_MAX_PIPES_PER_SUBPORT
+			* RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->n_nodes_leaf_max = 0;
+		cap->non_leaf_nodes_identical = 1;
+		cap->leaf_nodes_identical = 0;
+		cap->nonleaf.shaper_private_supported = 1;
+		cap->nonleaf.shaper_private_dual_rate_supported = 0;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		cap->nonleaf.shaper_private_rate_max
+			= (ls * 1000000) / (8 * TM_MAX_PIPES_PER_SUBPORT);
+		cap->nonleaf.shaper_shared_n_max = 0;
+		cap->nonleaf.sched_n_children_max = RTE_SCHED_QUEUES_PER_PIPE;
+		cap->nonleaf.sched_sp_n_priorities_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_n_children_per_group_max
+			= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+		cap->nonleaf.sched_wfq_n_groups_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.stats_mask = 0;
+
+	} else {
+		/* TM Queues */
+		cap->n_nodes_max
+			= TM_MAX_SUBPORTS
+			* TM_MAX_PIPES_PER_SUBPORT
+			* RTE_SCHED_QUEUES_PER_PIPE;
+		cap->n_nodes_nonleaf_max = 0;
+		cap->n_nodes_leaf_max = cap->n_nodes_max;
+		cap->non_leaf_nodes_identical = 0;
+		cap->leaf_nodes_identical = 1;
+		cap->leaf.shaper_private_supported = 0;
+		cap->leaf.shaper_private_dual_rate_supported = 0;
+		cap->leaf.shaper_private_rate_min = 0;
+		cap->leaf.shaper_private_rate_max = 0;
+		cap->leaf.shaper_shared_n_max = 0;
+		cap->leaf.cman_head_drop_supported = 0;
+		cap->leaf.cman_wred_context_private_supported = 1;
+		cap->leaf.cman_wred_context_shared_n_max = 0;
+		cap->leaf.stats_mask = 0;
+	}
+	return 0;
+}
+
+/* Traffic manager node capabilities get */
+static int
+pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
+	uint32_t node_id,
+	struct rte_tm_node_capabilities *cap,
+	struct rte_tm_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	uint64_t ls = p->udev->data->dev_link.link_speed;
+	struct tm_node *tm_node;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	tm_node = tm_node_search(dev, node_id);
+
+	/* Check: node validity */
+	if ((node_id == RTE_TM_NODE_ID_NULL) || (!tm_node)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id invalid!";
+		return -EINVAL;
+	}
+
+	memset(cap, 0, sizeof(struct rte_tm_node_capabilities));
+
+	/* Check: node level */
+	if (tm_node->level == TM_NODE_LEVEL_PORT) {
+		/* Root node */
+		cap->shaper_private_supported = 1;
+		cap->shaper_private_dual_rate_supported = 0;
+		cap->shaper_private_rate_min = 0;
+		cap->shaper_private_rate_max = (ls * 1000000) / 8;
+		cap->shaper_shared_n_max = 0;
+		cap->nonleaf.sched_n_children_max = TM_MAX_SUBPORTS;
+		cap->nonleaf.sched_sp_n_priorities_max = 0;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 0;
+
+	} else if (tm_node->level == TM_NODE_LEVEL_SUBPORT) {
+		/* Subport */
+		cap->shaper_private_supported = 0;
+		cap->shaper_private_dual_rate_supported = 0;
+		cap->shaper_private_rate_min = 0;
+		cap->shaper_private_rate_max = (ls * 1000000) / 8;
+		cap->shaper_shared_n_max = 1;
+		cap->nonleaf.sched_n_children_max = TM_MAX_PIPES_PER_SUBPORT;
+		cap->nonleaf.sched_sp_n_priorities_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_n_children_per_group_max
+			= TM_MAX_PIPES_PER_SUBPORT;
+		cap->nonleaf.sched_wfq_n_groups_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+
+	} else if (tm_node->level == TM_NODE_LEVEL_PIPE) {
+		/* Pipe */
+		cap->shaper_private_supported = 1;
+		cap->shaper_private_dual_rate_supported = 0;
+		cap->shaper_private_rate_min = 0;
+		cap->shaper_private_rate_max
+			= (ls * 1000000) / (8 * TM_MAX_PIPES_PER_SUBPORT);
+		cap->shaper_shared_n_max = 0;
+		cap->nonleaf.sched_n_children_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_sp_n_priorities_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+		cap->nonleaf.sched_wfq_n_groups_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+
+	} else if (tm_node->level == TM_NODE_LEVEL_TC) {
+		/* Traffic Class */
+		cap->shaper_private_supported = 0;
+		cap->shaper_private_dual_rate_supported = 0;
+		cap->shaper_private_rate_min = 0;
+		cap->shaper_private_rate_max
+			= (ls * 1000000) / (8 * TM_MAX_PIPES_PER_SUBPORT);
+		cap->shaper_shared_n_max = 0;
+		cap->nonleaf.sched_n_children_max
+			= RTE_SCHED_QUEUES_PER_PIPE;
+		cap->nonleaf.sched_sp_n_priorities_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_n_children_per_group_max
+			= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+		cap->nonleaf.sched_wfq_n_groups_max
+			= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+	} else {
+		/* Queue */
+		cap->shaper_private_supported = 1;
+		cap->shaper_private_dual_rate_supported = 0;
+		cap->shaper_private_rate_min = 0;
+		cap->shaper_private_rate_max = 0;
+		cap->shaper_shared_n_max = 0;
+		cap->leaf.cman_head_drop_supported = 1;
+		cap->leaf.cman_wred_context_private_supported = 0;
+		cap->leaf.cman_wred_context_shared_n_max = 0;
+	}
+	cap->stats_mask = 0;
+
+	return 0;
+}
+
+/* Traffic manager shaper profile add */
+static int
+pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
+	uint32_t shaper_profile_id,
+	struct rte_tm_shaper_params *profile,
+	struct rte_tm_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profile *sp;
+	struct tm_shaper_profiles *spl = &p->tm_conf.shaper_profiles;
+	char shaper_name[256];
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	/* Shaper Rate */
+	if (!profile->peak.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+		error->message = "rate not specified!";
+		return -EINVAL;
+	}
+
+	/* Shaper Bucket Size */
+	if (!profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "bucket size not specified!";
+		return -EINVAL;
+	}
+
+	/* Shaper Committed Rate */
+	if (profile->committed.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+		error->message = "dual rate shaper not supported!";
+		return -EINVAL;
+	}
+
+	/* Shaper Committed Size */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "dual rate shaper not supported!";
+		return -EINVAL;
+	}
+
+	sp = tm_shaper_profile_search(dev, shaper_profile_id);
+
+	if (sp) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID already exists!";
+		return -EINVAL;
+	}
+
+	snprintf(shaper_name, sizeof(shaper_name),
+		"tm_shaper_profile_%u", shaper_profile_id);
+
+	sp = rte_zmalloc(shaper_name, sizeof(struct tm_shaper_profile), 0);
+	if (!sp)
+		return -ENOMEM;
+
+	sp->shaper_profile_id = shaper_profile_id;
+	sp->shared_shaper_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+
+	(void)rte_memcpy(&sp->params, profile,
+			 sizeof(struct rte_tm_shaper_params));
+
+	if (!spl->tqh_first)
+		tm_conf_init(dev);
+
+	TAILQ_INSERT_TAIL(spl, sp, node);
+
+	return 0;
+}
+
+/* Traffic manager shaper profile delete */
+static int
+pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
+	uint32_t shaper_profile_id,
+	struct rte_tm_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profile *sp;
+
+	if (!error)
+		return -EINVAL;
+
+	sp = tm_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!sp) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID doesn't exist!";
+		return -EINVAL;
+	}
+
+	/* Check: profile usage */
+	if (sp->n_users) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use!";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&p->tm_conf.shaper_profiles, sp, node);
+	rte_free(sp);
+
+	return 0;
+}
+
+/* Traffic manager shared shaper add/update */
+static int
+pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
+	uint32_t shared_shaper_id,
+	uint32_t shaper_profile_id,
+	struct rte_tm_error *error)
+{
+	struct tm_shaper_profile *sp;
+	uint32_t n_shared_shapers;
+
+	if (!error)
+		return -EINVAL;
+
+	sp = tm_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!sp) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "shaper profile doesn't exist!";
+		return -EINVAL;
+	}
+
+	/* Shared shaper add/update */
+	n_shared_shapers = tm_shared_shaper_count(dev);
+	if (sp->shared_shaper_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
+		sp->shared_shaper_id = n_shared_shapers;
+	else
+		sp->shared_shaper_id = shared_shaper_id;
+
+	return 0;
+}
+
+/* Traffic manager shared shaper delete */
+static int
+pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
+	uint32_t shared_shaper_id,
+	struct rte_tm_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_shaper_profile *sp;
+
+	if (!error)
+		return -EINVAL;
+
+	sp = tm_shared_shaper_search(dev, shared_shaper_id);
+
+	if (!sp) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "shared shaper doesn't exist!";
+		return -EINVAL;
+	}
+
+	/* Check: profile usage */
+	if (sp->n_users) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use!";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&p->tm_conf.shaper_profiles, sp, node);
+	rte_free(sp);
+
+	return 0;
+}
+
+/* Traffic manager node add */
+static int
+pmd_tm_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+	uint32_t parent_node_id, uint32_t priority, uint32_t weight,
+	uint32_t level_id, struct rte_tm_node_params *params,
+	struct rte_tm_error *error)
+{
+	struct tm_node *tm_node, *parent_node;
+	struct tm_shaper_profile *sp;
+	uint64_t nodes_l1 = TM_MAX_SUBPORTS;
+	uint64_t nodes_l2 = nodes_l1 * TM_MAX_PIPES_PER_SUBPORT;
+	uint64_t nodes_l3 = nodes_l2 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+	uint64_t nodes_l4 = nodes_l2 * RTE_SCHED_QUEUES_PER_PIPE;
+	char node_name[256];
+
+	struct pmd_internals *p = dev->data->dev_private;
+
+	if (!params || !error)
+		return -EINVAL;
+
+	/* Check: node id NULL */
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id!";
+		return -EINVAL;
+	}
+
+	/* Check: node priority */
+	if (!priority) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+		error->message = "priority not supported!";
+		return -EINVAL;
+	}
+
+	/* Check: node weight */
+	if (weight < 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "weight not supported!";
+		return -EINVAL;
+	}
+
+	/* Check: node ID used */
+	if (tm_node_search(dev, node_id)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id already used!";
+		return -EINVAL;
+	}
+
+	/* Check: level */
+	if ((level_id != RTE_TM_NODE_LEVEL_ID_ANY) &&
+		(level_id >= TM_NODE_LEVEL_MAX)) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "level id exceeds the maximum allowed!";
+		return -EINVAL;
+	}
+
+	/* Check: number of nodes per level */
+	if (((level_id == TM_NODE_LEVEL_PORT) &&
+		(p->tm_conf.n_tm_nodes[TM_NODE_LEVEL_PORT] > 1)) ||
+		((level_id == TM_NODE_LEVEL_SUBPORT) &&
+		(p->tm_conf.n_tm_nodes[TM_NODE_LEVEL_SUBPORT] > nodes_l1)) ||
+		((level_id == TM_NODE_LEVEL_PIPE) &&
+		(p->tm_conf.n_tm_nodes[TM_NODE_LEVEL_PIPE] > nodes_l2)) ||
+		((level_id == TM_NODE_LEVEL_TC) &&
+		(p->tm_conf.n_tm_nodes[TM_NODE_LEVEL_TC] > nodes_l3)) ||
+		((level_id == TM_NODE_LEVEL_QUEUE) &&
+		(p->tm_conf.n_tm_nodes[TM_NODE_LEVEL_QUEUE] > nodes_l4))) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "number of nodes exceeds the max at this level!";
+		return -EINVAL;
+	}
+
+	/* Check: node shaper profile */
+	sp = tm_shaper_profile_search(dev, params->shaper_profile_id);
+	if (!sp) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+		error->message = "shaper profile invalid!";
+		return -EINVAL;
+	}
+
+	/* Check: root node */
+	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+		/* Check: level id */
+		if (level_id) {
+			error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+			error->message = "level id invalid!";
+			return -EINVAL;
+		}
+
+		/* Check: root node shaper params */
+		if ((!sp) || (sp->params.committed.size > 0) ||
+			(sp->params.committed.rate > 0) ||
+			(params->n_shared_shapers > 0) ||
+			(params->shared_shaper_id) ||
+			(params->shaper_profile_id
+				== RTE_TM_SHAPER_PROFILE_ID_NONE)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "root node shaper invalid!";
+			return -EINVAL;
+		}
+
+		/* Check: root node */
+		if (tm_root_node_present(dev)) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "root node already present!";
+			return -EINVAL;
+		}
+	}
+
+	/* Node add */
+	snprintf(node_name, sizeof(node_name), "node_%u", node_id);
+	tm_node = rte_zmalloc(node_name, sizeof(struct tm_node), 0);
+	if (!tm_node)
+		return -ENOMEM;
+
+	/* Check: parent node */
+	if (parent_node_id != RTE_TM_NODE_ID_NULL) {
+		parent_node = tm_node_search(dev, parent_node_id);
+
+		if (!parent_node ||
+			((level_id != RTE_TM_NODE_LEVEL_ID_ANY) &&
+			(level_id != parent_node->level + 1))) {
+			error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+			error->message = "parent node or level id invalid!";
+			rte_free(tm_node);
+			return -EINVAL;
+		}
+
+		tm_node->parent_node = parent_node;
+		parent_node->n_child += 1;
+
+	} else {
+		tm_node->parent_node = NULL;
+	}
+
+	tm_node->id = node_id;
+	tm_node->priority = priority;
+	tm_node->weight = weight;
+	tm_node->n_child = 0;
+	tm_node->level = (level_id != RTE_TM_NODE_LEVEL_ID_ANY) ? level_id :
+		(tm_node->parent_node ? tm_node->parent_node->level + 1 : 0);
+
+	(void)rte_memcpy(&tm_node->params,
+		params, sizeof(struct rte_tm_node_params));
+
+	/* Update: shaper profile users */
+	tm_node->shaper_profile = sp;
+	sp->n_users++;
+
+	/* Update: number of nodes */
+	p->tm_conf.n_tm_nodes[tm_node->level] += 1;
+
+	/* Add node to the nodes list */
+	TAILQ_INSERT_TAIL(&p->tm_conf.tm_nodes, tm_node, node);
+
+	return 0;
+}
+
+/* Traffic manager node delete */
+static int
+pmd_tm_node_delete(struct rte_eth_dev *dev,
+	uint32_t node_id,
+	struct rte_tm_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_node *tm_node;
+
+	if (!error)
+		return -EINVAL;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* Check: node id */
+	tm_node = tm_node_search(dev, node_id);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id invalid!";
+		return -EINVAL;
+	}
+
+	/* Check: node child */
+	if (tm_node->n_child) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node children exist!";
+		return -EINVAL;
+	}
+
+	/* Delete node */
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->n_users--;
+	if (tm_node->parent_node)
+		tm_node->parent_node->n_child--;
+	TAILQ_REMOVE(&p->tm_conf.tm_nodes, tm_node, node);
+	p->tm_conf.n_tm_nodes[tm_node->level]--;
+	rte_free(tm_node);
+
+	return 0;
+}
+
+/* Traffic manager hierarchy commit */
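+/* Maps the TM hierarchy onto librte_sched configuration: port, subport
+ * and pipe parameters are derived from the TM nodes and shaper profiles
+ * built up through the ops above.
+ */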
+static int
+pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
+	int clear_on_fail,
+	struct rte_tm_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct tm_params *t = &p->tm_params;
+	struct tm_conf *tm_conf = &p->tm_conf;
+	struct tm_node *tm_node;
+	struct tm_shaper_profile *sp;
+	uint32_t i, pid = 0, subport_id, pipe_id, n_subports;
+	uint32_t n_subports_per_port, n_pipes_per_subport, n_pipe_profiles;
+	struct tm_shaper_profiles *sp_list = &tm_conf->shaper_profiles;
+
+	if (!error) {
+		if (clear_on_fail)
+			goto fail_clear;
+		return -EINVAL;
+	}
+
+	/* TM Port */
+	tm_node = tm_root_node_present(dev);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "root node doesn't exist!";
+		if (clear_on_fail)
+			goto fail_clear;
+		return -EINVAL;
+	}
+
+	n_subports_per_port = tm_conf->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
+	if (n_subports_per_port > TM_MAX_SUBPORTS) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "Number of subports exceeded!";
+		if (clear_on_fail)
+			goto fail_clear;
+		return -EINVAL;
+	}
+
+	n_pipes_per_subport = tm_conf->n_tm_nodes[TM_NODE_LEVEL_PIPE];
+	if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "Number of pipes exceeded!";
+		if (clear_on_fail)
+			goto fail_clear;
+		return -EINVAL;
+	}
+	n_pipe_profiles = tm_shaper_profile_count(dev);
+	if (n_pipe_profiles > RTE_SCHED_PIPE_PROFILES_PER_PORT) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "Number of pipe profiles exceeded!";
+		if (clear_on_fail)
+			goto fail_clear;
+		return -EINVAL;
+	}
+
+	t->port_params.n_subports_per_port = n_subports_per_port;
+	t->port_params.n_pipes_per_subport = n_pipes_per_subport;
+	t->port_params.n_pipe_profiles = n_pipe_profiles;
+	t->port_params.frame_overhead = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+		t->port_params.qsize[i] = TM_MAX_QUEUE_SIZE;
+
+	TAILQ_FOREACH(sp, sp_list, node) {
+		if (sp->shared_shaper_id == RTE_TM_SHAPER_PROFILE_ID_NONE) {
+			t->port_params.pipe_profiles[pid].tb_rate
+				= sp->params.peak.rate;
+			t->port_params.pipe_profiles[pid].tb_size
+				= sp->params.peak.size;
+
+			for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+				t->port_params.pipe_profiles[pid].tc_rate[i]
+					= sp->params.peak.rate;
+
+			t->port_params.pipe_profiles[pid].tc_period = 40;
+
+			for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++)
+				t->port_params.pipe_profiles[pid].wrr_weights[i]
+					= 1;
+
+			pid++;
+		}
+	}
+
+	/* TM Subport */
+	n_subports = t->port_params.n_subports_per_port;
+	for (subport_id = 0; subport_id < n_subports; subport_id++) {
+		struct tm_node *subport = tm_node_search(dev, subport_id + 1);
+		uint32_t n_shapers = subport->params.n_shared_shapers;
+
+		for (i = 0; i < n_shapers; i++) {
+			uint32_t j;
+			struct tm_shaper_profile *sp
+				= tm_shared_shaper_search(dev,
+					subport->params.shared_shaper_id[i]);
+
+			if (!sp)
+				continue;
+
+			t->subport_params[subport_id].tb_rate
+				= sp->params.peak.rate;
+			t->subport_params[subport_id].tb_size
+				= sp->params.peak.size;
+			for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++)
+				t->subport_params[subport_id].tc_rate[j]
+					= sp->params.peak.rate;
+
+			t->subport_params[subport_id].tc_period = 10;
+		}
+
+		/* TM Pipe */
+		n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+		pipe_id = n_subports + 1;
+		for (; pipe_id < n_pipes_per_subport; pipe_id++) {
+			uint32_t n_max_pipes
+				= tm_node_get_child(dev, subport_id);
+			uint32_t pos = subport_id * n_max_pipes + pipe_id;
+			struct tm_node *pipe = tm_node_search(dev, pos);
+
+			if (!pipe || !pipe->shaper_profile)
+				continue;
+
+			t->pipe_to_profile[pos]
+				= pipe->shaper_profile->shaper_profile_id;
+		}
+	}
+
+	return 0;
+
+fail_clear:
+	if (clear_on_fail) {
+		tm_conf_uninit(dev);
+		tm_conf_init(dev);
+	}
+	return -EINVAL;
+}
+
+/* Traffic manager read stats counters for specific node */
+static int
+pmd_tm_node_stats_read(struct rte_eth_dev *dev __rte_unused,
+	uint32_t node_id __rte_unused,
+	struct rte_tm_node_stats *stats __rte_unused,
+	uint64_t *stats_mask __rte_unused,
+	int clear __rte_unused,
+	struct rte_tm_error *error __rte_unused)
+{
+	return 0;
+}
+
+const struct rte_tm_ops pmd_tm_ops = {
+	.node_type_get = pmd_tm_node_type_get,
+	.capabilities_get = pmd_tm_capabilities_get,
+	.level_capabilities_get = pmd_tm_level_capabilities_get,
+	.node_capabilities_get = pmd_tm_node_capabilities_get,
+
+	.wred_profile_add = NULL,
+	.wred_profile_delete = NULL,
+	.shared_wred_context_add_update = NULL,
+	.shared_wred_context_delete = NULL,
+
+	.shaper_profile_add = pmd_tm_shaper_profile_add,
+	.shaper_profile_delete = pmd_tm_shaper_profile_delete,
+	.shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
+	.shared_shaper_delete = pmd_tm_shared_shaper_delete,
+
+	.node_add = pmd_tm_node_add,
+	.node_delete = pmd_tm_node_delete,
+	.node_suspend = NULL,
+	.node_resume = NULL,
+	.hierarchy_commit = pmd_tm_hierarchy_commit,
+
+	.node_parent_update = NULL,
+	.node_shaper_update = NULL,
+	.node_shared_shaper_update = NULL,
+	.node_stats_update = NULL,
+	.node_wfq_weight_mode_update = NULL,
+	.node_cman_update = NULL,
+	.node_wred_context_update = NULL,
+	.node_shared_wred_context_update = NULL,
+
+	.node_stats_read = pmd_tm_node_stats_read,
+};
-- 
2.9.3
