All of lore.kernel.org
 help / color / mirror / Atom feed
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com,
	beilei.xing@intel.com, qiming.yang@intel.com,
	Ting Xu <ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v4 4/7] net/ice: support QoS config VF bandwidth in DCF
Date: Wed, 30 Jun 2021 14:53:41 +0800	[thread overview]
Message-ID: <20210630065344.50352-5-ting.xu@intel.com> (raw)
In-Reply-To: <20210630065344.50352-1-ting.xu@intel.com>

This patch supports the ETS-based QoS configuration. It enables the DCF
to configure bandwidth limits for each VF VSI of different TCs. A
hierarchical scheduler tree is built with port, TC and VSI nodes.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 drivers/net/ice/ice_dcf.c        |   9 +-
 drivers/net/ice/ice_dcf.h        |  48 ++
 drivers/net/ice/ice_dcf_ethdev.c |  14 +
 drivers/net/ice/ice_dcf_ethdev.h |   3 +
 drivers/net/ice/ice_dcf_parent.c |  30 ++
 drivers/net/ice/ice_dcf_sched.c  | 759 +++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build      |   3 +-
 7 files changed, 864 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ice/ice_dcf_sched.c

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index d72a6f357e..349d23ee4f 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -235,7 +235,8 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
 	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
 	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
-	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+	       VIRTCHNL_VF_OFFLOAD_QOS;
 
 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
 					  (uint8_t *)&caps, sizeof(caps));
@@ -668,6 +669,9 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 		}
 	}
 
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		ice_dcf_tm_conf_init(eth_dev);
+
 	hw->eth_dev = eth_dev;
 	rte_intr_callback_register(&pci_dev->intr_handle,
 				   ice_dcf_dev_interrupt_handler, hw);
@@ -703,6 +707,9 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	ice_dcf_mode_disable(hw);
 	iavf_shutdown_adminq(&hw->avf);
 
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
+		ice_dcf_tm_conf_uninit(eth_dev);
+
 	rte_free(hw->arq_buf);
 	rte_free(hw->vf_vsi_map);
 	rte_free(hw->vf_res);
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 587093b909..1c7653de3d 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -6,6 +6,7 @@
 #define _ICE_DCF_H_
 
 #include <ethdev_driver.h>
+#include <rte_tm_driver.h>
 
 #include <iavf_prototype.h>
 #include <iavf_adminq_cmd.h>
@@ -30,6 +31,49 @@ struct dcf_virtchnl_cmd {
 	volatile int pending;
 };
 
+struct ice_dcf_tm_shaper_profile {
+	TAILQ_ENTRY(ice_dcf_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(ice_dcf_shaper_profile_list, ice_dcf_tm_shaper_profile);
+
+/* Struct to store Traffic Manager node configuration. */
+struct ice_dcf_tm_node {
+	TAILQ_ENTRY(ice_dcf_tm_node) node;
+	uint32_t id;
+	uint32_t tc;
+	uint32_t priority;
+	uint32_t weight;
+	uint32_t reference_count;
+	struct ice_dcf_tm_node *parent;
+	struct ice_dcf_tm_shaper_profile *shaper_profile;
+	struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(ice_dcf_tm_node_list, ice_dcf_tm_node);
+
+/* node type of Traffic Manager */
+enum ice_dcf_tm_node_type {
+	ICE_DCF_TM_NODE_TYPE_PORT,
+	ICE_DCF_TM_NODE_TYPE_TC,
+	ICE_DCF_TM_NODE_TYPE_VSI,
+	ICE_DCF_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store all the Traffic Manager configuration. */
+struct ice_dcf_tm_conf {
+	struct ice_dcf_shaper_profile_list shaper_profile_list;
+	struct ice_dcf_tm_node *root; /* root node - port */
+	struct ice_dcf_tm_node_list tc_list; /* node list for all the TCs */
+	struct ice_dcf_tm_node_list vsi_list; /* node list for all the queues */
+	uint32_t nb_tc_node;
+	uint32_t nb_vsi_node;
+	bool committed;
+};
+
 struct ice_dcf_hw {
 	struct iavf_hw avf;
 
@@ -45,6 +89,8 @@ struct ice_dcf_hw {
 	uint16_t *vf_vsi_map;
 	uint16_t pf_vsi_id;
 
+	struct ice_dcf_tm_conf tm_conf;
+	struct ice_aqc_port_ets_elem *ets_config;
 	struct virtchnl_version_info virtchnl_version;
 	struct virtchnl_vf_resource *vf_res; /* VF resource */
 	struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
@@ -83,5 +129,7 @@ int ice_dcf_query_stats(struct ice_dcf_hw *hw,
 int ice_dcf_add_del_all_mac_addr(struct ice_dcf_hw *hw, bool add);
 int ice_dcf_link_update(struct rte_eth_dev *dev,
 		    __rte_unused int wait_to_complete);
+void ice_dcf_tm_conf_init(struct rte_eth_dev *dev);
+void ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 0b40ebbec6..69fe6e63d1 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -622,6 +622,7 @@ ice_dcf_dev_stop(struct rte_eth_dev *dev)
 	ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false);
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	ad->pf.adapter_stopped = 1;
+	dcf_ad->real_hw.tm_conf.committed = false;
 
 	return 0;
 }
@@ -994,6 +995,18 @@ ice_dcf_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static int
+ice_dcf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		void *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	*(const void **)arg = &ice_dcf_tm_ops;
+
+	return 0;
+}
+
 static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.dev_start               = ice_dcf_dev_start,
 	.dev_stop                = ice_dcf_dev_stop,
@@ -1018,6 +1031,7 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
 	.flow_ops_get            = ice_dcf_dev_flow_ops_get,
 	.udp_tunnel_port_add	 = ice_dcf_dev_udp_tunnel_port_add,
 	.udp_tunnel_port_del	 = ice_dcf_dev_udp_tunnel_port_del,
+	.tm_ops_get              = ice_dcf_tm_ops_get,
 };
 
 static int
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index e7c9d7fe41..8510e37119 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -7,6 +7,8 @@
 
 #include "base/ice_common.h"
 #include "base/ice_adminq_cmd.h"
+#include "base/ice_dcb.h"
+#include "base/ice_sched.h"
 
 #include "ice_ethdev.h"
 #include "ice_dcf.h"
@@ -52,6 +54,7 @@ struct ice_dcf_vf_repr {
 	struct ice_dcf_vlan outer_vlan_info; /* DCF always handle outer VLAN */
 };
 
+extern const struct rte_tm_ops ice_dcf_tm_ops;
 void ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
 				 uint8_t *msg, uint16_t msglen);
 int ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 788f6dd2a0..0ea32cf8e9 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -264,6 +264,29 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
 	}
 }
 
+static int
+ice_dcf_query_port_ets(struct ice_hw *parent_hw, struct ice_dcf_hw *real_hw)
+{
+	int ret;
+
+	real_hw->ets_config = (struct ice_aqc_port_ets_elem *)
+			ice_malloc(real_hw, sizeof(*real_hw->ets_config));
+	if (!real_hw->ets_config)
+		return ICE_ERR_NO_MEMORY;
+
+	ret = ice_aq_query_port_ets(parent_hw->port_info,
+			real_hw->ets_config, sizeof(*real_hw->ets_config),
+			NULL);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "DCF Query Port ETS failed");
+		rte_free(real_hw->ets_config);
+		real_hw->ets_config = NULL;
+		return ret;
+	}
+
+	return ICE_SUCCESS;
+}
+
 static int
 ice_dcf_init_parent_hw(struct ice_hw *hw)
 {
@@ -486,6 +509,13 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
 		return err;
 	}
 
+	err = ice_dcf_query_port_ets(parent_hw, hw);
+	if (err) {
+		PMD_INIT_LOG(ERR, "failed to query port ets with error %d",
+			     err);
+		goto uninit_hw;
+	}
+
 	err = ice_dcf_load_pkg(parent_hw);
 	if (err) {
 		PMD_INIT_LOG(ERR, "failed to load package with error %d",
diff --git a/drivers/net/ice/ice_dcf_sched.c b/drivers/net/ice/ice_dcf_sched.c
new file mode 100644
index 0000000000..5c82a1a559
--- /dev/null
+++ b/drivers/net/ice/ice_dcf_sched.c
@@ -0,0 +1,759 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+#include <rte_tm_driver.h>
+
+#include "base/ice_sched.h"
+#include "ice_dcf_ethdev.h"
+
+static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
+				 __rte_unused int clear_on_fail,
+				 __rte_unused struct rte_tm_error *error);
+static int ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+	      uint32_t parent_node_id, uint32_t priority,
+	      uint32_t weight, uint32_t level_id,
+	      struct rte_tm_node_params *params,
+	      struct rte_tm_error *error);
+static int ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+			    struct rte_tm_error *error);
+static int ice_dcf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error);
+static int ice_dcf_shaper_profile_del(struct rte_eth_dev *dev,
+				   uint32_t shaper_profile_id,
+				   struct rte_tm_error *error);
+
+const struct rte_tm_ops ice_dcf_tm_ops = {
+	.shaper_profile_add = ice_dcf_shaper_profile_add,
+	.shaper_profile_delete = ice_dcf_shaper_profile_del,
+	.hierarchy_commit = ice_dcf_hierarchy_commit,
+	.node_add = ice_dcf_node_add,
+	.node_delete = ice_dcf_node_delete,
+};
+
+void
+ice_dcf_tm_conf_init(struct rte_eth_dev *dev)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+
+	/* initialize shaper profile list */
+	TAILQ_INIT(&hw->tm_conf.shaper_profile_list);
+
+	/* initialize node configuration */
+	hw->tm_conf.root = NULL;
+	TAILQ_INIT(&hw->tm_conf.tc_list);
+	TAILQ_INIT(&hw->tm_conf.vsi_list);
+	hw->tm_conf.nb_tc_node = 0;
+	hw->tm_conf.nb_vsi_node = 0;
+	hw->tm_conf.committed = false;
+}
+
+void
+ice_dcf_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_tm_shaper_profile *shaper_profile;
+	struct ice_dcf_tm_node *tm_node;
+
+	/* clear node configuration */
+	while ((tm_node = TAILQ_FIRST(&hw->tm_conf.vsi_list))) {
+		TAILQ_REMOVE(&hw->tm_conf.vsi_list, tm_node, node);
+		rte_free(tm_node);
+	}
+	hw->tm_conf.nb_vsi_node = 0;
+	while ((tm_node = TAILQ_FIRST(&hw->tm_conf.tc_list))) {
+		TAILQ_REMOVE(&hw->tm_conf.tc_list, tm_node, node);
+		rte_free(tm_node);
+	}
+	hw->tm_conf.nb_tc_node = 0;
+	if (hw->tm_conf.root) {
+		rte_free(hw->tm_conf.root);
+		hw->tm_conf.root = NULL;
+	}
+
+	/* Remove all shaper profiles */
+	while ((shaper_profile =
+	       TAILQ_FIRST(&hw->tm_conf.shaper_profile_list))) {
+		TAILQ_REMOVE(&hw->tm_conf.shaper_profile_list,
+			     shaper_profile, node);
+		rte_free(shaper_profile);
+	}
+}
+
+static inline struct ice_dcf_tm_node *
+ice_dcf_tm_node_search(struct rte_eth_dev *dev,
+		    uint32_t node_id, enum ice_dcf_tm_node_type *node_type)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list;
+	struct ice_dcf_tm_node_list *tc_list = &hw->tm_conf.tc_list;
+	struct ice_dcf_tm_node *tm_node;
+
+	if (hw->tm_conf.root && hw->tm_conf.root->id == node_id) {
+		*node_type = ICE_DCF_TM_NODE_TYPE_PORT;
+		return hw->tm_conf.root;
+	}
+
+	TAILQ_FOREACH(tm_node, tc_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = ICE_DCF_TM_NODE_TYPE_TC;
+			return tm_node;
+		}
+	}
+
+	TAILQ_FOREACH(tm_node, vsi_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = ICE_DCF_TM_NODE_TYPE_VSI;
+			return tm_node;
+		}
+	}
+
+	return NULL;
+}
+
+static inline struct ice_dcf_tm_shaper_profile *
+ice_dcf_shaper_profile_search(struct rte_eth_dev *dev,
+			   uint32_t shaper_profile_id)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_shaper_profile_list *shaper_profile_list =
+		&hw->tm_conf.shaper_profile_list;
+	struct ice_dcf_tm_shaper_profile *shaper_profile;
+
+	TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+		if (shaper_profile_id == shaper_profile->shaper_profile_id)
+			return shaper_profile;
+	}
+
+	return NULL;
+}
+
+static int
+ice_dcf_node_param_check(struct ice_dcf_hw *hw, uint32_t node_id,
+		      uint32_t priority, uint32_t weight,
+		      struct rte_tm_node_params *params,
+		      struct rte_tm_error *error)
+{
+	/* checked all the unsupported parameter */
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	if (priority) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+		error->message = "priority should be 0";
+		return -EINVAL;
+	}
+
+	if (weight != 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+		error->message = "weight must be 1";
+		return -EINVAL;
+	}
+
+	/* not support shared shaper */
+	if (params->shared_shaper_id) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+	if (params->n_shared_shapers) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+		error->message = "shared shaper not supported";
+		return -EINVAL;
+	}
+
+	/* for non-leaf node */
+	if (node_id >= 8 * hw->num_vfs) {
+		if (params->nonleaf.wfq_weight_mode) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+			error->message = "WFQ not supported";
+			return -EINVAL;
+		}
+		if (params->nonleaf.n_sp_priorities != 1) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+			error->message = "SP priority not supported";
+			return -EINVAL;
+		} else if (params->nonleaf.wfq_weight_mode &&
+			   !(*params->nonleaf.wfq_weight_mode)) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+			error->message = "WFP should be byte mode";
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	/* for leaf node */
+	if (params->leaf.cman) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+		error->message = "Congestion management not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.wred_profile_id !=
+	    RTE_TM_WRED_PROFILE_ID_NONE) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.shared_wred_context_id) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+	if (params->leaf.wred.n_shared_wred_contexts) {
+		error->type =
+			RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+		error->message = "WRED not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_dcf_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+	      uint32_t parent_node_id, uint32_t priority,
+	      uint32_t weight, uint32_t level_id,
+	      struct rte_tm_node_params *params,
+	      struct rte_tm_error *error)
+{
+	enum ice_dcf_tm_node_type parent_node_type = ICE_DCF_TM_NODE_TYPE_MAX;
+	enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX;
+	struct ice_dcf_tm_shaper_profile *shaper_profile = NULL;
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_tm_node *parent_node;
+	struct ice_dcf_tm_node *tm_node;
+	uint16_t tc_nb = 1;
+	int i, ret;
+
+	if (!params || !error)
+		return -EINVAL;
+
+	/* if already committed */
+	if (hw->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	ret = ice_dcf_node_param_check(hw, node_id, priority, weight,
+				   params, error);
+	if (ret)
+		return ret;
+
+	for (i = 1; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+		if (hw->ets_config->tc_valid_bits & (1 << i))
+			tc_nb++;
+	}
+
+	/* check if the node is already existed */
+	if (ice_dcf_tm_node_search(dev, node_id, &node_type)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "node id already used";
+		return -EINVAL;
+	}
+
+	/* check the shaper profile id */
+	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		shaper_profile = ice_dcf_shaper_profile_search(dev,
+			params->shaper_profile_id);
+		if (!shaper_profile) {
+			error->type =
+				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+			error->message = "shaper profile not exist";
+			return -EINVAL;
+		}
+	}
+
+	/* add root node if not have a parent */
+	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+		/* check level */
+		if (level_id != ICE_DCF_TM_NODE_TYPE_PORT) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+			error->message = "Wrong level";
+			return -EINVAL;
+		}
+
+		/* obviously no more than one root */
+		if (hw->tm_conf.root) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "already have a root";
+			return -EINVAL;
+		}
+
+		/* add the root node */
+		tm_node = rte_zmalloc("ice_dcf_tm_node",
+				      sizeof(struct ice_dcf_tm_node),
+				      0);
+		if (!tm_node)
+			return -ENOMEM;
+		tm_node->id = node_id;
+		tm_node->parent = NULL;
+		tm_node->reference_count = 0;
+		rte_memcpy(&tm_node->params, params,
+				 sizeof(struct rte_tm_node_params));
+		hw->tm_conf.root = tm_node;
+
+		return 0;
+	}
+
+	/* TC or vsi node */
+	/* check the parent node */
+	parent_node = ice_dcf_tm_node_search(dev, parent_node_id,
+					  &parent_node_type);
+	if (!parent_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent not exist";
+		return -EINVAL;
+	}
+	if (parent_node_type != ICE_DCF_TM_NODE_TYPE_PORT &&
+	    parent_node_type != ICE_DCF_TM_NODE_TYPE_TC) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+		error->message = "parent is not port or TC";
+		return -EINVAL;
+	}
+	/* check level */
+	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+	    level_id != parent_node_type + 1) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+		error->message = "Wrong level";
+		return -EINVAL;
+	}
+
+	/* check the TC node number */
+	if (parent_node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
+		/* check the TC number */
+		if (hw->tm_conf.nb_tc_node >= tc_nb) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many TCs";
+			return -EINVAL;
+		}
+	} else {
+		/* check the vsi node number */
+		if (parent_node->reference_count >= hw->num_vfs) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too many VSI for one TC";
+			return -EINVAL;
+		}
+		/* check the vsi node id */
+		if (node_id > tc_nb * hw->num_vfs) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+			error->message = "too large VSI id";
+			return -EINVAL;
+		}
+	}
+
+	/* add the TC or vsi node */
+	tm_node = rte_zmalloc("ice_dcf_tm_node",
+			      sizeof(struct ice_dcf_tm_node),
+			      0);
+	if (!tm_node)
+		return -ENOMEM;
+	tm_node->id = node_id;
+	tm_node->priority = priority;
+	tm_node->weight = weight;
+	tm_node->shaper_profile = shaper_profile;
+	tm_node->reference_count = 0;
+	tm_node->parent = parent_node;
+	rte_memcpy(&tm_node->params, params,
+			 sizeof(struct rte_tm_node_params));
+	if (parent_node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
+		TAILQ_INSERT_TAIL(&hw->tm_conf.tc_list,
+				  tm_node, node);
+		tm_node->tc = hw->tm_conf.nb_tc_node;
+		hw->tm_conf.nb_tc_node++;
+	} else {
+		TAILQ_INSERT_TAIL(&hw->tm_conf.vsi_list,
+				  tm_node, node);
+		tm_node->tc = parent_node->tc;
+		hw->tm_conf.nb_vsi_node++;
+	}
+	tm_node->parent->reference_count++;
+
+	/* increase the reference counter of the shaper profile */
+	if (shaper_profile)
+		shaper_profile->reference_count++;
+
+	return 0;
+}
+
+static int
+ice_dcf_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+		 struct rte_tm_error *error)
+{
+	enum ice_dcf_tm_node_type node_type = ICE_DCF_TM_NODE_TYPE_MAX;
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_tm_node *tm_node;
+
+	if (!error)
+		return -EINVAL;
+
+	/* if already committed */
+	if (hw->tm_conf.committed) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "already committed";
+		return -EINVAL;
+	}
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* check if the node id exists */
+	tm_node = ice_dcf_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	/* the node should have no child */
+	if (tm_node->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message =
+			"cannot delete a node which has children";
+		return -EINVAL;
+	}
+
+	/* root node */
+	if (node_type == ICE_DCF_TM_NODE_TYPE_PORT) {
+		if (tm_node->shaper_profile)
+			tm_node->shaper_profile->reference_count--;
+		rte_free(tm_node);
+		hw->tm_conf.root = NULL;
+		return 0;
+	}
+
+	/* TC or VSI node */
+	if (tm_node->shaper_profile)
+		tm_node->shaper_profile->reference_count--;
+	tm_node->parent->reference_count--;
+	if (node_type == ICE_DCF_TM_NODE_TYPE_TC) {
+		TAILQ_REMOVE(&hw->tm_conf.tc_list, tm_node, node);
+		hw->tm_conf.nb_tc_node--;
+	} else {
+		TAILQ_REMOVE(&hw->tm_conf.vsi_list, tm_node, node);
+		hw->tm_conf.nb_vsi_node--;
+	}
+	rte_free(tm_node);
+
+	return 0;
+}
+
+static int
+ice_dcf_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+				struct rte_tm_error *error)
+{
+	/* min bucket size not supported */
+	if (profile->committed.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+		error->message = "committed bucket size not supported";
+		return -EINVAL;
+	}
+	/* max bucket size not supported */
+	if (profile->peak.size) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+		error->message = "peak bucket size not supported";
+		return -EINVAL;
+	}
+	/* length adjustment not supported */
+	if (profile->pkt_length_adjust) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+		error->message = "packet length adjustment not supported";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_dcf_shaper_profile_add(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_shaper_params *profile,
+			struct rte_tm_error *error)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_tm_shaper_profile *shaper_profile;
+	int ret;
+
+	if (!profile || !error)
+		return -EINVAL;
+
+	ret = ice_dcf_shaper_profile_param_check(profile, error);
+	if (ret)
+		return ret;
+
+	shaper_profile = ice_dcf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID exist";
+		return -EINVAL;
+	}
+
+	shaper_profile = rte_zmalloc("ice_dcf_tm_shaper_profile",
+				     sizeof(struct ice_dcf_tm_shaper_profile),
+				     0);
+	if (!shaper_profile)
+		return -ENOMEM;
+	shaper_profile->shaper_profile_id = shaper_profile_id;
+	rte_memcpy(&shaper_profile->profile, profile,
+			 sizeof(struct rte_tm_shaper_params));
+	TAILQ_INSERT_TAIL(&hw->tm_conf.shaper_profile_list,
+			  shaper_profile, node);
+
+	return 0;
+}
+
+static int
+ice_dcf_shaper_profile_del(struct rte_eth_dev *dev,
+			uint32_t shaper_profile_id,
+			struct rte_tm_error *error)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_tm_shaper_profile *shaper_profile;
+
+	if (!error)
+		return -EINVAL;
+
+	shaper_profile = ice_dcf_shaper_profile_search(dev, shaper_profile_id);
+
+	if (!shaper_profile) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+		error->message = "profile ID not exist";
+		return -EINVAL;
+	}
+
+	/* don't delete a profile if it's used by one or several nodes */
+	if (shaper_profile->reference_count) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "profile in use";
+		return -EINVAL;
+	}
+
+	TAILQ_REMOVE(&hw->tm_conf.shaper_profile_list, shaper_profile, node);
+	rte_free(shaper_profile);
+
+	return 0;
+}
+
+static int
+ice_dcf_set_vf_bw(struct ice_dcf_hw *hw,
+			struct virtchnl_dcf_bw_cfg_list *vf_bw,
+			uint16_t len)
+{
+	struct dcf_virtchnl_cmd args;
+	int err;
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_DCF_CONFIG_BW;
+	args.req_msg = (uint8_t *)vf_bw;
+	args.req_msglen  = len;
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+			    "VIRTCHNL_OP_DCF_CONFIG_BW");
+	return err;
+}
+
+static int
+ice_dcf_validate_tc_bw(struct virtchnl_dcf_bw_cfg_list *tc_bw,
+			uint32_t port_bw)
+{
+	struct virtchnl_dcf_bw_cfg *cfg;
+	bool lowest_cir_mark = false;
+	u32 total_peak, rest_peak;
+	u32 committed, peak;
+	int i;
+
+	total_peak = 0;
+	for (i = 0; i < tc_bw->num_elem; i++)
+		total_peak += tc_bw->cfg[i].shaper.peak;
+
+	for (i = 0; i < tc_bw->num_elem; i++) {
+		cfg = &tc_bw->cfg[i];
+		peak = cfg->shaper.peak;
+		committed = cfg->shaper.committed;
+		rest_peak = total_peak - peak;
+
+		if (lowest_cir_mark && peak == 0) {
+			PMD_DRV_LOG(ERR, "Max bandwidth must be configured for TC%u",
+				cfg->tc_num);
+			return -EINVAL;
+		}
+
+		if (!lowest_cir_mark && committed)
+			lowest_cir_mark = true;
+
+		if (committed && committed + rest_peak > port_bw) {
+			PMD_DRV_LOG(ERR, "Total value of TC%u min bandwidth and other TCs' max bandwidth %ukbps should be less than port link speed %ukbps",
+				cfg->tc_num, committed + rest_peak, port_bw);
+			return -EINVAL;
+		}
+
+		if (committed && committed < ICE_SCHED_MIN_BW) {
+			PMD_DRV_LOG(ERR, "If TC%u min Tx bandwidth is set, it cannot be less than 500Kbps",
+				cfg->tc_num);
+			return -EINVAL;
+		}
+
+		if (peak && committed > peak) {
+			PMD_DRV_LOG(ERR, "TC%u Min Tx bandwidth cannot be greater than max Tx bandwidth",
+				cfg->tc_num);
+			return -EINVAL;
+		}
+
+		if (peak > port_bw) {
+			PMD_DRV_LOG(ERR, "TC%u max Tx bandwidth %uKbps is greater than current link speed %uKbps",
+				cfg->tc_num, peak, port_bw);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+static int ice_dcf_hierarchy_commit(struct rte_eth_dev *dev,
+				 int clear_on_fail,
+				 __rte_unused struct rte_tm_error *error)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct virtchnl_dcf_bw_cfg_list *vf_bw;
+	struct virtchnl_dcf_bw_cfg_list *tc_bw;
+	struct ice_dcf_tm_node_list *vsi_list = &hw->tm_conf.vsi_list;
+	struct rte_tm_shaper_params *profile;
+	struct ice_dcf_tm_node *tm_node;
+	uint32_t port_bw, cir_total;
+	uint16_t size, vf_id;
+	uint8_t num_elem = 0;
+	int i, ret_val = ICE_SUCCESS;
+
+	/* check if all TC nodes are set */
+	if (BIT(hw->tm_conf.nb_tc_node) & hw->ets_config->tc_valid_bits) {
+		PMD_DRV_LOG(ERR, "Not all enabled TC nodes are set");
+		ret_val = ICE_ERR_PARAM;
+		goto fail_clear;
+	}
+
+	size = sizeof(struct virtchnl_dcf_bw_cfg_list) +
+		sizeof(struct virtchnl_dcf_bw_cfg) *
+		(hw->tm_conf.nb_tc_node - 1);
+	vf_bw = rte_zmalloc("vf_bw", size, 0);
+	if (!vf_bw) {
+		ret_val = ICE_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+	tc_bw = rte_zmalloc("tc_bw", size, 0);
+	if (!tc_bw) {
+		ret_val = ICE_ERR_NO_MEMORY;
+		goto fail_clear;
+	}
+
+	/* port bandwidth (Kbps) */
+	port_bw = hw->link_speed * 1000;
+	cir_total = 0;
+
+	/* init tc bw configuration */
+#define ICE_DCF_SCHED_TC_NODE 0xffff
+	tc_bw->vf_id = ICE_DCF_SCHED_TC_NODE;
+	tc_bw->node_type = VIRTCHNL_DCF_TARGET_TC_BW;
+	tc_bw->num_elem = hw->tm_conf.nb_tc_node;
+	for (i = 0; i < tc_bw->num_elem; i++) {
+		tc_bw->cfg[i].tc_num = i;
+		tc_bw->cfg[i].type = VIRTCHNL_BW_SHAPER;
+		tc_bw->cfg[i].bw_type |=
+			VIRTCHNL_DCF_BW_PIR | VIRTCHNL_DCF_BW_CIR;
+	}
+
+	for (vf_id = 0; vf_id < hw->num_vfs; vf_id++) {
+		num_elem = 0;
+		vf_bw->vf_id = vf_id;
+		vf_bw->node_type = VIRTCHNL_DCF_TARGET_VF_BW;
+		TAILQ_FOREACH(tm_node, vsi_list, node) {
+			/* scan the nodes belong to one VSI */
+			if (tm_node->id - hw->num_vfs * tm_node->tc != vf_id)
+				continue;
+			vf_bw->cfg[num_elem].tc_num = tm_node->tc;
+			vf_bw->cfg[num_elem].type = VIRTCHNL_BW_SHAPER;
+			if (tm_node->shaper_profile) {
+				/* Transfer from Byte per seconds to Kbps */
+				profile = &tm_node->shaper_profile->profile;
+				vf_bw->cfg[num_elem].shaper.peak =
+				profile->peak.rate / 1000 * BITS_PER_BYTE;
+				vf_bw->cfg[num_elem].shaper.committed =
+				profile->committed.rate / 1000 * BITS_PER_BYTE;
+				vf_bw->cfg[num_elem].bw_type |=
+					VIRTCHNL_DCF_BW_PIR |
+					VIRTCHNL_DCF_BW_CIR;
+			}
+
+			/* update tc node bw configuration */
+			tc_bw->cfg[tm_node->tc].shaper.peak +=
+				vf_bw->cfg[num_elem].shaper.peak;
+			tc_bw->cfg[tm_node->tc].shaper.committed +=
+				vf_bw->cfg[num_elem].shaper.committed;
+
+			cir_total += vf_bw->cfg[num_elem].shaper.committed;
+			num_elem++;
+		}
+
+		/* check if all TC nodes are set with VF vsi nodes */
+		if (num_elem != hw->tm_conf.nb_tc_node) {
+			PMD_DRV_LOG(ERR, "VF%u vsi nodes are not set to all TC nodes",
+				    vf_id);
+			ret_val = ICE_ERR_PARAM;
+			goto fail_clear;
+		}
+
+		vf_bw->num_elem = num_elem;
+		ret_val = ice_dcf_set_vf_bw(hw, vf_bw, size);
+		if (ret_val)
+			goto fail_clear;
+		memset(vf_bw, 0, size);
+	}
+
+	/* check if total CIR is larger than port bandwidth */
+	if (cir_total > port_bw) {
+		PMD_DRV_LOG(ERR, "Total CIR of all VFs is larger than port bandwidth");
+		ret_val = ICE_ERR_PARAM;
+		goto fail_clear;
+	}
+
+	/* check and commit tc node bw configuration */
+	ret_val = ice_dcf_validate_tc_bw(tc_bw, port_bw);
+	if (ret_val)
+		goto fail_clear;
+	ret_val = ice_dcf_set_vf_bw(hw, tc_bw, size);
+	if (ret_val)
+		goto fail_clear;
+
+	hw->tm_conf.committed = true;
+	return ret_val;
+
+fail_clear:
+	/* clear all the traffic manager configuration */
+	if (clear_on_fail) {
+		ice_dcf_tm_conf_uninit(dev);
+		ice_dcf_tm_conf_init(dev);
+	}
+	return ret_val;
+}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 65750d3501..0b86d74a49 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -70,6 +70,7 @@ endif
 sources += files('ice_dcf.c',
          'ice_dcf_vf_representor.c',
          'ice_dcf_ethdev.c',
-         'ice_dcf_parent.c')
+         'ice_dcf_parent.c',
+	 'ice_dcf_sched.c')
 
 headers = files('rte_pmd_ice.h')
-- 
2.17.1


  parent reply	other threads:[~2021-06-30  6:51 UTC|newest]

Thread overview: 44+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-06-01  1:40 [dpdk-dev] [PATCH v1 0/5] Enable ETS-based Tx QoS for VF in DCF Ting Xu
2021-06-01  1:40 ` [dpdk-dev] [PATCH v1 1/5] common/iavf: add support for ETS-based Tx QoS Ting Xu
2021-06-01  1:40 ` [dpdk-dev] [PATCH v1 2/5] net/ice/base: support DCF query port ETS adminq Ting Xu
2021-06-01  1:40 ` [dpdk-dev] [PATCH v1 3/5] net/ice: support DCF link status event handling Ting Xu
2021-06-01  1:40 ` [dpdk-dev] [PATCH v1 4/5] net/ice: support QoS config VF bandwidth in DCF Ting Xu
2021-06-01  1:40 ` [dpdk-dev] [PATCH v1 5/5] net/iavf: query QoS cap and set queue TC mapping Ting Xu
2021-06-17 10:17 ` [dpdk-dev] [PATCH v2 0/5] Enable ETS-based Tx QoS for VF in DCF Ting Xu
2021-06-17 10:17   ` [dpdk-dev] [PATCH v2 1/5] common/iavf: support ETS-based QoS offload configuration Ting Xu
2021-06-17 10:17   ` [dpdk-dev] [PATCH v2 2/5] net/ice/base: support DCF query port ETS adminq Ting Xu
2021-06-17 10:17   ` [dpdk-dev] [PATCH v2 3/5] net/ice: support DCF link status event handling Ting Xu
2021-06-17 10:17   ` [dpdk-dev] [PATCH v2 4/5] net/ice: support QoS config VF bandwidth in DCF Ting Xu
2021-06-17 10:17   ` [dpdk-dev] [PATCH v2 5/5] net/iavf: query QoS cap and set queue TC mapping Ting Xu
2021-06-25  9:31 ` [dpdk-dev] [PATCH v3 0/5] Enable ETS-based Tx QoS for VF in DCF Ting Xu
2021-06-25  9:31   ` [dpdk-dev] [PATCH v3 1/5] common/iavf: support ETS-based QoS offload configuration Ting Xu
2021-06-25  9:31   ` [dpdk-dev] [PATCH v3 2/5] net/ice/base: support DCF query port ETS adminq Ting Xu
2021-06-25  9:31   ` [dpdk-dev] [PATCH v3 3/5] net/ice: support DCF link status event handling Ting Xu
2021-06-25  9:31   ` [dpdk-dev] [PATCH v3 4/5] net/ice: support QoS config VF bandwidth in DCF Ting Xu
2021-06-25  9:31   ` [dpdk-dev] [PATCH v3 5/5] net/iavf: query QoS cap and set queue TC mapping Ting Xu
2021-06-30  6:53 ` [dpdk-dev] [PATCH v4 0/7] Enable ETS-based Tx QoS for VF in DCF Ting Xu
2021-06-30  6:53   ` [dpdk-dev] [PATCH v4 1/7] common/iavf: support ETS-based QoS offload configuration Ting Xu
2021-06-30  6:53   ` [dpdk-dev] [PATCH v4 2/7] net/ice/base: support DCF query port ETS adminq Ting Xu
2021-06-30  6:53   ` [dpdk-dev] [PATCH v4 3/7] net/ice: support DCF link status event handling Ting Xu
2021-06-30  6:53   ` Ting Xu [this message]
2021-06-30  6:53   ` [dpdk-dev] [PATCH v4 5/7] net/iavf: query QoS cap and set queue TC mapping Ting Xu
2021-06-30  6:53   ` [dpdk-dev] [PATCH v4 6/7] net/iavf: check Tx packet with correct UP and queue Ting Xu
2021-06-30  6:53   ` [dpdk-dev] [PATCH v4 7/7] doc: release note for ETS-based Tx QoS Ting Xu
2021-07-01 10:20 ` [dpdk-dev] [PATCH v5 0/7] Enable ETS-based Tx QoS for VF in DCF Ting Xu
2021-07-01 10:20   ` [dpdk-dev] [PATCH v5 1/7] common/iavf: support ETS-based QoS offload configuration Ting Xu
2021-07-01 10:20   ` [dpdk-dev] [PATCH v5 2/7] net/ice/base: support DCF query port ETS adminq Ting Xu
2021-07-01 10:20   ` [dpdk-dev] [PATCH v5 3/7] net/ice: support DCF link status event handling Ting Xu
2021-07-01 10:20   ` [dpdk-dev] [PATCH v5 4/7] net/ice: support QoS config VF bandwidth in DCF Ting Xu
2021-07-07  9:23     ` Thomas Monjalon
2021-07-01 10:20   ` [dpdk-dev] [PATCH v5 5/7] net/iavf: query QoS cap and set queue TC mapping Ting Xu
2021-07-01 10:20   ` [dpdk-dev] [PATCH v5 6/7] net/iavf: check Tx packet with correct UP and queue Ting Xu
2021-07-01 10:20   ` [dpdk-dev] [PATCH v5 7/7] doc: release note for ETS-based Tx QoS Ting Xu
2021-07-02  3:00   ` [dpdk-dev] [PATCH v5 0/7] Enable ETS-based Tx QoS for VF in DCF Zhang, Qi Z
2021-07-01 11:41 ` Ting Xu
2021-07-01 11:41   ` [dpdk-dev] [PATCH v5 1/7] common/iavf: support ETS-based QoS offload configuration Ting Xu
2021-07-01 11:41   ` [dpdk-dev] [PATCH v5 2/7] net/ice/base: support DCF query port ETS adminq Ting Xu
2021-07-01 11:41   ` [dpdk-dev] [PATCH v5 3/7] net/ice: support DCF link status event handling Ting Xu
2021-07-01 11:41   ` [dpdk-dev] [PATCH v5 4/7] net/ice: support QoS config VF bandwidth in DCF Ting Xu
2021-07-01 11:41   ` [dpdk-dev] [PATCH v5 5/7] net/iavf: query QoS cap and set queue TC mapping Ting Xu
2021-07-01 11:41   ` [dpdk-dev] [PATCH v5 6/7] net/iavf: check Tx packet with correct UP and queue Ting Xu
2021-07-01 11:41   ` [dpdk-dev] [PATCH v5 7/7] doc: release note for ETS-based Tx QoS Ting Xu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210630065344.50352-5-ting.xu@intel.com \
    --to=ting.xu@intel.com \
    --cc=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=jingjing.wu@intel.com \
    --cc=qi.z.zhang@intel.com \
    --cc=qiming.yang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.