From: Michal Wilczynski <michal.wilczynski@intel.com>
To: netdev@vger.kernel.org
Cc: alexandr.lobakin@intel.com, dchumak@nvidia.com,
	maximmi@nvidia.com, jiri@resnulli.us, simon.horman@corigine.com,
	jacob.e.keller@intel.com, jesse.brandeburg@intel.com,
	przemyslaw.kitszel@intel.com,
	Michal Wilczynski <michal.wilczynski@intel.com>
Subject: [RFC PATCH net-next v4 4/6] ice: Implement devlink-rate API
Date: Thu, 15 Sep 2022 15:42:37 +0200
Message-ID: <20220915134239.1935604-5-michal.wilczynski@intel.com>
In-Reply-To: <20220915134239.1935604-1-michal.wilczynski@intel.com>

There is a need to support modification of the Tx scheduler topology in the
ice driver. This will allow the user to control the Tx settings of each node
in the internal hierarchy of nodes. Four parameters are supported per node:
tx_max, tx_share, tx_priority and tx_weight.
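
An illustrative configuration flow with the iproute2 devlink tool might
look as follows (the device address and node names below are hypothetical,
and tx_priority/tx_weight assume userspace support for the new attributes
introduced earlier in this series):

  # the driver exports the existing tree on init; add a custom node
  # and move a VF's vport (leaf) under it
  devlink port function rate add pci/0000:4b:00.0/node_custom parent node_0
  devlink port function rate set pci/0000:4b:00.0/1 parent node_custom

  # apply scheduling parameters to the new node
  devlink port function rate set pci/0000:4b:00.0/node_custom \
          tx_share 100mbit tx_max 500mbit tx_priority 5 tx_weight 2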

Signed-off-by: Michal Wilczynski <michal.wilczynski@intel.com>
---
 drivers/net/ethernet/intel/ice/ice_devlink.c | 511 +++++++++++++++++++
 drivers/net/ethernet/intel/ice/ice_devlink.h |   2 +
 2 files changed, 513 insertions(+)

diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index e6ec20079ced..925283605b59 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -713,6 +713,490 @@ ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
 	return ice_devlink_port_split(devlink, port, 1, extack);
 }
 
+/**
+ * ice_traverse_tx_tree - traverse Tx scheduler tree
+ * @devlink: devlink struct
+ * @node: current node, used for recursion
+ * @tc_node: tc_node struct, which is treated as the root
+ * @pf: pf struct
+ *
+ * This function traverses the Tx scheduler tree and exports its
+ * entire structure to devlink-rate.
+ */
+static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
+				 struct ice_sched_node *tc_node, struct ice_pf *pf)
+{
+	struct ice_vf *vf;
+	int i;
+
+	devl_lock(devlink);
+
+	if (node->parent == tc_node) {
+		/* create root node */
+		devl_rate_node_create(devlink, node, node->name, NULL);
+	} else if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF &&
+		   node->parent->name) {
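+		/* leaf node: export it as a queue object under its parent */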
+		devl_rate_queue_create(devlink, node->parent->name, node->tx_queue_id, node);
+	} else if (node->vsi_handle &&
+		   pf->vsi[node->vsi_handle]->vf) {
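+		/* node belongs to a VF VSI, export it as a vport object */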
+		vf = pf->vsi[node->vsi_handle]->vf;
+		snprintf(node->name, DEVLINK_RATE_NAME_MAX_LEN, "vport_%u", vf->devlink_port.index);
+		if (!vf->devlink_port.devlink_rate)
+			devl_rate_vport_create(&vf->devlink_port, node, node->parent->name);
+	} else {
+		devl_rate_node_create(devlink, node, node->name, node->parent->name);
+	}
+
+	devl_unlock(devlink);
+
+	for (i = 0; i < node->num_children; i++)
+		ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
+}
+
+/**
+ * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
+ * @devlink: devlink struct
+ * @vsi: main vsi struct
+ *
+ * This function finds a root node, then calls ice_traverse_tx_tree(), which
+ * traverses the tree and exports its contents to devlink-rate.
+ */
+int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
+{
+	struct ice_port_info *pi = vsi->port_info;
+	struct ice_sched_node *tc_node;
+	struct ice_pf *pf = vsi->back;
+	int i;
+
+	tc_node = pi->root->children[0];
+	mutex_lock(&pi->sched_lock);
+	for (i = 0; i < tc_node->num_children; i++)
+		ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
+	mutex_unlock(&pi->sched_lock);
+
+	return 0;
+}
+
+/**
+ * ice_set_object_tx_share - sets node scheduling parameter
+ * @pi: port information struct
+ * @node: node struct instance
+ * @extack: extended netdev ack structure
+ *
+ * This function sets ICE_MIN_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
+				   struct netlink_ext_ack *extack)
+{
+	int status;
+
+	mutex_lock(&pi->sched_lock);
+	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
+	mutex_unlock(&pi->sched_lock);
+
+	if (status)
+		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");
+
+	return status;
+}
+
+/**
+ * ice_set_object_tx_max - sets node scheduling parameter
+ * @pi: port information struct
+ * @node: node struct instance
+ * @extack: extended netdev ack structure
+ *
+ * This function sets ICE_MAX_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
+				 struct netlink_ext_ack *extack)
+{
+	int status;
+
+	mutex_lock(&pi->sched_lock);
+	status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
+	mutex_unlock(&pi->sched_lock);
+
+	if (status)
+		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");
+
+	return status;
+}
+
+/**
+ * ice_set_object_tx_priority - sets node scheduling parameter
+ * @pi: port information struct
+ * @node: node struct instance
+ * @extack: extended netdev ack structure
+ *
+ * This function sets the priority of a node among its siblings.
+ */
+static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+				      struct netlink_ext_ack *extack)
+{
+	int status;
+
+	mutex_lock(&pi->sched_lock);
+	status = ice_sched_set_node_priority(pi, node, node->tx_priority);
+	mutex_unlock(&pi->sched_lock);
+
+	if (status)
+		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");
+
+	return status;
+}
+
+/**
+ * ice_set_object_tx_weight - sets node scheduling parameter
+ * @pi: port information struct
+ * @node: node struct instance
+ * @extack: extended netdev ack structure
+ *
+ * This function sets the node's weight for the WFQ algorithm.
+ */
+static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
+				    struct netlink_ext_ack *extack)
+{
+	int status;
+
+	mutex_lock(&pi->sched_lock);
+	status = ice_sched_set_node_weight(pi, node, node->tx_weight);
+	mutex_unlock(&pi->sched_lock);
+
+	if (status)
+		NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");
+
+	return status;
+}
+
+/**
+ * ice_get_pi_from_dev_rate - get port info from devlink_rate
+ * @rate_node: devlink_rate struct instance
+ *
+ * This function returns the port_info struct corresponding to a given
+ * devlink_rate object.
+ */
+static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
+{
+	struct ice_pf *pf = devlink_priv(rate_node->devlink);
+
+	return ice_get_main_vsi(pf)->port_info;
+}
+
+static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
+				     struct netlink_ext_ack *extack)
+{
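+	/* nothing to allocate up front; the HW scheduler element is only
+	 * created once the node gets assigned a parent in
+	 * ice_devlink_set_parent()
+	 */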
+	return 0;
+}
+
+static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
+				     struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node, *tc_node;
+	struct ice_port_info *pi;
+
+	pi = ice_get_pi_from_dev_rate(rate_node);
+	tc_node = pi->root->children[0];
+	node = priv;
+
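+	/* nothing to free for the TC root or for nodes that were never
+	 * backed by a HW scheduler element
+	 */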
+	if (!rate_node->parent || !node || tc_node == node)
+		return 0;
+
+	if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
+		NL_SET_ERR_MSG_MOD(extack, "Queue can't be deleted");
+		return -EINVAL;
+	}
+	mutex_lock(&pi->sched_lock);
+	ice_free_sched_node(pi, node);
+	mutex_unlock(&pi->sched_lock);
+
+	return 0;
+}
+
+static int ice_devlink_rate_vport_tx_max_set(struct devlink_rate *rate_vport, void *priv,
+					     u64 tx_max, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
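+	/* scale the devlink-rate value to the scheduler's rate units */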
+	node->tx_max = tx_max / 10;
+
+	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_vport), node, extack);
+}
+
+static int ice_devlink_rate_vport_tx_share_set(struct devlink_rate *rate_vport, void *priv,
+					       u64 tx_share, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_share = tx_share / 10;
+
+	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_vport), node, extack);
+}
+
+static int ice_devlink_rate_vport_tx_priority_set(struct devlink_rate *rate_vport, void *priv,
+						  u64 tx_priority, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_priority = tx_priority;
+
+	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_vport), node, extack);
+}
+
+static int ice_devlink_rate_vport_tx_weight_set(struct devlink_rate *rate_vport, void *priv,
+						u64 tx_weight, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_weight = tx_weight;
+
+	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_vport), node, extack);
+}
+
+static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
+					    u64 tx_max, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_max = tx_max / 10;
+
+	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node), node, extack);
+}
+
+static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
+					      u64 tx_share, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_share = tx_share / 10;
+
+	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node), node, extack);
+}
+
+static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
+						 u64 tx_priority, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_priority = tx_priority;
+
+	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node), node, extack);
+}
+
+static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
+					       u64 tx_weight, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_weight = tx_weight;
+
+	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node), node, extack);
+}
+
+static int ice_devlink_rate_queue_tx_max_set(struct devlink_rate *rate_queue, void *priv,
+					     u64 tx_max, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_max = tx_max / 10;
+
+	return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_queue), node, extack);
+}
+
+static int ice_devlink_rate_queue_tx_share_set(struct devlink_rate *rate_queue, void *priv,
+					       u64 tx_share, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_share = tx_share / 10;
+
+	return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_queue), node, extack);
+}
+
+static int ice_devlink_rate_queue_tx_priority_set(struct devlink_rate *rate_queue, void *priv,
+						  u64 tx_priority, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_priority = tx_priority;
+
+	return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_queue), node, extack);
+}
+
+static int ice_devlink_rate_queue_tx_weight_set(struct devlink_rate *rate_queue, void *priv,
+						u64 tx_weight, struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node = priv;
+
+	if (!node)
+		return 0;
+
+	node->tx_weight = tx_weight;
+
+	return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_queue), node, extack);
+}
+
+static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
+				  struct devlink_rate *parent,
+				  void **priv, void *parent_priv,
+				  struct netlink_ext_ack *extack)
+{
+	struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
+	struct ice_sched_node *tc_node, *node, *parent_node;
+	u16 num_nodes_added;
+	u32 first_node_teid;
+	u32 node_teid;
+	int status;
+
+	tc_node = pi->root->children[0];
+	node = *priv;
+
+	if (!parent) {
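+		/* detach request: the node is removed from the scheduler
+		 * tree; the TC root and leaf (queue) nodes can't be removed
+		 */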
+		if (!node || tc_node == node ||
+		    node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
+			return -EINVAL;
+		}
+
+		mutex_lock(&pi->sched_lock);
+		ice_free_sched_node(pi, node);
+		mutex_unlock(&pi->sched_lock);
+
+		return 0;
+	}
+
+	parent_node = parent_priv;
+
+	/* if the node doesn't exist, create it */
+	if (!node) {
+		mutex_lock(&pi->sched_lock);
+
+		status = ice_sched_add_elems(pi, tc_node, parent_node,
+					     parent_node->tx_sched_layer + 1,
+					     1, &num_nodes_added, &first_node_teid);
+
+		mutex_unlock(&pi->sched_lock);
+
+		if (status) {
+			NL_SET_ERR_MSG_MOD(extack, "Can't add a new node");
+			return status;
+		}
+
+		node = ice_sched_find_node_by_teid(parent_node, first_node_teid);
+		*priv = node;
+
+		if (devlink_rate->tx_share) {
+			node->tx_share = devlink_rate->tx_share;
+			ice_set_object_tx_share(pi, node, extack);
+		}
+		if (devlink_rate->tx_max) {
+			node->tx_max = devlink_rate->tx_max;
+			ice_set_object_tx_max(pi, node, extack);
+		}
+		if (devlink_rate->tx_priority) {
+			node->tx_priority = devlink_rate->tx_priority;
+			ice_set_object_tx_priority(pi, node, extack);
+		}
+		if (devlink_rate->tx_weight) {
+			node->tx_weight = devlink_rate->tx_weight;
+			ice_set_object_tx_weight(pi, node, extack);
+		}
+	} else {
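+		/* the node already exists, just move it under the new parent */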
+		node_teid = le32_to_cpu(node->info.node_teid);
+		mutex_lock(&pi->sched_lock);
+		status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid);
+		mutex_unlock(&pi->sched_lock);
+
+		if (status)
+			NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent");
+	}
+
+	return status;
+}
+
+static int
+ice_devlink_reassign_queue(struct ice_port_info *pi, struct ice_sched_node *queue_node,
+			   struct ice_sched_node *src_node, struct ice_sched_node *dst_node)
+{
+	struct ice_aqc_move_txqs_data *buf;
+	struct ice_hw *hw = pi->hw;
+	u32 blocked_cgds;
+	u8 txqs_moved;
+	u16 buf_size;
+	int status;
+
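+	/* a single queue is moved, so the AQ buffer holds one entry */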
+	buf_size = struct_size(buf, txqs, 1);
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf->src_teid = src_node->info.node_teid;
+	buf->dest_teid = dst_node->info.node_teid;
+	buf->txqs[0].txq_id = queue_node->tx_queue_id;
+	buf->txqs[0].q_cgd = 0;
+	buf->txqs[0].q_teid = queue_node->info.node_teid;
+
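+	/* 50 is the FW completion timeout; the AQ command introduced earlier
+	 * in this series expresses it in units of 100 usec
+	 */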
+	status = ice_aq_move_recfg_lan_txq(hw, 1, true, false, false, false, 50,
+					   &blocked_cgds, buf, buf_size, &txqs_moved, NULL);
+	if (!status)
+		ice_sched_update_parent(dst_node, queue_node);
+
+	kfree(buf);
+
+	return status;
+}
+
+static int ice_devlink_rate_queue_parent_set(struct devlink_rate *devlink_rate,
+					     struct devlink_rate *parent,
+					     void **priv, void *parent_priv,
+					     struct netlink_ext_ack *extack)
+{
+	struct ice_sched_node *node, *prev_parent, *next_parent;
+	struct ice_port_info *pi;
+
+	if (!parent)
+		return -EINVAL;
+
+	pi = ice_get_pi_from_dev_rate(devlink_rate);
+
+	node = *priv;
+	next_parent = parent_priv;
+	prev_parent = node->parent;
+
+	return ice_devlink_reassign_queue(pi, node, prev_parent, next_parent);
+}
+
 static const struct devlink_ops ice_devlink_ops = {
 	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
 	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
@@ -725,6 +1209,28 @@ static const struct devlink_ops ice_devlink_ops = {
 	.eswitch_mode_set = ice_eswitch_mode_set,
 	.info_get = ice_devlink_info_get,
 	.flash_update = ice_devlink_flash_update,
+
+	.rate_node_new = ice_devlink_rate_node_new,
+	.rate_node_del = ice_devlink_rate_node_del,
+
+	.rate_vport_tx_max_set = ice_devlink_rate_vport_tx_max_set,
+	.rate_vport_tx_share_set = ice_devlink_rate_vport_tx_share_set,
+	.rate_vport_tx_priority_set = ice_devlink_rate_vport_tx_priority_set,
+	.rate_vport_tx_weight_set = ice_devlink_rate_vport_tx_weight_set,
+
+	.rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
+	.rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
+	.rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
+	.rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,
+
+	.rate_queue_tx_max_set = ice_devlink_rate_queue_tx_max_set,
+	.rate_queue_tx_share_set = ice_devlink_rate_queue_tx_share_set,
+	.rate_queue_tx_priority_set = ice_devlink_rate_queue_tx_priority_set,
+	.rate_queue_tx_weight_set = ice_devlink_rate_queue_tx_weight_set,
+
+	.rate_vport_parent_set = ice_devlink_set_parent,
+	.rate_node_parent_set = ice_devlink_set_parent,
+	.rate_queue_parent_set = ice_devlink_rate_queue_parent_set,
 };
 
 static int
@@ -893,6 +1399,9 @@ void ice_devlink_register(struct ice_pf *pf)
  */
 void ice_devlink_unregister(struct ice_pf *pf)
 {
+	devl_lock(priv_to_devlink(pf));
+	devl_rate_objects_destroy(priv_to_devlink(pf));
+	devl_unlock(priv_to_devlink(pf));
 	devlink_unregister(priv_to_devlink(pf));
 }
 
@@ -1098,6 +1607,8 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
 
 	devlink_port = &vf->devlink_port;
 
+	devl_rate_vport_destroy(devlink_port);
+
 	devlink_port_type_clear(devlink_port);
 	devlink_port_unregister(devlink_port);
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index fe006d9946f8..8bfed9ee2c4c 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -18,4 +18,6 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf);
 void ice_devlink_init_regions(struct ice_pf *pf);
 void ice_devlink_destroy_regions(struct ice_pf *pf);
 
+int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi);
+
 #endif /* _ICE_DEVLINK_H_ */
-- 
2.37.2

