netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Simon Horman <simon.horman@corigine.com>
To: Dave Ertman <david.m.ertman@intel.com>
Cc: intel-wired-lan@lists.osuosl.org, netdev@vger.kernel.org,
	daniel.machon@microchip.com
Subject: Re: [PATCH iwl-next v3 06/10] ice: Flesh out implementation of support for SRIOV on bonded interface
Date: Fri, 9 Jun 2023 11:01:19 +0200	[thread overview]
Message-ID: <ZILqX7x7RP/cN5+0@corigine.com> (raw)
In-Reply-To: <20230608180618.574171-7-david.m.ertman@intel.com>

On Thu, Jun 08, 2023 at 11:06:14AM -0700, Dave Ertman wrote:

...

> @@ -245,6 +353,167 @@ static void
>  ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
>  			u16 vsi_num, u8 tc)
>  {
> +	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
> +	struct ice_sched_node *n_prt, *tc_node, *aggnode;
> +	u16 numq, valq, buf_size, num_moved, qbuf_size;
> +	struct device *dev = ice_pf_to_dev(lag->pf);
> +	struct ice_aqc_cfg_txqs_buf *qbuf;
> +	struct ice_aqc_move_elem *buf;
> +	struct ice_hw *new_hw = NULL;
> +	struct ice_port_info *pi;
> +	__le32 teid, parent_teid;
> +	struct ice_vsi_ctx *ctx;
> +	u8 aggl, vsil;
> +	u32 tmp_teid;
> +	int n;
> +
> +	ctx = ice_get_vsi_ctx(&lag->pf->hw, vsi_num);
> +	if (!ctx) {
> +		dev_warn(dev, "Unable to locate VSI context for LAG failover\n");
> +		return;
> +	}
> +
> +	/* check to see if this VF is enabled on this TC */
> +	if (!ctx->sched.vsi_node[tc])
> +		return;
> +
> +	/* locate HW struct for destination port */
> +	new_hw = ice_lag_find_hw_by_lport(lag, newport);
> +	if (!new_hw) {
> +		dev_warn(dev, "Unable to locate HW struct for LAG node destination\n");
> +		return;
> +	}
> +
> +	pi = new_hw->port_info;
> +
> +	numq = ctx->num_lan_q_entries[tc];
> +	teid = ctx->sched.vsi_node[tc]->info.node_teid;
> +	tmp_teid = le32_to_cpu(teid);
> +	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
> +	/* if no teid assigned or numq == 0, then this TC is not active */
> +	if (!tmp_teid || !numq)
> +		return;
> +
> +	/* suspend VSI subtree for Traffic Class "tc" on
> +	 * this VF's VSI
> +	 */
> +	if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, true))
> +		dev_dbg(dev, "Problem suspending traffic for LAG node move\n");
> +
> +	/* reconfigure all VF's queues on this Traffic Class
> +	 * to new port
> +	 */
> +	qbuf_size = struct_size(qbuf, queue_info, numq);
> +	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
> +	if (!qbuf) {
> +		dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
> +		goto resume_traffic;
> +	}
> +
> +	/* add the per queue info for the reconfigure command buffer */
> +	valq = ice_lag_qbuf_recfg(&lag->pf->hw, qbuf, vsi_num, numq, tc);
> +	if (!valq) {
> +		dev_dbg(dev, "No valid queues found for LAG failover\n");
> +		goto qbuf_none;
> +	}
> +
> +	if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
> +			       newport, NULL)) {
> +		dev_warn(dev, "Failure to configure queues for LAG failover\n");
> +		goto qbuf_err;
> +	}
> +
> +qbuf_none:
> +	kfree(qbuf);
> +
> +	/* find new parent in destination port's tree for VF VSI node on this
> +	 * Traffic Class
> +	 */
> +	tc_node = ice_sched_get_tc_node(pi, tc);
> +	if (!tc_node) {
> +		dev_warn(dev, "Failure to find TC node in failover tree\n");
> +		goto resume_traffic;
> +	}
> +
> +	aggnode = ice_sched_get_agg_node(pi, tc_node,
> +					 ICE_DFLT_AGG_ID);
> +	if (!aggnode) {
> +		dev_warn(dev, "Failure to find aggregate node in failover tree\n");
> +		goto resume_traffic;
> +	}
> +
> +	aggl = ice_sched_get_agg_layer(new_hw);
> +	vsil = ice_sched_get_vsi_layer(new_hw);
> +
> +	for (n = aggl + 1; n < vsil; n++)
> +		num_nodes[n] = 1;
> +
> +	for (n = 0; n < aggnode->num_children; n++) {
> +		n_prt = ice_sched_get_free_vsi_parent(new_hw,
> +						      aggnode->children[n],
> +						      num_nodes);
> +		if (n_prt)
> +			break;
> +	}
> +
> +	/* add parent if none were free */
> +	if (!n_prt) {

Hi Dave,

I suppose this can't happen.
But if aggnode->num_children is 0 then n_prt will be uninitialised here.

> +		u16 num_nodes_added;
> +		u32 first_teid;
> +		int status;
> +
> +		n_prt = aggnode;
> +		for (n = aggl + 1; n < vsil; n++) {
> +			status = ice_sched_add_nodes_to_layer(pi, tc_node,
> +							      n_prt, n,
> +							      num_nodes[n],
> +							      &first_teid,
> +							      &num_nodes_added);
> +			if (status || num_nodes[n] != num_nodes_added)
> +				goto resume_traffic;
> +
> +			if (num_nodes_added)
> +				n_prt = ice_sched_find_node_by_teid(tc_node,
> +								    first_teid);
> +			else
> +				n_prt = n_prt->children[0];
> +			if (!n_prt) {
> +				dev_warn(dev, "Failure to add new parent for LAG node\n");
> +				goto resume_traffic;
> +			}
> +		}
> +	}
> +
> +	/* Move VF's VSI node for this TC to newport's scheduler tree */
> +	buf_size = struct_size(buf, teid, 1);
> +	buf = kzalloc(buf_size, GFP_KERNEL);
> +	if (!buf) {
> +		dev_warn(dev, "Failure to alloc memory for VF node failover\n");
> +		goto resume_traffic;
> +	}
> +
> +	buf->hdr.src_parent_teid = parent_teid;
> +	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
> +	buf->hdr.num_elems = cpu_to_le16(1);
> +	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
> +	buf->teid[0] = teid;
> +
> +	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
> +				    NULL))
> +		dev_warn(dev, "Failure to move VF nodes for failover\n");
> +	else
> +		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
> +
> +	kfree(buf);
> +	goto resume_traffic;
> +
> +qbuf_err:
> +	kfree(qbuf);
> +
> +resume_traffic:
> +	/* restart traffic for VSI node */
> +	if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, false))
> +		dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
>  }

...

> @@ -362,6 +735,155 @@ static void
>  ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
>  		      u8 tc)
>  {
> +	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
> +	struct ice_sched_node *n_prt, *tc_node, *aggnode;
> +	u16 numq, valq, buf_size, num_moved, qbuf_size;
> +	struct device *dev = ice_pf_to_dev(lag->pf);
> +	struct ice_aqc_cfg_txqs_buf *qbuf;
> +	struct ice_aqc_move_elem *buf;
> +	struct ice_port_info *pi;
> +	__le32 teid, parent_teid;
> +	struct ice_vsi_ctx *ctx;
> +	struct ice_hw *hw;
> +	u8 aggl, vsil;
> +	u32 tmp_teid;
> +	int n;
> +
> +	hw = &lag->pf->hw;
> +	ctx = ice_get_vsi_ctx(hw, vsi_num);
> +	if (!ctx) {
> +		dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
> +		return;
> +	}
> +
> +	/* check to see if this VF is enabled on this TC */
> +	if (!ctx->sched.vsi_node[tc])
> +		return;
> +
> +	numq = ctx->num_lan_q_entries[tc];
> +	teid = ctx->sched.vsi_node[tc]->info.node_teid;
> +	tmp_teid = le32_to_cpu(teid);
> +	parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
> +
> +	/* if !teid or !numq, then this TC is not active */
> +	if (!tmp_teid || !numq)
> +		return;
> +
> +	/* suspend traffic */
> +	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
> +		dev_dbg(dev, "Problem suspending traffic for LAG node move\n");
> +
> +	/* reconfig queues for new port */
> +	qbuf_size = struct_size(qbuf, queue_info, numq);
> +	qbuf = kzalloc(qbuf_size, GFP_KERNEL);
> +	if (!qbuf) {
> +		dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
> +		goto resume_reclaim;
> +	}
> +
> +	/* add the per queue info for the reconfigure command buffer */
> +	valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
> +	if (!valq) {
> +		dev_dbg(dev, "No valid queues found for LAG reclaim\n");
> +		goto reclaim_none;
> +	}
> +
> +	if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
> +			       src_hw->port_info->lport, hw->port_info->lport,
> +			       NULL)) {
> +		dev_warn(dev, "Failure to configure queues for LAG failover\n");
> +		goto reclaim_qerr;
> +	}
> +
> +reclaim_none:
> +	kfree(qbuf);
> +
> +	/* find parent in primary tree */
> +	pi = hw->port_info;
> +	tc_node = ice_sched_get_tc_node(pi, tc);
> +	if (!tc_node) {
> +		dev_warn(dev, "Failure to find TC node in failover tree\n");
> +		goto resume_reclaim;
> +	}
> +
> +	aggnode = ice_sched_get_agg_node(pi, tc_node, ICE_DFLT_AGG_ID);
> +	if (!aggnode) {
> +		dev_warn(dev, "Failure to find aggregate node in failover tree\n");
> +		goto resume_reclaim;
> +	}
> +
> +	aggl = ice_sched_get_agg_layer(hw);
> +	vsil = ice_sched_get_vsi_layer(hw);
> +
> +	for (n = aggl + 1; n < vsil; n++)
> +		num_nodes[n] = 1;
> +
> +	for (n = 0; n < aggnode->num_children; n++) {
> +		n_prt = ice_sched_get_free_vsi_parent(hw, aggnode->children[n],
> +						      num_nodes);
> +		if (n_prt)
> +			break;
> +	}
> +
> +	/* if no free parent found - add one */
> +	if (!n_prt) {

Likewise, here too.

> +		u16 num_nodes_added;
> +		u32 first_teid;
> +		int status;
> +
> +		n_prt = aggnode;
> +		for (n = aggl + 1; n < vsil; n++) {
> +			status = ice_sched_add_nodes_to_layer(pi, tc_node,
> +							      n_prt, n,
> +							      num_nodes[n],
> +							      &first_teid,
> +							      &num_nodes_added);
> +			if (status || num_nodes[n] != num_nodes_added)
> +				goto resume_reclaim;
> +
> +			if (num_nodes_added)
> +				n_prt = ice_sched_find_node_by_teid(tc_node,
> +								    first_teid);
> +			else
> +				n_prt = n_prt->children[0];
> +
> +			if (!n_prt) {
> +				dev_warn(dev, "Failure to add new parent for LAG node\n");
> +				goto resume_reclaim;
> +			}
> +		}
> +	}
> +
> +	/* Move node to new parent */
> +	buf_size = struct_size(buf, teid, 1);
> +	buf = kzalloc(buf_size, GFP_KERNEL);
> +	if (!buf) {
> +		dev_warn(dev, "Failure to alloc memory for VF node failover\n");
> +		goto resume_reclaim;
> +	}
> +
> +	buf->hdr.src_parent_teid = parent_teid;
> +	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
> +	buf->hdr.num_elems = cpu_to_le16(1);
> +	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
> +	buf->teid[0] = teid;
> +
> +	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
> +				    NULL))
> +		dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
> +	else
> +		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
> +
> +	kfree(buf);
> +	goto resume_reclaim;
> +
> +reclaim_qerr:
> +	kfree(qbuf);
> +
> +resume_reclaim:
> +	/* restart traffic */
> +	if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
> +		dev_warn(dev, "Problem restarting traffic for LAG node reclaim\n");
>  }

...

  reply	other threads:[~2023-06-09  9:02 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-06-08 18:06 [PATCH iwl-next v3 00/10] Implement support for SRIOV + LAG Dave Ertman
2023-06-08 18:06 ` [PATCH iwl-next v3 01/10] ice: Correctly initialize queue context values Dave Ertman
2023-06-09 10:45   ` Daniel Machon
2023-06-08 18:06 ` [PATCH iwl-next v3 02/10] ice: Add driver support for firmware changes for LAG Dave Ertman
2023-06-08 18:06 ` [PATCH iwl-next v3 03/10] ice: changes to the interface with the HW and FW for SRIOV_VF+LAG Dave Ertman
2023-06-09 10:05   ` Daniel Machon
2023-06-08 18:06 ` [PATCH iwl-next v3 04/10] ice: implement lag netdev event handler Dave Ertman
2023-06-09 10:00   ` Daniel Machon
2023-06-09 16:34     ` Ertman, David M
2023-06-08 18:06 ` [PATCH iwl-next v3 05/10] ice: process events created by " Dave Ertman
2023-06-08 18:06 ` [PATCH iwl-next v3 06/10] ice: Flesh out implementation of support for SRIOV on bonded interface Dave Ertman
2023-06-09  9:01   ` Simon Horman [this message]
2023-06-09 16:32     ` Ertman, David M
2023-06-12  7:23       ` Simon Horman
2023-06-09 10:40   ` Daniel Machon
2023-06-09 16:39     ` Ertman, David M
2023-06-08 18:06 ` [PATCH iwl-next v3 07/10] ice: support non-standard teardown of bond interface Dave Ertman
2023-06-08 18:06 ` [PATCH iwl-next v3 08/10] ice: enforce interface eligibility and add messaging for SRIOV LAG Dave Ertman
2023-06-08 18:06 ` [PATCH iwl-next v3 09/10] ice: enforce no DCB config changing when in bond Dave Ertman
2023-06-08 18:06 ` [PATCH iwl-next v3 10/10] ice: update reset path for SRIOV LAG support Dave Ertman
2023-06-09 10:42   ` Daniel Machon

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ZILqX7x7RP/cN5+0@corigine.com \
    --to=simon.horman@corigine.com \
    --cc=daniel.machon@microchip.com \
    --cc=david.m.ertman@intel.com \
    --cc=intel-wired-lan@lists.osuosl.org \
    --cc=netdev@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).