From: Saeed Mahameed <saeed@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>,
	netdev@vger.kernel.org, Tariq Toukan <tariqt@nvidia.com>,
	Shay Drory <shayd@nvidia.com>, Mark Bloch <mbloch@nvidia.com>,
	Roi Dayan <roid@nvidia.com>
Subject: [net-next V2 05/14] net/mlx5e: Handle offloads flows per peer
Date: Fri,  2 Jun 2023 12:12:52 -0700
Message-ID: <20230602191301.47004-6-saeed@kernel.org>
In-Reply-To: <20230602191301.47004-1-saeed@kernel.org>

From: Shay Drory <shayd@nvidia.com>

Currently, the E-switch offloads table holds a single list of all
flows that create a peer_flow over the peer eswitch.
In order to support more than one peer, extend the E-switch offloads
table's peer_flows into an array of lists, where each peer has a
dedicated index obtained via mlx5_get_dev_index(). In addition,
extend the original flow to hold an array of peer list entries as
well.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 .../ethernet/mellanox/mlx5/core/en/tc_priv.h  |  2 +-
 .../net/ethernet/mellanox/mlx5/core/en_tc.c   | 37 +++++++++++++++----
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |  2 +-
 .../mellanox/mlx5/core/eswitch_offloads.c     |  4 +-
 4 files changed, 34 insertions(+), 11 deletions(-)
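
For readers less familiar with the pattern, below is a minimal,
self-contained userspace C sketch (not kernel code) of the per-peer
indexing the commit message describes: one list head per possible
peer, selected by a device index, with the device's own index skipped.
The toy_* identifiers and TOY_MAX_PORTS are invented for illustration
only and stand in for the kernel's struct list_head helpers,
MLX5_MAX_PORTS and mlx5_get_dev_index().

/*
 * Toy model of the per-peer list arrays introduced by this patch.
 * Illustration only; not part of the patch below.
 */
#include <stdio.h>

#define TOY_MAX_PORTS 4

struct toy_list_head {
	struct toy_list_head *next, *prev;
};

static void toy_list_init(struct toy_list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void toy_list_add_tail(struct toy_list_head *n, struct toy_list_head *h)
{
	/* link n in as the last element before the list head h */
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct toy_offloads {
	/* one list of duplicated flows per possible peer device */
	struct toy_list_head peer_flows[TOY_MAX_PORTS];
};

struct toy_flow {
	int dev_index;                            /* owning device index */
	struct toy_list_head peer[TOY_MAX_PORTS]; /* one list entry per peer */
};

int main(void)
{
	struct toy_offloads offloads;
	struct toy_flow flow = { .dev_index = 0 };
	int i;

	for (i = 0; i < TOY_MAX_PORTS; i++)
		toy_list_init(&offloads.peer_flows[i]);

	/* duplicate the flow toward every peer except the owning device */
	for (i = 0; i < TOY_MAX_PORTS; i++) {
		if (i == flow.dev_index)
			continue;
		toy_list_add_tail(&flow.peer[i], &offloads.peer_flows[i]);
		printf("flow linked into peer_flows[%d]\n", i);
	}
	return 0;
}

The same skip-own-index loop shape appears in the real code below, in
mlx5e_tc_del_fdb_peers_flow() and mlx5e_tc_clean_fdb_peer_flows().
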

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 8a500a966f06..6cc23af66b5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -96,7 +96,7 @@ struct mlx5e_tc_flow {
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head hairpin; /* flows sharing the same hairpin */
-	struct list_head peer;    /* flows with peer flow */
+	struct list_head peer[MLX5_MAX_PORTS];    /* flows with peer flow */
 	struct list_head unready; /* flows not ready to be offloaded (e.g
 				   * due to missing route)
 				   */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index a096005fd163..c7797e3de093 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1980,7 +1980,8 @@ void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
 		mlx5e_flow_put(priv, flow);
 }
 
-static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
+static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
+				       int peer_index)
 {
 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
 	struct mlx5e_tc_flow *peer_flow;
@@ -1991,18 +1992,32 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
 		return;
 
 	mutex_lock(&esw->offloads.peer_mutex);
-	list_del(&flow->peer);
+	list_del(&flow->peer[peer_index]);
 	mutex_unlock(&esw->offloads.peer_mutex);
 
-	flow_flag_clear(flow, DUP);
-
 	list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
+		if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+			continue;
 		if (refcount_dec_and_test(&peer_flow->refcnt)) {
 			mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
 			list_del(&peer_flow->peer_flows);
 			kfree(peer_flow);
 		}
 	}
+
+	if (list_empty(&flow->peer_flows))
+		flow_flag_clear(flow, DUP);
+}
+
+static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		if (i == mlx5_get_dev_index(flow->priv->mdev))
+			continue;
+		mlx5e_tc_del_fdb_peer_flow(flow, i);
+	}
 }
 
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
@@ -2017,7 +2032,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
-		mlx5e_tc_del_fdb_peer_flow(flow);
+		mlx5e_tc_del_fdb_peers_flow(flow);
 		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
@@ -4403,6 +4418,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
+	int i = mlx5_get_dev_index(peer_esw->dev);
 	struct mlx5e_rep_priv *peer_urpriv;
 	struct mlx5e_tc_flow *peer_flow;
 	struct mlx5_core_dev *in_mdev;
@@ -4435,7 +4451,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
 	list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
 	flow_flag_set(flow, DUP);
 	mutex_lock(&esw->offloads.peer_mutex);
-	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
+	list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
 	mutex_unlock(&esw->offloads.peer_mutex);
 
 out:
@@ -5288,9 +5304,14 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
 {
 	struct mlx5e_tc_flow *flow, *tmp;
+	int i;
 
-	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
-		mlx5e_tc_del_fdb_peer_flow(flow);
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		if (i == mlx5_get_dev_index(esw->dev))
+			continue;
+		list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
+			mlx5e_tc_del_fdb_peers_flow(flow);
+	}
 }
 
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index f70124ad71cf..eadc39542e5e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -249,7 +249,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_group *vport_rx_drop_group;
 	struct mlx5_flow_handle *vport_rx_drop_rule;
 	struct xarray vport_reps;
-	struct list_head peer_flows;
+	struct list_head peer_flows[MLX5_MAX_PORTS];
 	struct mutex peer_mutex;
 	struct mutex encap_tbl_lock; /* protects encap_tbl */
 	DECLARE_HASHTABLE(encap_tbl, 8);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9526382f1573..a767f3d52c76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2825,8 +2825,10 @@ static int mlx5_esw_offloads_devcom_event(int event,
 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+	int i;
 
-	INIT_LIST_HEAD(&esw->offloads.peer_flows);
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
 	mutex_init(&esw->offloads.peer_mutex);
 
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
-- 
2.40.1

