* [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode
@ 2023-07-11  9:28 Leon Romanovsky
  2023-07-11  9:28 ` [PATCH net-next 01/12] net/mlx5e: Add function to get IPsec offload namespace Leon Romanovsky
                   ` (11 more replies)
  0 siblings, 12 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:28 UTC
  To: Jakub Kicinski
  Cc: Leon Romanovsky, Eric Dumazet, Jianbo Liu, Mark Bloch, netdev,
	Paolo Abeni, Saeed Mahameed, David S . Miller

From: Leon Romanovsky <leonro@nvidia.com>

Hi,

This series from Jianbo adds mlx5 IPsec packet offload support when the
eswitch is in offloaded (switchdev) mode.

Thanks

Jianbo Liu (12):
  net/mlx5e: Add function to get IPsec offload namespace
  net/mlx5e: Change the parameter of IPsec RX skb handle function
  net/mlx5e: Prepare IPsec packet offload for switchdev mode
  net/mlx5e: Refactor IPsec RX tables creation and destruction
  net/mlx5e: Support IPsec packet offload for RX in switchdev mode
  net/mlx5e: Handle IPsec offload for RX datapath in switchdev mode
  net/mlx5e: Refactor IPsec TX tables creation
  net/mlx5e: Support IPsec packet offload for TX in switchdev mode
  net/mlx5: Compare with old_dest param to modify rule destination
  net/mlx5e: Make IPsec offload work together with eswitch and TC
  net/mlx5e: Modify and restore TC rules for IPSec TX rules
  net/mlx5e: Add get IPsec offload stats for uplink representor

 .../net/ethernet/mellanox/mlx5/core/Makefile  |   4 +
 .../ethernet/mellanox/mlx5/core/en/rep/tc.c   |  17 +-
 .../mellanox/mlx5/core/en_accel/ipsec.c       |   2 +
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  65 +-
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 626 ++++++++++++------
 .../mlx5/core/en_accel/ipsec_offload.c        |   3 +-
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c  |  25 +-
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h  |   6 +-
 .../net/ethernet/mellanox/mlx5/core/en_rep.c  |   1 +
 .../net/ethernet/mellanox/mlx5/core/en_rx.c   |   3 +-
 .../mellanox/mlx5/core/esw/ipsec_fs.c         | 325 +++++++++
 .../mellanox/mlx5/core/esw/ipsec_fs.h         |  67 ++
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |  17 +
 .../mellanox/mlx5/core/eswitch_offloads.c     | 174 ++++-
 .../net/ethernet/mellanox/mlx5/core/fs_core.c |  14 +-
 include/linux/mlx5/eswitch.h                  |   3 +
 include/linux/mlx5/fs.h                       |   2 +
 17 files changed, 1141 insertions(+), 213 deletions(-)
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h

-- 
2.41.0



* [PATCH net-next 01/12] net/mlx5e: Add function to get IPsec offload namespace
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
@ 2023-07-11  9:28 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 02/12] net/mlx5e: Change the parameter of IPsec RX skb handle function Leon Romanovsky
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:28 UTC
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

Add a helper function to get the flow namespace for each direction. It
will be extended for switchdev mode in a later patch; there is no
functional change for now.
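
For readers skimming the diff, the direction-to-namespace mapping this
helper centralizes can be sketched as a standalone program (the enums
below are stand-ins for the real mlx5/xfrm definitions, not the kernel
code itself):

#include <stdio.h>

/* Stand-ins for the kernel definitions; illustration only. */
enum offload_dir { XFRM_DEV_OFFLOAD_IN, XFRM_DEV_OFFLOAD_OUT };
enum ns_type { NS_KERNEL, NS_EGRESS };

/* Mirrors ipsec_fs_get_ns(): RX rules live in the kernel (ingress)
 * namespace and TX rules in the egress namespace; a later patch in
 * this series adds an FDB case for switchdev mode. */
static enum ns_type get_ns(enum offload_dir dir)
{
	if (dir == XFRM_DEV_OFFLOAD_IN)
		return NS_KERNEL;
	return NS_EGRESS;
}

int main(void)
{
	printf("IN -> %d, OUT -> %d\n",
	       get_ns(XFRM_DEV_OFFLOAD_IN), get_ns(XFRM_DEV_OFFLOAD_OUT));
	return 0;
}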

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 46 +++++++++----------
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index dbe87bf89c0d..7976469108de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -814,11 +814,20 @@ static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upsp
 	}
 }
 
-static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec, u8 dir)
+{
+	if (dir == XFRM_DEV_OFFLOAD_IN)
+		return MLX5_FLOW_NAMESPACE_KERNEL;
+
+	return MLX5_FLOW_NAMESPACE_EGRESS;
+}
+
+static int setup_modify_header(struct mlx5e_ipsec *ipsec, u32 val, u8 dir,
 			       struct mlx5_flow_act *flow_act)
 {
+	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, dir);
 	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
-	enum mlx5_flow_namespace_type ns_type;
+	struct mlx5_core_dev *mdev = ipsec->mdev;
 	struct mlx5_modify_hdr *modify_hdr;
 
 	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
@@ -826,12 +835,10 @@ static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
 	case XFRM_DEV_OFFLOAD_IN:
 		MLX5_SET(set_action_in, action, field,
 			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
 		break;
 	case XFRM_DEV_OFFLOAD_OUT:
 		MLX5_SET(set_action_in, action, field,
 			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
-		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
 		break;
 	default:
 		return -EINVAL;
@@ -991,26 +998,16 @@ setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
 	return 0;
 }
 
-static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
+static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
 			      struct mlx5_flow_act *flow_act)
 {
+	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->dir);
 	struct mlx5_pkt_reformat_params reformat_params = {};
+	struct mlx5_core_dev *mdev = ipsec->mdev;
 	struct mlx5_pkt_reformat *pkt_reformat;
-	enum mlx5_flow_namespace_type ns_type;
 	int ret;
 
-	switch (attrs->dir) {
-	case XFRM_DEV_OFFLOAD_IN:
-		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
-		break;
-	case XFRM_DEV_OFFLOAD_OUT:
-		ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
-		break;
-	default:
-		return -EINVAL;
-	}
-
 	switch (attrs->mode) {
 	case XFRM_MODE_TRANSPORT:
 		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
@@ -1068,14 +1065,14 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	setup_fte_esp(spec);
 	setup_fte_no_frags(spec);
 
-	err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
+	err = setup_modify_header(ipsec, sa_entry->ipsec_obj_id | BIT(31),
 				  XFRM_DEV_OFFLOAD_IN, &flow_act);
 	if (err)
 		goto err_mod_header;
 
 	switch (attrs->type) {
 	case XFRM_DEV_OFFLOAD_PACKET:
-		err = setup_pkt_reformat(mdev, attrs, &flow_act);
+		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
 		if (err)
 			goto err_pkt_reformat;
 		break;
@@ -1169,7 +1166,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	case XFRM_DEV_OFFLOAD_PACKET:
 		if (attrs->reqid)
 			setup_fte_reg_c0(spec, attrs->reqid);
-		err = setup_pkt_reformat(mdev, attrs, &flow_act);
+		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
 		if (err)
 			goto err_pkt_reformat;
 		break;
@@ -1226,15 +1223,16 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
-	struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
+	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
 	struct mlx5_flow_destination dest[2] = {};
+	struct mlx5e_ipsec_tx *tx = ipsec->tx;
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_table *ft;
 	int err, dstn = 0;
 
-	ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
+	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
@@ -1258,7 +1256,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		if (!attrs->reqid)
 			break;
 
-		err = setup_modify_header(mdev, attrs->reqid,
+		err = setup_modify_header(ipsec, attrs->reqid,
 					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
 		if (err)
 			goto err_mod_header;
@@ -1298,7 +1296,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 err_mod_header:
 	kvfree(spec);
 err_alloc:
-	tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
+	tx_ft_put_policy(ipsec, attrs->prio);
 	return err;
 }
 
-- 
2.41.0



* [PATCH net-next 02/12] net/mlx5e: Change the parameter of IPsec RX skb handle function
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
  2023-07-11  9:28 ` [PATCH net-next 01/12] net/mlx5e: Add function to get IPsec offload namespace Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 03/12] net/mlx5e: Prepare IPsec packet offload for switchdev mode Leon Romanovsky
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

Refactor the function so it takes only the reg B value, rather than the
whole CQE. There is no functional change; this prepares for handling the
RX datapath in switchdev mode later in the series.
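
As a rough, self-contained illustration of the new calling convention
(assuming the reg B layout used by rx_add_rule() in this file, where the
SA object id is tagged with BIT(31); the decode helper below is
hypothetical, not a driver function):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IPSEC_FLOW_TAG (1u << 31)	/* BIT(31) set by the RX SA rule */

/* Hypothetical decoder: the handler now receives the reg B metadata
 * as a plain u32 instead of digging it out of the CQE itself. */
static uint32_t ipsec_obj_id(uint32_t ft_metadata)
{
	return ft_metadata & ~IPSEC_FLOW_TAG;
}

int main(void)
{
	/* The caller converts the big-endian CQE field first, as en_rx.c
	 * does with be32_to_cpu(cqe->ft_metadata); ntohl() stands in. */
	uint32_t meta = ntohl(htonl(0x2au | IPSEC_FLOW_TAG));

	printf("ipsec obj id = 0x%x\n", ipsec_obj_id(meta));
	return 0;
}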

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 3 +--
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 4 ++--
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c               | 3 ++-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index eab5bc718771..8d6379ac4574 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -309,9 +309,8 @@ enum {
 
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
-				       struct mlx5_cqe64 *cqe)
+				       u32 ipsec_meta_data)
 {
-	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_ipsec *ipsec = priv->ipsec;
 	struct mlx5e_ipsec_sa_entry *sa_entry;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 1878a70b9031..436e9a8a32d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -66,7 +66,7 @@ void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
 			       struct mlx5_wqe_inline_seg *inlseg);
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
-				       struct mlx5_cqe64 *cqe);
+				       u32 ipsec_meta_data);
 static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
 {
 	return ipsec_st->tailen;
@@ -145,7 +145,7 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 static inline
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
-				       struct mlx5_cqe64 *cqe)
+				       u32 ipsec_meta_data)
 {}
 
 static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 704b022cd1f0..b9e92456ec4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1535,7 +1535,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
 
 	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
-		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
+		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
+						  be32_to_cpu(cqe->ft_metadata));
 
 	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
-- 
2.41.0



* [PATCH net-next 03/12] net/mlx5e: Prepare IPsec packet offload for switchdev mode
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
  2023-07-11  9:28 ` [PATCH net-next 01/12] net/mlx5e: Add function to get IPsec offload namespace Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 02/12] net/mlx5e: Change the parameter of IPsec RX skb handle function Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 04/12] net/mlx5e: Refactor IPsec RX tables creation and destruction Leon Romanovsky
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

As the uplink representor is created only in switchdev mode, add a flag
to the IPsec structure to indicate that the device is in this mode.

In this mode, the tables for packet offload are created in the FDB
namespace, so new ipsec->rx_esw and ipsec->tx_esw tables are added.
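
The table-selection rule the patch introduces can be sketched in
isolation (simplified stand-in types; not the driver code):

#include <stdio.h>

enum { OFFLOAD_CRYPTO, OFFLOAD_PACKET };	/* stand-ins for XFRM_DEV_OFFLOAD_* */
enum { FAMILY_IP4, FAMILY_IP6 };		/* stand-ins for AF_INET/AF_INET6 */

struct tables { const char *name; };

struct ipsec_ctx {
	int is_uplink_rep;
	struct tables rx_ipv4, rx_ipv6, rx_esw;
};

/* Mirrors ipsec_rx(): packet offload on the uplink representor uses the
 * eswitch (FDB) tables, everything else the per-family NIC tables. */
static struct tables *pick_rx(struct ipsec_ctx *c, int family, int type)
{
	if (c->is_uplink_rep && type == OFFLOAD_PACKET)
		return &c->rx_esw;
	return family == FAMILY_IP4 ? &c->rx_ipv4 : &c->rx_ipv6;
}

int main(void)
{
	struct ipsec_ctx c = { 1, { "rx_ipv4" }, { "rx_ipv6" }, { "rx_esw" } };

	printf("%s\n", pick_rx(&c, FAMILY_IP4, OFFLOAD_PACKET)->name);
	return 0;
}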

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       |   2 +
 .../mellanox/mlx5/core/en_accel/ipsec.h       |   3 +
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 262 ++++++++++++------
 3 files changed, 184 insertions(+), 83 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 891d39b4bfd4..ad62424249e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -40,6 +40,7 @@
 #include "en.h"
 #include "ipsec.h"
 #include "ipsec_rxtx.h"
+#include "en_rep.h"
 
 #define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
 #define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1
@@ -835,6 +836,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 			goto clear_aso;
 	}
 
+	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
 	ret = mlx5e_accel_ipsec_fs_init(ipsec);
 	if (ret)
 		goto err_fs_init;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 4e9887171508..ef64a042191d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -170,11 +170,14 @@ struct mlx5e_ipsec {
 	struct mlx5e_flow_steering *fs;
 	struct mlx5e_ipsec_rx *rx_ipv4;
 	struct mlx5e_ipsec_rx *rx_ipv6;
+	struct mlx5e_ipsec_rx *rx_esw;
 	struct mlx5e_ipsec_tx *tx;
+	struct mlx5e_ipsec_tx *tx_esw;
 	struct mlx5e_ipsec_aso *aso;
 	struct notifier_block nb;
 	struct notifier_block netevent_nb;
 	struct mlx5_ipsec_fs *roce;
+	u8 is_uplink_rep: 1;
 };
 
 struct mlx5e_ipsec_esn_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 7976469108de..0c50fcea4797 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -60,14 +60,25 @@ static enum mlx5_traffic_types family2tt(u32 family)
 	return MLX5_TT_IPV6_IPSEC_ESP;
 }
 
-static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family)
+static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
 {
+	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+		return ipsec->rx_esw;
+
 	if (family == AF_INET)
 		return ipsec->rx_ipv4;
 
 	return ipsec->rx_ipv6;
 }
 
+static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
+{
+	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+		return ipsec->tx_esw;
+
+	return ipsec->tx;
+}
+
 static struct mlx5_fs_chains *
 ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
 		    enum mlx5_flow_namespace_type ns, int base_prio,
@@ -406,9 +417,10 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
 }
 
 static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
-					struct mlx5e_ipsec *ipsec, u32 family)
+					struct mlx5e_ipsec *ipsec, u32 family,
+					int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 	int err;
 
 	mutex_lock(&rx->ft.mutex);
@@ -422,9 +434,9 @@ static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
 
 static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
 						struct mlx5e_ipsec *ipsec,
-						u32 family, u32 prio)
+						u32 family, u32 prio, int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 	struct mlx5_flow_table *ft;
 	int err;
 
@@ -449,18 +461,18 @@ static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
 	return ERR_PTR(err);
 }
 
-static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family)
+static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 
 	mutex_lock(&rx->ft.mutex);
 	rx_put(ipsec, rx, family);
 	mutex_unlock(&rx->ft.mutex);
 }
 
-static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio)
+static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
 {
-	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family);
+	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
 
 	mutex_lock(&rx->ft.mutex);
 	if (rx->chains)
@@ -629,9 +641,9 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
 
 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
 						struct mlx5e_ipsec *ipsec,
-						u32 prio)
+						u32 prio, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 	struct mlx5_flow_table *ft;
 	int err;
 
@@ -657,9 +669,9 @@ static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
 }
 
 static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
-					struct mlx5e_ipsec *ipsec)
+					struct mlx5e_ipsec *ipsec, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 	int err;
 
 	mutex_lock(&tx->ft.mutex);
@@ -671,18 +683,18 @@ static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
 	return tx;
 }
 
-static void tx_ft_put(struct mlx5e_ipsec *ipsec)
+static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 
 	mutex_lock(&tx->ft.mutex);
 	tx_put(ipsec, tx);
 	mutex_unlock(&tx->ft.mutex);
 }
 
-static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
+static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
 {
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
+	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
 
 	mutex_lock(&tx->ft.mutex);
 	if (tx->chains)
@@ -1046,7 +1058,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct mlx5_fc *counter;
 	int err;
 
-	rx = rx_ft_get(mdev, ipsec, attrs->family);
+	rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
 	if (IS_ERR(rx))
 		return PTR_ERR(rx);
 
@@ -1122,7 +1134,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 err_mod_header:
 	kvfree(spec);
 err_alloc:
-	rx_ft_put(ipsec, attrs->family);
+	rx_ft_put(ipsec, attrs->family, attrs->type);
 	return err;
 }
 
@@ -1139,7 +1151,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct mlx5_fc *counter;
 	int err;
 
-	tx = tx_ft_get(mdev, ipsec);
+	tx = tx_ft_get(mdev, ipsec, attrs->type);
 	if (IS_ERR(tx))
 		return PTR_ERR(tx);
 
@@ -1215,7 +1227,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 err_pkt_reformat:
 	kvfree(spec);
 err_alloc:
-	tx_ft_put(ipsec);
+	tx_ft_put(ipsec, attrs->type);
 	return err;
 }
 
@@ -1225,14 +1237,14 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
 	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
 	struct mlx5_flow_destination dest[2] = {};
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_table *ft;
+	struct mlx5e_ipsec_tx *tx;
 	int err, dstn = 0;
 
-	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio);
+	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
@@ -1242,6 +1254,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		goto err_alloc;
 	}
 
+	tx = ipsec_tx(ipsec, attrs->type);
 	if (attrs->family == AF_INET)
 		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
 	else
@@ -1296,7 +1309,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 err_mod_header:
 	kvfree(spec);
 err_alloc:
-	tx_ft_put_policy(ipsec, attrs->prio);
+	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
 	return err;
 }
 
@@ -1312,11 +1325,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	struct mlx5e_ipsec_rx *rx;
 	int err, dstn = 0;
 
-	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio);
+	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
+			      attrs->type);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 
-	rx = ipsec_rx(pol_entry->ipsec, attrs->family);
+	rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
 
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec) {
@@ -1365,88 +1379,110 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 err_action:
 	kvfree(spec);
 err_alloc:
-	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio);
+	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
 	return err;
 }
 
+static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
+					    struct mlx5e_ipsec_fc *fc)
+{
+	mlx5_fc_destroy(mdev, fc->drop);
+	mlx5_fc_destroy(mdev, fc->cnt);
+	kfree(fc);
+}
+
 static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
 {
-	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
 	struct mlx5_core_dev *mdev = ipsec->mdev;
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
-
-	mlx5_fc_destroy(mdev, tx->fc->drop);
-	mlx5_fc_destroy(mdev, tx->fc->cnt);
-	kfree(tx->fc);
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
-	kfree(rx_ipv4->fc);
+
+	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
+	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
+	if (ipsec->is_uplink_rep) {
+		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
+		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
+	}
 }
 
-static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
 {
-	struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
-	struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
-	struct mlx5_core_dev *mdev = ipsec->mdev;
-	struct mlx5e_ipsec_tx *tx = ipsec->tx;
 	struct mlx5e_ipsec_fc *fc;
 	struct mlx5_fc *counter;
 	int err;
 
-	fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
+	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
 	if (!fc)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
-	/* Both IPv4 and IPv6 point to same flow counters struct. */
-	rx_ipv4->fc = fc;
-	rx_ipv6->fc = fc;
 	counter = mlx5_fc_create(mdev, false);
 	if (IS_ERR(counter)) {
 		err = PTR_ERR(counter);
-		goto err_rx_cnt;
+		goto err_cnt;
 	}
-
 	fc->cnt = counter;
+
 	counter = mlx5_fc_create(mdev, false);
 	if (IS_ERR(counter)) {
 		err = PTR_ERR(counter);
-		goto err_rx_drop;
+		goto err_drop;
 	}
-
 	fc->drop = counter;
-	fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
-	if (!fc) {
-		err = -ENOMEM;
-		goto err_tx_fc;
+
+	return fc;
+
+err_drop:
+	mlx5_fc_destroy(mdev, fc->cnt);
+err_cnt:
+	kfree(fc);
+	return ERR_PTR(err);
+}
+
+static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+{
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_fc *fc;
+	int err;
+
+	fc = ipsec_fs_init_single_counter(mdev);
+	if (IS_ERR(fc)) {
+		err = PTR_ERR(fc);
+		goto err_rx_cnt;
 	}
+	ipsec->rx_ipv4->fc = fc;
 
-	tx->fc = fc;
-	counter = mlx5_fc_create(mdev, false);
-	if (IS_ERR(counter)) {
-		err = PTR_ERR(counter);
+	fc = ipsec_fs_init_single_counter(mdev);
+	if (IS_ERR(fc)) {
+		err = PTR_ERR(fc);
 		goto err_tx_cnt;
 	}
+	ipsec->tx->fc = fc;
 
-	fc->cnt = counter;
-	counter = mlx5_fc_create(mdev, false);
-	if (IS_ERR(counter)) {
-		err = PTR_ERR(counter);
-		goto err_tx_drop;
+	if (ipsec->is_uplink_rep) {
+		fc = ipsec_fs_init_single_counter(mdev);
+		if (IS_ERR(fc)) {
+			err = PTR_ERR(fc);
+			goto err_rx_esw_cnt;
+		}
+		ipsec->rx_esw->fc = fc;
+
+		fc = ipsec_fs_init_single_counter(mdev);
+		if (IS_ERR(fc)) {
+			err = PTR_ERR(fc);
+			goto err_tx_esw_cnt;
+		}
+		ipsec->tx_esw->fc = fc;
 	}
 
-	fc->drop = counter;
+	/* Both IPv4 and IPv6 point to same flow counters struct. */
+	ipsec->rx_ipv6->fc = fc;
 	return 0;
 
-err_tx_drop:
-	mlx5_fc_destroy(mdev, tx->fc->cnt);
+err_tx_esw_cnt:
+	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
+err_rx_esw_cnt:
+	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
 err_tx_cnt:
-	kfree(tx->fc);
-err_tx_fc:
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
-err_rx_drop:
-	mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
 err_rx_cnt:
-	kfree(rx_ipv4->fc);
 	return err;
 }
 
@@ -1456,6 +1492,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
 	struct mlx5e_ipsec *ipsec = priv->ipsec;
 	struct mlx5e_ipsec_hw_stats *stats;
 	struct mlx5e_ipsec_fc *fc;
+	u64 packets, bytes;
 
 	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
 
@@ -1477,6 +1514,30 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
 	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
 	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
 		      &stats->ipsec_tx_drop_bytes);
+
+	if (ipsec->is_uplink_rep) {
+		fc = ipsec->rx_esw->fc;
+		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
+			stats->ipsec_rx_pkts += packets;
+			stats->ipsec_rx_bytes += bytes;
+		}
+
+		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
+			stats->ipsec_rx_drop_pkts += packets;
+			stats->ipsec_rx_drop_bytes += bytes;
+		}
+
+		fc = ipsec->tx_esw->fc;
+		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
+			stats->ipsec_tx_pkts += packets;
+			stats->ipsec_tx_bytes += bytes;
+		}
+
+		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
+			stats->ipsec_tx_drop_pkts += packets;
+			stats->ipsec_tx_drop_bytes += bytes;
+		}
+	}
 }
 
 int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -1498,12 +1559,12 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
 
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
-		tx_ft_put(sa_entry->ipsec);
+		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
 		return;
 	}
 
 	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
-	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family);
+	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
 }
 
 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -1523,14 +1584,14 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 
 	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
 		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
-				 pol_entry->attrs.prio);
+				 pol_entry->attrs.prio, pol_entry->attrs.type);
 		return;
 	}
 
 	if (ipsec_rule->modify_hdr)
 		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
 
-	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
+	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
 }
 
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
@@ -1538,7 +1599,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 	if (!ipsec->tx)
 		return;
 
-	if (mlx5_ipsec_device_caps(ipsec->mdev) & MLX5_IPSEC_CAP_ROCE)
+	if (ipsec->roce)
 		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
 
 	ipsec_fs_destroy_counters(ipsec);
@@ -1553,12 +1614,22 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
 	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
 	kfree(ipsec->rx_ipv6);
+
+	if (ipsec->is_uplink_rep) {
+		mutex_destroy(&ipsec->tx_esw->ft.mutex);
+		WARN_ON(ipsec->tx_esw->ft.refcnt);
+		kfree(ipsec->tx_esw);
+
+		mutex_destroy(&ipsec->rx_esw->ft.mutex);
+		WARN_ON(ipsec->rx_esw->ft.refcnt);
+		kfree(ipsec->rx_esw);
+	}
 }
 
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 {
 	struct mlx5_core_dev *mdev = ipsec->mdev;
-	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_namespace *ns, *ns_esw;
 	int err = -ENOMEM;
 
 	ns = mlx5_get_flow_namespace(ipsec->mdev,
@@ -1566,9 +1637,23 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 	if (!ns)
 		return -EOPNOTSUPP;
 
+	if (ipsec->is_uplink_rep) {
+		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
+		if (!ns_esw)
+			return -EOPNOTSUPP;
+
+		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
+		if (!ipsec->tx_esw)
+			return -ENOMEM;
+
+		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
+		if (!ipsec->rx_esw)
+			goto err_rx_esw;
+	}
+
 	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
 	if (!ipsec->tx)
-		return -ENOMEM;
+		goto err_tx;
 
 	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
 	if (!ipsec->rx_ipv4)
@@ -1587,8 +1672,13 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 	mutex_init(&ipsec->rx_ipv6->ft.mutex);
 	ipsec->tx->ns = ns;
 
-	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE)
+	if (ipsec->is_uplink_rep) {
+		mutex_init(&ipsec->tx_esw->ft.mutex);
+		mutex_init(&ipsec->rx_esw->ft.mutex);
+		ipsec->tx_esw->ns = ns_esw;
+	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
 		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
+	}
 
 	return 0;
 
@@ -1598,6 +1688,10 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 	kfree(ipsec->rx_ipv4);
 err_rx_ipv4:
 	kfree(ipsec->tx);
+err_tx:
+	kfree(ipsec->rx_esw);
+err_rx_esw:
+	kfree(ipsec->tx_esw);
 	return err;
 }
 
@@ -1619,10 +1713,12 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
 
 bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
-	struct mlx5e_ipsec_rx *rx =
-		ipsec_rx(sa_entry->ipsec, sa_entry->attrs.family);
-	struct mlx5e_ipsec_tx *tx = sa_entry->ipsec->tx;
+	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+	struct mlx5e_ipsec_rx *rx;
+	struct mlx5e_ipsec_tx *tx;
 
+	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
+	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
 	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
 		return tx->allow_tunnel_mode;
 
-- 
2.41.0



* [PATCH net-next 04/12] net/mlx5e: Refactor IPsec RX tables creation and destruction
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (2 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 03/12] net/mlx5e: Prepare IPsec packet offload for switchdev mode Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 05/12] net/mlx5e: Support IPsec packet offload for RX in switchdev mode Leon Romanovsky
                   ` (7 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

Add an attribute struct for IPsec RX table creation, so that rx_create()
can be reused by the eswitch in a later patch. Also move the code for TTC
destination connect/disconnect, which is needed only in NIC mode, into
separate functions.
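
The refactor is the classic parameter-object pattern: callers fill an
attr struct once, and rx_create() reads the prio and table levels from it
instead of NIC-only constants. A minimal sketch (the numeric values are
placeholders, not the real MLX5E_* constants):

#include <stdio.h>

/* Simplified stand-in for mlx5e_ipsec_rx_create_attr. */
struct rx_create_attr {
	int prio;
	int pol_level;
	int sa_level;
	int status_level;
};

/* NIC-mode defaults, in the spirit of ipsec_rx_create_attr_set();
 * the real values are MLX5E_NIC_PRIO and MLX5E_ACCEL_FS_*_LEVEL. */
static void rx_create_attr_set(struct rx_create_attr *attr)
{
	attr->prio = 0;
	attr->pol_level = 1;
	attr->sa_level = 2;
	attr->status_level = 3;
}

static void rx_create(const struct rx_create_attr *attr)
{
	/* The real function creates the status, SA and policy tables
	 * using these levels within the given prio. */
	printf("prio=%d pol@%d sa@%d status@%d\n", attr->prio,
	       attr->pol_level, attr->sa_level, attr->status_level);
}

int main(void)
{
	struct rx_create_attr attr;

	rx_create_attr_set(&attr);
	rx_create(&attr);
	return 0;
}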

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  11 ++
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 100 ++++++++++++------
 2 files changed, 80 insertions(+), 31 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index ef64a042191d..92c111c28a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -161,6 +161,17 @@ struct mlx5e_ipsec_aso {
 	spinlock_t lock;
 };
 
+struct mlx5e_ipsec_rx_create_attr {
+	struct mlx5_flow_namespace *ns;
+	struct mlx5_ttc_table *ttc;
+	u32 family;
+	int prio;
+	int pol_level;
+	int sa_level;
+	int status_level;
+	enum mlx5_flow_namespace_type chains_ns;
+};
+
 struct mlx5e_ipsec {
 	struct mlx5_core_dev *mdev;
 	struct xarray sadb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0c50fcea4797..417d0a1cdf05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -249,13 +249,19 @@ static int ipsec_miss_create(struct mlx5_core_dev *mdev,
 	return err;
 }
 
+static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
+{
+	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+
+	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
+}
+
 static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		       struct mlx5e_ipsec_rx *rx, u32 family)
 {
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
 
 	/* disconnect */
-	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
+	ipsec_rx_ft_disconnect(ipsec, family);
 
 	if (rx->chains) {
 		ipsec_chains_destroy(rx->chains);
@@ -277,41 +283,78 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
 }
 
+static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+				     struct mlx5e_ipsec_rx *rx,
+				     u32 family,
+				     struct mlx5e_ipsec_rx_create_attr *attr)
+{
+	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
+	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+	attr->family = family;
+	attr->prio = MLX5E_NIC_PRIO;
+	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
+	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
+	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
+}
+
+static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+					 struct mlx5e_ipsec_rx *rx,
+					 struct mlx5e_ipsec_rx_create_attr *attr,
+					 struct mlx5_flow_destination *dest)
+{
+	struct mlx5_flow_table *ft;
+	int err;
+
+	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
+	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
+					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
+					   attr->prio);
+	if (err)
+		return err;
+
+	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
+	if (ft) {
+		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+		dest->ft = ft;
+	}
+
+	return 0;
+}
+
+static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
+				struct mlx5e_ipsec_rx *rx,
+				struct mlx5e_ipsec_rx_create_attr *attr)
+{
+	struct mlx5_flow_destination dest = {};
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = rx->ft.pol;
+	mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
+}
+
 static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		     struct mlx5e_ipsec_rx *rx, u32 family)
 {
-	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
-	struct mlx5_flow_destination default_dest;
+	struct mlx5e_ipsec_rx_create_attr attr;
 	struct mlx5_flow_destination dest[2];
 	struct mlx5_flow_table *ft;
 	u32 flags = 0;
 	int err;
 
-	default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
-	err = mlx5_ipsec_fs_roce_rx_create(mdev, ipsec->roce, ns, &default_dest,
-					   family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
-					   MLX5E_NIC_PRIO);
+	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);
+
+	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
 	if (err)
 		return err;
 
-	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
-			     MLX5E_NIC_PRIO, 1, 0);
+	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 1, 0);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_fs_ft_status;
 	}
-
 	rx->ft.status = ft;
 
-	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
-	if (ft) {
-		dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-		dest[0].ft = ft;
-	} else {
-		dest[0] = default_dest;
-	}
-
 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
 	err = ipsec_status_rule(mdev, rx, dest);
@@ -323,8 +366,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
 	if (rx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
-	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO, 2,
-			     flags);
+	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_fs_ft;
@@ -337,9 +379,9 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
 		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
-						 MLX5_FLOW_NAMESPACE_KERNEL,
-						 MLX5E_NIC_PRIO,
-						 MLX5E_ACCEL_FS_POL_FT_LEVEL,
+						 attr.chains_ns,
+						 attr.prio,
+						 attr.pol_level,
 						 &rx->ft.pol);
 		if (IS_ERR(rx->chains)) {
 			err = PTR_ERR(rx->chains);
@@ -349,8 +391,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		goto connect;
 	}
 
-	ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
-			     2, 0);
+	ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_pol_ft;
@@ -365,10 +406,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 connect:
 	/* connect */
-	memset(dest, 0x00, sizeof(*dest));
-	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest[0].ft = rx->ft.pol;
-	mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest[0]);
+	ipsec_rx_ft_connect(ipsec, rx, &attr);
 	return 0;
 
 err_pol_miss:
-- 
2.41.0



* [PATCH net-next 05/12] net/mlx5e: Support IPsec packet offload for RX in switchdev mode
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (3 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 04/12] net/mlx5e: Refactor IPsec RX tables creation and destruction Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 06/12] net/mlx5e: Handle IPsec offload for RX datapath " Leon Romanovsky
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

As decryption must be done first, add a new prio for IPsec offload in
the FDB, placed just below the BYPASS prio and above the TC prio. Three
table levels are added for RX. The first is for the ip xfrm policy. The
SA table is created in the second level for the ip xfrm state. The
status table is created last, to check the decryption result: if it
succeeded, packets continue to the next stage of processing; otherwise
they are dropped. For now, the setting of reg c1 is removed for
switchdev mode; the datapath handling will be added in the next patch.
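
The resulting layout can be summarized in a standalone sketch mirroring
the enum ordering this patch adds (illustration only; a lower value
means the prio or level is evaluated first):

#include <stdio.h>

/* Mirrors the FDB prio order in include/linux/mlx5/fs.h after this
 * patch: decryption must happen before TC, so FDB_CRYPTO_INGRESS sits
 * between FDB_BYPASS_PATH and FDB_TC_OFFLOAD. */
enum fdb_prio { FDB_BYPASS_PATH, FDB_CRYPTO_INGRESS, FDB_TC_OFFLOAD };

/* Mirrors the three RX table levels inside the new crypto prio. */
enum esw_ipsec_rx_level {
	RX_POL_FT_LEVEL,	/* ip xfrm policy */
	RX_ESP_FT_LEVEL,	/* SA table, ip xfrm state */
	RX_ESP_FT_CHK_LEVEL,	/* status check: pass or drop */
};

int main(void)
{
	printf("prio: bypass(%d) -> crypto(%d) -> tc(%d)\n",
	       FDB_BYPASS_PATH, FDB_CRYPTO_INGRESS, FDB_TC_OFFLOAD);
	printf("rx levels: pol(%d) -> sa(%d) -> status(%d)\n",
	       RX_POL_FT_LEVEL, RX_ESP_FT_LEVEL, RX_ESP_FT_CHK_LEVEL);
	return 0;
}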

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/Makefile  |   4 +
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  41 +++-
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    |  80 ++++----
 .../mlx5/core/en_accel/ipsec_offload.c        |   3 +-
 .../mellanox/mlx5/core/esw/ipsec_fs.c         | 184 ++++++++++++++++++
 .../mellanox/mlx5/core/esw/ipsec_fs.h         |  39 ++++
 .../net/ethernet/mellanox/mlx5/core/fs_core.c |   6 +
 include/linux/mlx5/fs.h                       |   1 +
 8 files changed, 312 insertions(+), 46 deletions(-)
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 35f00700a4d6..63a2f2bb80a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -75,6 +75,10 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH)   += esw/acl/helper.o \
 				      esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
 				      esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
 
+ifneq ($(CONFIG_MLX5_EN_IPSEC),)
+	mlx5_core-$(CONFIG_MLX5_ESWITCH)   += esw/ipsec_fs.o
+endif
+
 mlx5_core-$(CONFIG_MLX5_BRIDGE)    += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \
 				      en/rep/bridge.o
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 92c111c28a7a..5ab8c9e714c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -135,7 +135,7 @@ struct mlx5e_ipsec_sw_stats {
 	atomic64_t ipsec_tx_drop_trailer;
 };
 
-struct mlx5e_ipsec_rx;
+struct mlx5e_ipsec_fc;
 struct mlx5e_ipsec_tx;
 
 struct mlx5e_ipsec_work {
@@ -172,6 +172,38 @@ struct mlx5e_ipsec_rx_create_attr {
 	enum mlx5_flow_namespace_type chains_ns;
 };
 
+struct mlx5e_ipsec_ft {
+	struct mutex mutex; /* Protect changes to this struct */
+	struct mlx5_flow_table *pol;
+	struct mlx5_flow_table *sa;
+	struct mlx5_flow_table *status;
+	u32 refcnt;
+};
+
+struct mlx5e_ipsec_rule {
+	struct mlx5_flow_handle *rule;
+	struct mlx5_modify_hdr *modify_hdr;
+	struct mlx5_pkt_reformat *pkt_reformat;
+	struct mlx5_fc *fc;
+};
+
+struct mlx5e_ipsec_miss {
+	struct mlx5_flow_group *group;
+	struct mlx5_flow_handle *rule;
+};
+
+struct mlx5e_ipsec_rx {
+	struct mlx5e_ipsec_ft ft;
+	struct mlx5e_ipsec_miss pol;
+	struct mlx5e_ipsec_miss sa;
+	struct mlx5e_ipsec_rule status;
+	struct mlx5e_ipsec_miss status_drop;
+	struct mlx5_fc *status_drop_cnt;
+	struct mlx5e_ipsec_fc *fc;
+	struct mlx5_fs_chains *chains;
+	u8 allow_tunnel_mode : 1;
+};
+
 struct mlx5e_ipsec {
 	struct mlx5_core_dev *mdev;
 	struct xarray sadb;
@@ -197,13 +229,6 @@ struct mlx5e_ipsec_esn_state {
 	u8 overlap: 1;
 };
 
-struct mlx5e_ipsec_rule {
-	struct mlx5_flow_handle *rule;
-	struct mlx5_modify_hdr *modify_hdr;
-	struct mlx5_pkt_reformat *pkt_reformat;
-	struct mlx5_fc *fc;
-};
-
 struct mlx5e_ipsec_limits {
 	u64 round;
 	u8 soft_limit_hit : 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 417d0a1cdf05..0e7095c60b64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -9,6 +9,7 @@
 #include "fs_core.h"
 #include "lib/ipsec_fs_roce.h"
 #include "lib/fs_chains.h"
+#include "esw/ipsec_fs.h"
 
 #define NUM_IPSEC_FTE BIT(15)
 #define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
@@ -19,29 +20,6 @@ struct mlx5e_ipsec_fc {
 	struct mlx5_fc *drop;
 };
 
-struct mlx5e_ipsec_ft {
-	struct mutex mutex; /* Protect changes to this struct */
-	struct mlx5_flow_table *pol;
-	struct mlx5_flow_table *sa;
-	struct mlx5_flow_table *status;
-	u32 refcnt;
-};
-
-struct mlx5e_ipsec_miss {
-	struct mlx5_flow_group *group;
-	struct mlx5_flow_handle *rule;
-};
-
-struct mlx5e_ipsec_rx {
-	struct mlx5e_ipsec_ft ft;
-	struct mlx5e_ipsec_miss pol;
-	struct mlx5e_ipsec_miss sa;
-	struct mlx5e_ipsec_rule status;
-	struct mlx5e_ipsec_fc *fc;
-	struct mlx5_fs_chains *chains;
-	u8 allow_tunnel_mode : 1;
-};
-
 struct mlx5e_ipsec_tx {
 	struct mlx5e_ipsec_ft ft;
 	struct mlx5e_ipsec_miss pol;
@@ -259,9 +237,9 @@ static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
 static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		       struct mlx5e_ipsec_rx *rx, u32 family)
 {
-
 	/* disconnect */
-	ipsec_rx_ft_disconnect(ipsec, family);
+	if (rx != ipsec->rx_esw)
+		ipsec_rx_ft_disconnect(ipsec, family);
 
 	if (rx->chains) {
 		ipsec_chains_destroy(rx->chains);
@@ -276,8 +254,12 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	mlx5_destroy_flow_table(rx->ft.sa);
 	if (rx->allow_tunnel_mode)
 		mlx5_eswitch_unblock_encap(mdev);
-	mlx5_del_flow_rules(rx->status.rule);
-	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+	if (rx == ipsec->rx_esw) {
+		mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
+	} else {
+		mlx5_del_flow_rules(rx->status.rule);
+		mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+	}
 	mlx5_destroy_flow_table(rx->ft.status);
 
 	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
@@ -288,6 +270,13 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
 				     u32 family,
 				     struct mlx5e_ipsec_rx_create_attr *attr)
 {
+	if (rx == ipsec->rx_esw) {
+		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
+		attr->ns = ipsec->tx_esw->ns;
+		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
+		return;
+	}
+
 	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
 	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
 	attr->family = family;
@@ -306,6 +295,9 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
 	struct mlx5_flow_table *ft;
 	int err;
 
+	if (rx == ipsec->rx_esw)
+		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
+
 	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
 	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
 					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
@@ -357,7 +349,10 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
-	err = ipsec_status_rule(mdev, rx, dest);
+	if (rx == ipsec->rx_esw)
+		err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
+	else
+		err = ipsec_status_rule(mdev, rx, dest);
 	if (err)
 		goto err_add;
 
@@ -406,7 +401,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 
 connect:
 	/* connect */
-	ipsec_rx_ft_connect(ipsec, rx, &attr);
+	if (rx != ipsec->rx_esw)
+		ipsec_rx_ft_connect(ipsec, rx, &attr);
 	return 0;
 
 err_pol_miss:
@@ -864,18 +860,22 @@ static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upsp
 	}
 }
 
-static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec, u8 dir)
+static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
+						     int type, u8 dir)
 {
+	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+		return MLX5_FLOW_NAMESPACE_FDB;
+
 	if (dir == XFRM_DEV_OFFLOAD_IN)
 		return MLX5_FLOW_NAMESPACE_KERNEL;
 
 	return MLX5_FLOW_NAMESPACE_EGRESS;
 }
 
-static int setup_modify_header(struct mlx5e_ipsec *ipsec, u32 val, u8 dir,
+static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
 			       struct mlx5_flow_act *flow_act)
 {
-	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, dir);
+	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
 	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 	struct mlx5_core_dev *mdev = ipsec->mdev;
 	struct mlx5_modify_hdr *modify_hdr;
@@ -1052,7 +1052,8 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
 			      struct mlx5_accel_esp_xfrm_attrs *attrs,
 			      struct mlx5_flow_act *flow_act)
 {
-	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->dir);
+	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
+								attrs->dir);
 	struct mlx5_pkt_reformat_params reformat_params = {};
 	struct mlx5_core_dev *mdev = ipsec->mdev;
 	struct mlx5_pkt_reformat *pkt_reformat;
@@ -1094,7 +1095,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	struct mlx5_flow_spec *spec;
 	struct mlx5e_ipsec_rx *rx;
 	struct mlx5_fc *counter;
-	int err;
+	int err = 0;
 
 	rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
 	if (IS_ERR(rx))
@@ -1115,8 +1116,10 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	setup_fte_esp(spec);
 	setup_fte_no_frags(spec);
 
-	err = setup_modify_header(ipsec, sa_entry->ipsec_obj_id | BIT(31),
-				  XFRM_DEV_OFFLOAD_IN, &flow_act);
+	if (rx != ipsec->rx_esw)
+		err = setup_modify_header(ipsec, attrs->type,
+					  sa_entry->ipsec_obj_id | BIT(31),
+					  XFRM_DEV_OFFLOAD_IN, &flow_act);
 	if (err)
 		goto err_mod_header;
 
@@ -1307,7 +1310,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 		if (!attrs->reqid)
 			break;
 
-		err = setup_modify_header(ipsec, attrs->reqid,
+		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
 					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
 		if (err)
 			goto err_mod_header;
@@ -1355,6 +1358,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
 	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
 	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
 	struct mlx5_flow_destination dest[2];
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_handle *rule;
@@ -1400,6 +1404,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
+	if (rx == ipsec->rx_esw && rx->chains)
+		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest[dstn].ft = rx->ft.sa;
 	dstn++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index a3554bde3e07..beed57e24088 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -46,7 +46,8 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
 
 		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
-		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
+		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level) &&
+		    MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
 			caps |= MLX5_IPSEC_CAP_PRIO;
 
 		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
new file mode 100644
index 000000000000..7df7a8b0a6a0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en_accel/ipsec.h"
+#include "esw/ipsec_fs.h"
+
+enum {
+	MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
+	MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
+	MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+};
+
+static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+					     struct mlx5e_ipsec_rx *rx)
+{
+	mlx5_del_flow_rules(rx->status_drop.rule);
+	mlx5_destroy_flow_group(rx->status_drop.group);
+	mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+}
+
+static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+					     struct mlx5e_ipsec_rx *rx)
+{
+	mlx5_del_flow_rules(rx->status.rule);
+	mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+}
+
+static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
+					   struct mlx5e_ipsec_rx *rx)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_table *ft = rx->ft.status;
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *rule;
+	struct mlx5_fc *flow_counter;
+	struct mlx5_flow_spec *spec;
+	struct mlx5_flow_group *g;
+	u32 *flow_group_in;
+	int err = 0;
+
+	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!flow_group_in || !spec) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+	g = mlx5_create_flow_group(ft, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		mlx5_core_err(mdev,
+			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+		goto err_out;
+	}
+
+	flow_counter = mlx5_fc_create(mdev, false);
+	if (IS_ERR(flow_counter)) {
+		err = PTR_ERR(flow_counter);
+		mlx5_core_err(mdev,
+			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+		goto err_cnt;
+	}
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+	dest.counter_id = mlx5_fc_id(flow_counter);
+	spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		mlx5_core_err(mdev,
+			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
+		goto err_rule;
+	}
+
+	rx->status_drop.group = g;
+	rx->status_drop.rule = rule;
+	rx->status_drop_cnt = flow_counter;
+
+	kvfree(flow_group_in);
+	kvfree(spec);
+	return 0;
+
+err_rule:
+	mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+	mlx5_destroy_flow_group(g);
+err_out:
+	kvfree(flow_group_in);
+	kvfree(spec);
+	return err;
+}
+
+static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+					   struct mlx5e_ipsec_rx *rx,
+					   struct mlx5_flow_destination *dest)
+{
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_spec *spec;
+	int err;
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec)
+		return -ENOMEM;
+
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+			 misc_parameters_2.ipsec_syndrome);
+	MLX5_SET(fte_match_param, spec->match_value,
+		 misc_parameters_2.ipsec_syndrome, 0);
+	spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+	flow_act.flags = FLOW_ACT_NO_APPEND;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
+	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		mlx5_core_warn(ipsec->mdev,
+			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
+		goto err_rule;
+	}
+
+	rx->status.rule = rule;
+	kvfree(spec);
+	return 0;
+
+err_rule:
+	kvfree(spec);
+	return err;
+}
+
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+				      struct mlx5e_ipsec_rx *rx)
+{
+	esw_ipsec_rx_status_pass_destroy(ipsec, rx);
+	esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+				    struct mlx5e_ipsec_rx *rx,
+				    struct mlx5_flow_destination *dest)
+{
+	int err;
+
+	err = esw_ipsec_rx_status_drop_create(ipsec, rx);
+	if (err)
+		return err;
+
+	err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
+	if (err)
+		goto err_pass_create;
+
+	return 0;
+
+err_pass_create:
+	esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+	return err;
+}
+
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+				       struct mlx5e_ipsec_rx_create_attr *attr)
+{
+	attr->prio = FDB_CRYPTO_INGRESS;
+	attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL;
+	attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL;
+	attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL;
+	attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
+}
+
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+					   struct mlx5_flow_destination *dest)
+{
+	dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
new file mode 100644
index 000000000000..1d6104648d32
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_ESW_IPSEC_FS_H__
+#define __MLX5_ESW_IPSEC_FS_H__
+
+struct mlx5e_ipsec;
+
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+				      struct mlx5e_ipsec_rx *rx);
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+				    struct mlx5e_ipsec_rx *rx,
+				    struct mlx5_flow_destination *dest);
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+				       struct mlx5e_ipsec_rx_create_attr *attr);
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+					   struct mlx5_flow_destination *dest);
+#else
+static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+						    struct mlx5e_ipsec_rx *rx) {}
+
+static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+						  struct mlx5e_ipsec_rx *rx,
+						  struct mlx5_flow_destination *dest)
+{
+	return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+						     struct mlx5e_ipsec_rx_create_attr *attr) {}
+
+static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+							 struct mlx5_flow_destination *dest)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 4ef04aa28771..8ae1854d6b73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2987,6 +2987,12 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 	if (err)
 		goto out_err;
 
+	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_INGRESS, 3);
+	if (IS_ERR(maj_prio)) {
+		err = PTR_ERR(maj_prio);
+		goto out_err;
+	}
+
 	err = create_fdb_fast_path(steering);
 	if (err)
 		goto out_err;
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 2cb404c7ea13..6b1fa94f69c8 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -109,6 +109,7 @@ enum mlx5_flow_namespace_type {
 
 enum {
 	FDB_BYPASS_PATH,
+	FDB_CRYPTO_INGRESS,
 	FDB_TC_OFFLOAD,
 	FDB_FT_OFFLOAD,
 	FDB_TC_MISS,
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH net-next 06/12] net/mlx5e: Handle IPsec offload for RX datapath in switchdev mode
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (4 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 05/12] net/mlx5e: Support IPsec packet offload for RX in switchdev mode Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 07/12] net/mlx5e: Refactor IPsec TX tables creation Leon Romanovsky
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

Reuse the tun opts bits in reg c1 to pass the IPsec obj id to the
datapath. As this is needed only for RX SAs and only 11 bits are
available, an xarray is used to map each IPsec obj id to an index in
the range 1 to 0x7ff, and that index, rather than the obj id itself,
is written to reg c1.
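
A condensed sketch of the mapping scheme (the xarray calls mirror the
ones added below; the standalone names "map" and "ipsec_obj_id" are
illustrative only):

	/* SA add path: allocate an 11-bit index for the IPsec obj id. */
	u32 mapped_id;
	int err;

	err = xa_alloc_bh(&map, &mapped_id, xa_mk_value(ipsec_obj_id),
			  XA_LIMIT(1, GENMASK(10, 0)), 0);

	/* RX datapath: recover the obj id from the index found in reg c1. */
	void *val = xa_load(&map, mapped_id);

	if (val)
		ipsec_obj_id = xa_to_value(val);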

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../ethernet/mellanox/mlx5/core/en/rep/tc.c   | 17 ++++-
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  2 +
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    |  7 ++
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c  | 22 ++++++
 .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h  |  2 +
 .../mellanox/mlx5/core/esw/ipsec_fs.c         | 69 +++++++++++++++++++
 .../mellanox/mlx5/core/esw/ipsec_fs.h         | 20 ++++++
 include/linux/mlx5/eswitch.h                  |  3 +
 8 files changed, 139 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index b5c773ffc763..b12fe3c5a258 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -715,9 +715,20 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
 	uplink_priv = &uplink_rpriv->uplink_priv;
 	ct_priv = uplink_priv->ct_priv;
 
-	if (!mlx5_ipsec_is_rx_flow(cqe) &&
-	    !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
-				 &tc_priv))
+#ifdef CONFIG_MLX5_EN_IPSEC
+	if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) {
+		u32 mapped_id;
+		u32 metadata;
+
+		mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK;
+		if (mapped_id &&
+		    !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata))
+			mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata);
+	}
+#endif
+
+	if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv,
+				 zone_restore_id, tunnel_id, &tc_priv))
 		goto free_skb;
 
 forward:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 5ab8c9e714c4..bc8e200f4059 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -202,6 +202,7 @@ struct mlx5e_ipsec_rx {
 	struct mlx5e_ipsec_fc *fc;
 	struct mlx5_fs_chains *chains;
 	u8 allow_tunnel_mode : 1;
+	struct xarray ipsec_obj_id_map;
 };
 
 struct mlx5e_ipsec {
@@ -248,6 +249,7 @@ struct mlx5e_ipsec_sa_entry {
 	struct mlx5e_ipsec_work *work;
 	struct mlx5e_ipsec_dwork *dwork;
 	struct mlx5e_ipsec_limits limits;
+	u32 rx_mapped_id;
 };
 
 struct mlx5_accel_pol_xfrm_attrs {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0e7095c60b64..0d386fbfb18c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -1120,6 +1120,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 		err = setup_modify_header(ipsec, attrs->type,
 					  sa_entry->ipsec_obj_id | BIT(31),
 					  XFRM_DEV_OFFLOAD_IN, &flow_act);
+	else
+		err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
+
 	if (err)
 		goto err_mod_header;
 
@@ -1608,6 +1611,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	}
 
 	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
 	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
 }
 
@@ -1660,6 +1664,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
 	kfree(ipsec->rx_ipv6);
 
 	if (ipsec->is_uplink_rep) {
+		xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+
 		mutex_destroy(&ipsec->tx_esw->ft.mutex);
 		WARN_ON(ipsec->tx_esw->ft.refcnt);
 		kfree(ipsec->tx_esw);
@@ -1720,6 +1726,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 		mutex_init(&ipsec->tx_esw->ft.mutex);
 		mutex_init(&ipsec->rx_esw->ft.mutex);
 		ipsec->tx_esw->ns = ns_esw;
+		xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
 	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
 		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 8d6379ac4574..c00fe0d5ea11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -37,6 +37,7 @@
 #include "ipsec.h"
 #include "ipsec_rxtx.h"
 #include "en.h"
+#include "esw/ipsec_fs.h"
 
 enum {
 	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
@@ -355,3 +356,24 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
 	}
 }
+
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
+{
+	struct mlx5e_ipsec *ipsec = priv->ipsec;
+	u32 ipsec_obj_id;
+	int err;
+
+	if (!ipsec || !ipsec->is_uplink_rep)
+		return -EINVAL;
+
+	err = mlx5_esw_ipsec_rx_ipsec_obj_id_search(priv, id, &ipsec_obj_id);
+	if (err) {
+		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+		return err;
+	}
+
+	*metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
+					       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 436e9a8a32d3..9ee014a8ad24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,6 +43,7 @@
 #define MLX5_IPSEC_METADATA_MARKER(metadata)  (((metadata) >> 31) & 0x1)
 #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
 #define MLX5_IPSEC_METADATA_HANDLE(metadata)  ((metadata) & GENMASK(23, 0))
+#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
 
 struct mlx5e_accel_tx_ipsec_state {
 	struct xfrm_offload *xo;
@@ -67,6 +68,7 @@ void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct sk_buff *skb,
 				       u32 ipsec_meta_data);
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata);
 static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
 {
 	return ipsec_st->tailen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 7df7a8b0a6a0..0675587c1a79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -182,3 +182,72 @@ int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
 
 	return 0;
 }
+
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+					  struct mlx5_flow_act *flow_act)
+{
+	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5_modify_hdr *modify_hdr;
+	u32 mapped_id;
+	int err;
+
+	err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+			  xa_mk_value(sa_entry->ipsec_obj_id),
+			  XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
+	if (err)
+		return err;
+
+	/* reuse tunnel bits for ipsec,
+	 * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id.
+	 */
+	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+	MLX5_SET(set_action_in, action, field,
+		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+	MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS);
+	MLX5_SET(set_action_in, action, length,
+		 ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS);
+	MLX5_SET(set_action_in, action, data, mapped_id);
+
+	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+					      1, action);
+	if (IS_ERR(modify_hdr)) {
+		err = PTR_ERR(modify_hdr);
+		goto err_header_alloc;
+	}
+
+	sa_entry->rx_mapped_id = mapped_id;
+	flow_act->modify_hdr = modify_hdr;
+	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+	return 0;
+
+err_header_alloc:
+	xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+	return err;
+}
+
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+
+	if (sa_entry->rx_mapped_id)
+		xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+			    sa_entry->rx_mapped_id);
+}
+
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+					  u32 *ipsec_obj_id)
+{
+	struct mlx5e_ipsec *ipsec = priv->ipsec;
+	void *val;
+
+	val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+	if (!val)
+		return -ENOENT;
+
+	*ipsec_obj_id = xa_to_value(val);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index 1d6104648d32..44df34032d1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -5,6 +5,7 @@
 #define __MLX5_ESW_IPSEC_FS_H__
 
 struct mlx5e_ipsec;
+struct mlx5e_ipsec_sa_entry;
 
 #ifdef CONFIG_MLX5_ESWITCH
 void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
@@ -16,6 +17,11 @@ void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
 				       struct mlx5e_ipsec_rx_create_attr *attr);
 int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
 					   struct mlx5_flow_destination *dest);
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+					  struct mlx5_flow_act *flow_act);
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+					  u32 *ipsec_obj_id);
 #else
 static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
 						    struct mlx5e_ipsec_rx *rx) {}
@@ -35,5 +41,19 @@ static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ips
 {
 	return -EINVAL;
 }
+
+static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+							struct mlx5_flow_act *flow_act)
+{
+	return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {}
+
+static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+							u32 *ipsec_obj_id)
+{
+	return -EINVAL;
+}
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index e2701ed0200e..950d2431a53c 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -144,6 +144,9 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 	GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
 		ESW_TUN_OPTS_OFFSET + 1)
 
+/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+
 u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH net-next 07/12] net/mlx5e: Refactor IPsec TX tables creation
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (5 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 06/12] net/mlx5e: Handle IPsec offload for RX datapath " Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 08/12] net/mlx5e: Support IPsec packet offload for TX in switchdev mode Leon Romanovsky
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

Add an attribute struct for IPsec TX table creation and pass all the
needed parameters through it, so that tx_create() can later be reused
by the eswitch.
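
A minimal sketch of the resulting flow in tx_create(), condensed from
the hunks below:

	struct mlx5e_ipsec_tx_create_attr attr;

	ipsec_tx_create_attr_set(ipsec, tx, &attr);	/* NIC values for now */
	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);

so a later patch only has to supply eswitch-specific prio/levels in
the attr instead of touching the table creation code itself.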

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  8 ++++++
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 26 ++++++++++++++-----
 2 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index bc8e200f4059..a3cd3986b2cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -205,6 +205,14 @@ struct mlx5e_ipsec_rx {
 	struct xarray ipsec_obj_id_map;
 };
 
+struct mlx5e_ipsec_tx_create_attr {
+	int prio;
+	int pol_level;
+	int sa_level;
+	int cnt_level;
+	enum mlx5_flow_namespace_type chains_ns;
+};
+
 struct mlx5e_ipsec {
 	struct mlx5_core_dev *mdev;
 	struct xarray sadb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 0d386fbfb18c..f89174dbed92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -569,15 +569,29 @@ static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 	mlx5_destroy_flow_table(tx->ft.status);
 }
 
-static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+				     struct mlx5e_ipsec_tx *tx,
+				     struct mlx5e_ipsec_tx_create_attr *attr)
+{
+	attr->prio = 0;
+	attr->pol_level = 0;
+	attr->sa_level = 1;
+	attr->cnt_level = 2;
+	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
+}
+
+static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		     struct mlx5_ipsec_fs *roce)
 {
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5e_ipsec_tx_create_attr attr;
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_table *ft;
 	u32 flags = 0;
 	int err;
 
-	ft = ipsec_ft_create(tx->ns, 2, 0, 1, 0);
+	ipsec_tx_create_attr_set(ipsec, tx, &attr);
+	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
 	if (IS_ERR(ft))
 		return PTR_ERR(ft);
 	tx->ft.status = ft;
@@ -590,7 +604,7 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
 	if (tx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
-	ft = ipsec_ft_create(tx->ns, 1, 0, 4, flags);
+	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_sa_ft;
@@ -599,7 +613,7 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
 		tx->chains = ipsec_chains_create(
-			mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 0,
+			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
 			&tx->ft.pol);
 		if (IS_ERR(tx->chains)) {
 			err = PTR_ERR(tx->chains);
@@ -609,7 +623,7 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 		goto connect_roce;
 	}
 
-	ft = ipsec_ft_create(tx->ns, 0, 0, 2, 0);
+	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
 	if (IS_ERR(ft)) {
 		err = PTR_ERR(ft);
 		goto err_pol_ft;
@@ -656,7 +670,7 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	if (tx->ft.refcnt)
 		goto skip;
 
-	err = tx_create(mdev, tx, ipsec->roce);
+	err = tx_create(ipsec, tx, ipsec->roce);
 	if (err)
 		return err;
 
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH net-next 08/12] net/mlx5e: Support IPsec packet offload for TX in switchdev mode
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (6 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 07/12] net/mlx5e: Refactor IPsec TX tables creation Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination Leon Romanovsky
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

IPsec encryption is done last, so add a new prio for IPsec offload in
the FDB, placed just below the slow path prio and above the per-vport
prio.
Three levels are added for TX. The first holds the ip xfrm policy
table. The SA table, for ip xfrm state, is created in the second
level. The status table, which counts the encrypted packets, is
created last.
The rules which forward packets to the uplink are changed to forward
them to the IPsec TX tables first. These rules are restored once those
tables are destroyed, which happens as soon as the last reference to
them is dropped, just as in legacy mode. Slow path support is added
here by refreshing the uplink's channels; the handling for the TC fast
path, which is more complicated, will be added later. Besides, reg c4
is now used to match on reqid.
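
For orientation, the FDB prio order assembled from the fs.h hunk below
and the one in the earlier RX patch becomes:

	enum {
		FDB_BYPASS_PATH,
		FDB_CRYPTO_INGRESS,
		FDB_TC_OFFLOAD,
		FDB_FT_OFFLOAD,
		FDB_TC_MISS,
		FDB_BR_OFFLOAD,
		FDB_SLOW_PATH,
		FDB_CRYPTO_EGRESS,
		FDB_PER_VPORT,
	};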

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 70 ++++++++++++++++---
 .../mellanox/mlx5/core/esw/ipsec_fs.c         | 16 +++++
 .../mellanox/mlx5/core/esw/ipsec_fs.h         |  5 ++
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |  1 +
 .../mellanox/mlx5/core/eswitch_offloads.c     | 11 +++
 .../net/ethernet/mellanox/mlx5/core/fs_core.c |  6 ++
 include/linux/mlx5/fs.h                       |  1 +
 7 files changed, 101 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f89174dbed92..a2f3ba811af8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -10,6 +10,7 @@
 #include "lib/ipsec_fs_roce.h"
 #include "lib/fs_chains.h"
 #include "esw/ipsec_fs.h"
+#include "en_rep.h"
 
 #define NUM_IPSEC_FTE BIT(15)
 #define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
@@ -23,6 +24,7 @@ struct mlx5e_ipsec_fc {
 struct mlx5e_ipsec_tx {
 	struct mlx5e_ipsec_ft ft;
 	struct mlx5e_ipsec_miss pol;
+	struct mlx5e_ipsec_miss sa;
 	struct mlx5e_ipsec_rule status;
 	struct mlx5_flow_namespace *ns;
 	struct mlx5e_ipsec_fc *fc;
@@ -550,7 +552,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_
 }
 
 /* IPsec TX flow steering */
-static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		       struct mlx5_ipsec_fs *roce)
 {
 	mlx5_ipsec_fs_roce_tx_destroy(roce);
@@ -562,9 +564,13 @@ static void tx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
 		mlx5_destroy_flow_table(tx->ft.pol);
 	}
 
+	if (tx == ipsec->tx_esw) {
+		mlx5_del_flow_rules(tx->sa.rule);
+		mlx5_destroy_flow_group(tx->sa.group);
+	}
 	mlx5_destroy_flow_table(tx->ft.sa);
 	if (tx->allow_tunnel_mode)
-		mlx5_eswitch_unblock_encap(mdev);
+		mlx5_eswitch_unblock_encap(ipsec->mdev);
 	mlx5_del_flow_rules(tx->status.rule);
 	mlx5_destroy_flow_table(tx->ft.status);
 }
@@ -573,6 +579,11 @@ static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
 				     struct mlx5e_ipsec_tx *tx,
 				     struct mlx5e_ipsec_tx_create_attr *attr)
 {
+	if (tx == ipsec->tx_esw) {
+		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
+		return;
+	}
+
 	attr->prio = 0;
 	attr->pol_level = 0;
 	attr->sa_level = 1;
@@ -611,6 +622,15 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 	}
 	tx->ft.sa = ft;
 
+	if (tx == ipsec->tx_esw) {
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest.vport.num = MLX5_VPORT_UPLINK;
+		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
+		if (err)
+			goto err_sa_miss;
+		memset(&dest, 0, sizeof(dest));
+	}
+
 	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
 		tx->chains = ipsec_chains_create(
 			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
@@ -652,6 +672,11 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		mlx5_destroy_flow_table(tx->ft.pol);
 	}
 err_pol_ft:
+	if (tx == ipsec->tx_esw) {
+		mlx5_del_flow_rules(tx->sa.rule);
+		mlx5_destroy_flow_group(tx->sa.group);
+	}
+err_sa_miss:
 	mlx5_destroy_flow_table(tx->ft.sa);
 err_sa_ft:
 	if (tx->allow_tunnel_mode)
@@ -662,6 +687,25 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 	return err;
 }
 
+static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
+				       struct mlx5_flow_table *ft)
+{
+#ifdef CONFIG_MLX5_ESWITCH
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	struct mlx5e_rep_priv *uplink_rpriv;
+	struct mlx5e_priv *priv;
+
+	esw->offloads.ft_ipsec_tx_pol = ft;
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+	priv = netdev_priv(uplink_rpriv->netdev);
+	if (!priv->channels.num)
+		return;
+
+	mlx5e_rep_deactivate_channels(priv);
+	mlx5e_rep_activate_channels(priv);
+#endif
+}
+
 static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		  struct mlx5e_ipsec_tx *tx)
 {
@@ -674,6 +718,9 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	if (err)
 		return err;
 
+	if (tx == ipsec->tx_esw)
+		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
+
 skip:
 	tx->ft.refcnt++;
 	return 0;
@@ -684,7 +731,10 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
 	if (--tx->ft.refcnt)
 		return;
 
-	tx_destroy(ipsec->mdev, tx, ipsec->roce);
+	if (tx == ipsec->tx_esw)
+		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
+
+	tx_destroy(ipsec, tx, ipsec->roce);
 }
 
 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
@@ -842,15 +892,15 @@ static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
 		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
 }
 
-static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
+static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
 {
 	/* Pass policy check before choosing this SA */
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
 
-	MLX5_SET(fte_match_param, spec->match_criteria,
-		 misc_parameters_2.metadata_reg_c_0, reqid);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+			 misc_parameters_2.metadata_reg_c_4);
 	MLX5_SET(fte_match_param, spec->match_value,
-		 misc_parameters_2.metadata_reg_c_0, reqid);
+		 misc_parameters_2.metadata_reg_c_4, reqid);
 }
 
 static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
@@ -902,7 +952,7 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
 		break;
 	case XFRM_DEV_OFFLOAD_OUT:
 		MLX5_SET(set_action_in, action, field,
-			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
 		break;
 	default:
 		return -EINVAL;
@@ -1235,7 +1285,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 		break;
 	case XFRM_DEV_OFFLOAD_PACKET:
 		if (attrs->reqid)
-			setup_fte_reg_c0(spec, attrs->reqid);
+			setup_fte_reg_c4(spec, attrs->reqid);
 		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
 		if (err)
 			goto err_pkt_reformat;
@@ -1346,6 +1396,8 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 	}
 
 	flow_act.flags |= FLOW_ACT_NO_APPEND;
+	if (tx == ipsec->tx_esw && tx->chains)
+		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
 	dest[dstn].ft = tx->ft.sa;
 	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dstn++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 0675587c1a79..e60cd3dc1b13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -12,6 +12,12 @@ enum {
 	MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
 };
 
+enum {
+	MLX5_ESW_IPSEC_TX_POL_FT_LEVEL,
+	MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL,
+	MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
+};
+
 static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
 					     struct mlx5e_ipsec_rx *rx)
 {
@@ -251,3 +257,13 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
 
 	return 0;
 }
+
+void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+				       struct mlx5e_ipsec_tx_create_attr *attr)
+{
+	attr->prio = FDB_CRYPTO_EGRESS;
+	attr->pol_level = MLX5_ESW_IPSEC_TX_POL_FT_LEVEL;
+	attr->sa_level = MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL;
+	attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL;
+	attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index 44df34032d1e..275684f99ed3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -22,6 +22,8 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
 void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry);
 int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
 					  u32 *ipsec_obj_id);
+void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+				       struct mlx5e_ipsec_tx_create_attr *attr);
 #else
 static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
 						    struct mlx5e_ipsec_rx *rx) {}
@@ -55,5 +57,8 @@ static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv,
 {
 	return -EINVAL;
 }
+
+static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+						     struct mlx5e_ipsec_tx_create_attr *attr) {}
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ae0dc8a3060d..1070cfa72678 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -254,6 +254,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_group *vport_rx_group;
 	struct mlx5_flow_group *vport_rx_drop_group;
 	struct mlx5_flow_handle *vport_rx_drop_rule;
+	struct mlx5_flow_table *ft_ipsec_tx_pol;
 	struct xarray vport_reps;
 	struct list_head peer_flows[MLX5_MAX_PORTS];
 	struct mutex peer_mutex;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index bdfe609cc9ec..e8bf18098203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -884,6 +884,17 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
 	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 
+	if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
+		dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
+		flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	} else {
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest.vport.num = rep->vport;
+		dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
+		dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+	}
+
 	if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
 	    rep->vport == MLX5_VPORT_UPLINK)
 		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 8ae1854d6b73..830ff8480fe1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3015,6 +3015,12 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 		goto out_err;
 	}
 
+	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_EGRESS, 3);
+	if (IS_ERR(maj_prio)) {
+		err = PTR_ERR(maj_prio);
+		goto out_err;
+	}
+
 	/* We put this priority last, knowing that nothing will get here
 	 * unless explicitly forwarded to. This is possible because the
 	 * slow path tables have catch all rules and nothing gets passed
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 6b1fa94f69c8..c302ec34255b 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -115,6 +115,7 @@ enum {
 	FDB_TC_MISS,
 	FDB_BR_OFFLOAD,
 	FDB_SLOW_PATH,
+	FDB_CRYPTO_EGRESS,
 	FDB_PER_VPORT,
 };
 
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (7 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 08/12] net/mlx5e: Support IPsec packet offload for TX in switchdev mode Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-13  0:32   ` Jakub Kicinski
  2023-07-11  9:29 ` [PATCH net-next 10/12] net/mlx5e: Make IPsec offload work together with eswitch and TC Leon Romanovsky
                   ` (2 subsequent siblings)
  11 siblings, 1 reply; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

The rule destination must be compared with the old_dest passed in.
Comparing against new_dest means the lookup can only match a rule that
already carries the new destination, while the rule that actually
holds old_dest is never updated.
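
To illustrate the call semantics this fix restores (a condensed
sketch, based on the restore helper later in this series;
"ipsec_tx_pol_ft" is a placeholder for the rule's current destination
table):

	struct mlx5_flow_destination old_dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
		.ft = ipsec_tx_pol_ft,		/* what the rule points to now */
	};
	struct mlx5_flow_destination new_dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
		.vport.num = MLX5_VPORT_UPLINK,	/* what it should point to */
	};

	err = mlx5_modify_rule_destination(rule, &new_dest, &old_dest);

The internal lookup must match against old_dest, as the rule still
carries the old destination at this point.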

Fixes: 74491de93712 ("net/mlx5: Add multi dest support")
Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 830ff8480fe1..59df6156246e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1066,7 +1066,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
 	}
 
 	for (i = 0; i < handle->num_rules; i++) {
-		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
+		if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
 			return _mlx5_modify_rule_destination(handle->rule[i],
 							     new_dest);
 	}
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH net-next 10/12] net/mlx5e: Make IPsec offload work together with eswitch and TC
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (8 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 11/12] net/mlx5e: Modify and restore TC rules for IPSec TX rules Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 12/12] net/mlx5e: Add get IPsec offload stats for uplink representor Leon Romanovsky
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

The eswitch mode is not allowed to change while any IPsec rules are
installed. Conversely, since mlx5_esw_try_lock() is used to take the
eswitch mode lock, IPsec rules cannot be offloaded while any TC rules
exist.
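
The resulting lock pairing, condensed from the ipsec_fs.c hunks below
(the RX path is shown; TX is symmetric):

	/* get path: create tables under the eswitch mode lock */
	err = mlx5_eswitch_block_mode_trylock(mdev);	/* fails if TC rules exist */
	if (err)
		goto err_out;
	err = rx_create(mdev, ipsec, rx, family);
	mlx5_eswitch_block_mode_unlock(mdev, err);	/* bumps block count on success */

	/* put path: destroy tables and drop the block count */
	mlx5_eswitch_unblock_mode_lock(mdev);
	rx_destroy(mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode_unlock(mdev);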

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    | 59 ++++++++++++----
 .../net/ethernet/mellanox/mlx5/core/eswitch.h | 14 ++++
 .../mellanox/mlx5/core/eswitch_offloads.c     | 70 +++++++++++++++++++
 3 files changed, 128 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index a2f3ba811af8..2e5eb427f824 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -254,8 +254,6 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	mlx5_del_flow_rules(rx->sa.rule);
 	mlx5_destroy_flow_group(rx->sa.group);
 	mlx5_destroy_flow_table(rx->ft.sa);
-	if (rx->allow_tunnel_mode)
-		mlx5_eswitch_unblock_encap(mdev);
 	if (rx == ipsec->rx_esw) {
 		mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
 	} else {
@@ -359,8 +357,6 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 		goto err_add;
 
 	/* Create FT */
-	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
 	if (rx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
@@ -415,8 +411,6 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 err_fs:
 	mlx5_destroy_flow_table(rx->ft.sa);
 err_fs_ft:
-	if (rx->allow_tunnel_mode)
-		mlx5_eswitch_unblock_encap(mdev);
 	mlx5_del_flow_rules(rx->status.rule);
 	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
 err_add:
@@ -434,13 +428,26 @@ static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	if (rx->ft.refcnt)
 		goto skip;
 
+	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+
+	err = mlx5_eswitch_block_mode_trylock(mdev);
+	if (err)
+		goto err_out;
+
 	err = rx_create(mdev, ipsec, rx, family);
+	mlx5_eswitch_block_mode_unlock(mdev, err);
 	if (err)
-		return err;
+		goto err_out;
 
 skip:
 	rx->ft.refcnt++;
 	return 0;
+
+err_out:
+	if (rx->allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(mdev);
+	return err;
 }
 
 static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
@@ -449,7 +456,12 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
 	if (--rx->ft.refcnt)
 		return;
 
+	mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
 	rx_destroy(ipsec->mdev, ipsec, rx, family);
+	mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
+
+	if (rx->allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(ipsec->mdev);
 }
 
 static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
@@ -569,8 +581,6 @@ static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 		mlx5_destroy_flow_group(tx->sa.group);
 	}
 	mlx5_destroy_flow_table(tx->ft.sa);
-	if (tx->allow_tunnel_mode)
-		mlx5_eswitch_unblock_encap(ipsec->mdev);
 	mlx5_del_flow_rules(tx->status.rule);
 	mlx5_destroy_flow_table(tx->ft.status);
 }
@@ -611,8 +621,6 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 	if (err)
 		goto err_status_rule;
 
-	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
-		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
 	if (tx->allow_tunnel_mode)
 		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
 	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
@@ -679,8 +687,6 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
 err_sa_miss:
 	mlx5_destroy_flow_table(tx->ft.sa);
 err_sa_ft:
-	if (tx->allow_tunnel_mode)
-		mlx5_eswitch_unblock_encap(mdev);
 	mlx5_del_flow_rules(tx->status.rule);
 err_status_rule:
 	mlx5_destroy_flow_table(tx->ft.status);
@@ -714,16 +720,32 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 	if (tx->ft.refcnt)
 		goto skip;
 
-	err = tx_create(ipsec, tx, ipsec->roce);
+	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+
+	err = mlx5_eswitch_block_mode_trylock(mdev);
 	if (err)
-		return err;
+		goto err_out;
+
+	err = tx_create(ipsec, tx, ipsec->roce);
+	if (err) {
+		mlx5_eswitch_block_mode_unlock(mdev, err);
+		goto err_out;
+	}
 
 	if (tx == ipsec->tx_esw)
 		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
 
+	mlx5_eswitch_block_mode_unlock(mdev, err);
+
 skip:
 	tx->ft.refcnt++;
 	return 0;
+
+err_out:
+	if (tx->allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(mdev);
+	return err;
 }
 
 static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
@@ -731,10 +753,17 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
 	if (--tx->ft.refcnt)
 		return;
 
+	mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
+
 	if (tx == ipsec->tx_esw)
 		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
 
 	tx_destroy(ipsec, tx, ipsec->roce);
+
+	mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
+
+	if (tx->allow_tunnel_mode)
+		mlx5_eswitch_unblock_encap(ipsec->mdev);
 }
 
 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 1070cfa72678..ddd16fd4a3a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -270,6 +270,7 @@ struct mlx5_esw_offload {
 	u8 inline_mode;
 	atomic64_t num_flows;
 	u64 num_block_encap;
+	u64 num_block_mode;
 	enum devlink_eswitch_encap_mode encap;
 	struct ida vport_metadata_ida;
 	unsigned int host_number; /* ECPF supports one external host */
@@ -789,6 +790,11 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
 
+int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev);
+void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err);
+void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev);
+
 static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
 {
 	if (mlx5_esw_allowed(esw))
@@ -867,6 +873,14 @@ static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
 static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
 {
 }
+
+static inline int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) { return 0; }
+
+static inline void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) {}
+
+static inline void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) {}
+
+static inline void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) {}
 #endif /* CONFIG_MLX5_ESWITCH */
 
 #endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index e8bf18098203..52e974e2d74a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3513,6 +3513,69 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
 	return net_eq(devl_net, netdev_net);
 }
 
+int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev)
+{
+	struct devlink *devlink = priv_to_devlink(dev);
+	struct mlx5_eswitch *esw;
+	int err;
+
+	devl_lock(devlink);
+	esw = mlx5_devlink_eswitch_get(devlink);
+	if (IS_ERR(esw)) {
+		/* Failure means no eswitch => not possible to change eswitch mode */
+		devl_unlock(devlink);
+		return 0;
+	}
+
+	err = mlx5_esw_try_lock(esw);
+	if (err < 0) {
+		devl_unlock(devlink);
+		return err;
+	}
+
+	return 0;
+}
+
+void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err)
+{
+	struct devlink *devlink = priv_to_devlink(dev);
+	struct mlx5_eswitch *esw;
+
+	esw = mlx5_devlink_eswitch_get(devlink);
+	if (IS_ERR(esw))
+		return;
+
+	if (!err)
+		esw->offloads.num_block_mode++;
+	mlx5_esw_unlock(esw);
+	devl_unlock(devlink);
+}
+
+void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev)
+{
+	struct devlink *devlink = priv_to_devlink(dev);
+	struct mlx5_eswitch *esw;
+
+	esw = mlx5_devlink_eswitch_get(devlink);
+	if (IS_ERR(esw))
+		return;
+
+	down_write(&esw->mode_lock);
+}
+
+void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev)
+{
+	struct devlink *devlink = priv_to_devlink(dev);
+	struct mlx5_eswitch *esw;
+
+	esw = mlx5_devlink_eswitch_get(devlink);
+	if (IS_ERR(esw))
+		return;
+
+	esw->offloads.num_block_mode--;
+	up_write(&esw->mode_lock);
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
 				  struct netlink_ext_ack *extack)
 {
@@ -3546,6 +3609,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (cur_mlx5_mode == mlx5_mode)
 		goto unlock;
 
+	if (esw->offloads.num_block_mode) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't change eswitch mode when IPsec SA and/or policies are configured");
+		err = -EOPNOTSUPP;
+		goto unlock;
+	}
+
 	mlx5_eswitch_disable_locked(esw);
 	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
 		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH net-next 11/12] net/mlx5e: Modify and restore TC rules for IPSec TX rules
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (9 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 10/12] net/mlx5e: Make IPsec offload work together with eswitch and TC Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  2023-07-11  9:29 ` [PATCH net-next 12/12] net/mlx5e: Add get IPsec offload stats for uplink representor Leon Romanovsky
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

After IPsec policy/state TX rules are added, any TC flow rule that
forwards packets to the uplink is modified to forward them to the
IPsec TX tables instead. As these tables are destroyed dynamically,
as soon as there is no reference to them, the destinations of such
rules must then be restored to the uplink.

There is a special case for packet encapsulation: the
packet_reformat_id in the extended destination is used to reformat
packets, but only for the VPORT destination. To forward packets to the
IPsec table and do encapsulation in one FTE, move the
packet_reformat_id to the flow context instead of using the extended
destination. As a limitation, multiple encapsulations with table
forwarding, or one encapsulation together with other VPORT
destinations, are not allowed, so add a check when offloading TC
rules.

TC rules are not allowed before the IPsec TX rules are added, so the
TC rules only need to be restored after the IPsec TX rules are
flushed. As they are saved in the vport_rep rhashtables, walk all the
rules in those rhashtables, find the TC rules whose destinations point
to the IPsec tables, and modify them one by one (see the condensed
walk below). To avoid concurrency issues, this handling is done under
the protection of the eswitch mode_lock.
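
The restore path, condensed from the esw/ipsec_fs.c hunk below:

	rhashtable_walk_enter(&rpriv->tc_ht, &iter);
	rhashtable_walk_start(&iter);
	while ((flow = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(flow))	/* walker may return -EAGAIN entries */
			continue;
		/* rewrite IPsec-table destinations back to the uplink vport */
		mlx5_esw_ipsec_modify_flow_dests(esw, flow);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);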

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec_fs.c    |  4 +-
 .../mellanox/mlx5/core/esw/ipsec_fs.c         | 56 +++++++++++
 .../mellanox/mlx5/core/esw/ipsec_fs.h         |  3 +
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |  2 +
 .../mellanox/mlx5/core/eswitch_offloads.c     | 93 ++++++++++++++++++-
 5 files changed, 154 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 2e5eb427f824..c71230c3957e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -755,8 +755,10 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
 
 	mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
 
-	if (tx == ipsec->tx_esw)
+	if (tx == ipsec->tx_esw) {
+		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
 		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
+	}
 
 	tx_destroy(ipsec, tx, ipsec->roce);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index e60cd3dc1b13..455746952260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -5,6 +5,9 @@
 #include "eswitch.h"
 #include "en_accel/ipsec.h"
 #include "esw/ipsec_fs.h"
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+#include "en/tc_priv.h"
+#endif
 
 enum {
 	MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
@@ -267,3 +270,56 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
 	attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL;
 	attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
 }
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw,
+					    struct mlx5e_tc_flow *flow)
+{
+	struct mlx5_esw_flow_attr *esw_attr;
+	struct mlx5_flow_attr *attr;
+	int err;
+
+	attr = flow->attr;
+	esw_attr = attr->esw_attr;
+	if (esw_attr->out_count - esw_attr->split_count > 1)
+		return 0;
+
+	err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr,
+					      esw_attr->out_count - 1);
+
+	return err;
+}
+#endif
+
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
+{
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep;
+	struct mlx5e_rep_priv *rpriv;
+	struct rhashtable_iter iter;
+	struct mlx5e_tc_flow *flow;
+	unsigned long i;
+	int err;
+
+	xa_for_each(&esw->offloads.vport_reps, i, rep) {
+		rpriv = rep->rep_data[REP_ETH].priv;
+		if (!rpriv || !rpriv->netdev)
+			continue;
+
+		rhashtable_walk_enter(&rpriv->tc_ht, &iter);
+		rhashtable_walk_start(&iter);
+		while ((flow = rhashtable_walk_next(&iter)) != NULL) {
+			if (IS_ERR(flow))
+				continue;
+
+			err = mlx5_esw_ipsec_modify_flow_dests(esw, flow);
+			if (err)
+				mlx5_core_warn_once(mdev,
+						    "Failed to modify flow dests for IPsec");
+		}
+		rhashtable_walk_stop(&iter);
+		rhashtable_walk_exit(&iter);
+	}
+#endif
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index 275684f99ed3..0c90f7a8b0d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -24,6 +24,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
 					  u32 *ipsec_obj_id);
 void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
 				       struct mlx5e_ipsec_tx_create_attr *attr);
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
 #else
 static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
 						    struct mlx5e_ipsec_rx *rx) {}
@@ -60,5 +61,7 @@ static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv,
 
 static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
 						     struct mlx5e_ipsec_tx_create_attr *attr) {}
+
+static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index ddd16fd4a3a1..def16c23aed7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -816,6 +816,8 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
 	return esw->fdb_table.offloads.slow_fdb;
 }
 
+int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
+				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 52e974e2d74a..0cf2810e439a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -424,10 +424,49 @@ esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 l
 	mlx5_chains_put_table(chains, chain, prio, level);
 }
 
+static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
+{
+	return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
+}
+
+static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
+					      struct mlx5_esw_flow_attr *esw_attr,
+					      int attr_idx)
+{
+	if (esw->offloads.ft_ipsec_tx_pol &&
+	    esw_attr->dests[attr_idx].rep &&
+	    esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
+	    esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
+	    esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
+		return true;
+
+	return false;
+}
+
+static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
+					   struct mlx5_esw_flow_attr *esw_attr)
+{
+	int i;
+
+	if (!esw->offloads.ft_ipsec_tx_pol)
+		return true;
+
+	for (i = 0; i < esw_attr->split_count; i++)
+		if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
+			return false;
+
+	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+		if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
+		    (esw_attr->out_count - esw_attr->split_count > 1))
+			return false;
+
+	return true;
+}
+
 static void
-esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
-		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
-		     int attr_idx, int dest_idx, bool pkt_reformat)
+esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+			 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+			 int attr_idx, int dest_idx, bool pkt_reformat)
 {
 	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
@@ -449,6 +488,33 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
 	}
 }
 
+static void
+esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+			 struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+			 int attr_idx, int dest_idx, bool pkt_reformat)
+{
+	dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
+	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	if (pkt_reformat &&
+	    esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
+		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+		flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+	}
+}
+
+static void
+esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+		     int attr_idx, int dest_idx, bool pkt_reformat)
+{
+	if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
+		esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
+					 attr_idx, dest_idx, pkt_reformat);
+	else
+		esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
+					 attr_idx, dest_idx, pkt_reformat);
+}
+
 static int
 esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
 		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
@@ -575,6 +641,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
 		return ERR_PTR(-EOPNOTSUPP);
 
+	if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
 	if (!dest)
 		return ERR_PTR(-ENOMEM);
@@ -4385,3 +4454,21 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
+
+int
+mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
+{
+	struct mlx5_flow_destination new_dest = {};
+	struct mlx5_flow_destination old_dest = {};
+
+	if (esw_attr->dests[attr_idx].rep->vport != MLX5_VPORT_UPLINK)
+		return 0;
+
+	esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+	if (old_dest.type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+		return 0;
+
+	esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+	return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
+}
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH net-next 12/12] net/mlx5e: Add get IPsec offload stats for uplink representor
  2023-07-11  9:28 [PATCH net-next 00/12] mlx5 IPsec packet offload support in eswitch mode Leon Romanovsky
                   ` (10 preceding siblings ...)
  2023-07-11  9:29 ` [PATCH net-next 11/12] net/mlx5e: Modify and restore TC rules for IPSec TX rules Leon Romanovsky
@ 2023-07-11  9:29 ` Leon Romanovsky
  11 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-11  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Jianbo Liu, Eric Dumazet, Mark Bloch, netdev, Paolo Abeni,
	Saeed Mahameed, David S . Miller

From: Jianbo Liu <jianbol@nvidia.com>

As IPsec offload is supported in switchdev mode, HW stats can now be
obtained from the uplink rep.
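
Once this group is registered, the HW IPsec counters show up in the
uplink representor's ethtool statistics and can be inspected with,
e.g. (exact counter names depend on the device/driver):

  ethtool -S <uplink-rep-netdev> | grep ipsec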

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 152b62138450..373f37584619 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1341,6 +1341,7 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
 	&MLX5E_STATS_GRP(channels),
 	&MLX5E_STATS_GRP(per_port_buff_congest),
 #ifdef CONFIG_MLX5_EN_IPSEC
+	&MLX5E_STATS_GRP(ipsec_hw),
 	&MLX5E_STATS_GRP(ipsec_sw),
 #endif
 	&MLX5E_STATS_GRP(ptp),
-- 
2.41.0


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-11  9:29 ` [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination Leon Romanovsky
@ 2023-07-13  0:32   ` Jakub Kicinski
  2023-07-13  6:33     ` Leon Romanovsky
  0 siblings, 1 reply; 26+ messages in thread
From: Jakub Kicinski @ 2023-07-13  0:32 UTC (permalink / raw)
  To: Saeed Mahameed
  Cc: Leon Romanovsky, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller

On Tue, 11 Jul 2023 12:29:07 +0300 Leon Romanovsky wrote:
> From: Jianbo Liu <jianbol@nvidia.com>
> 
> The rule destination must be compared with the old_dest passed in.
> 
> Fixes: 74491de93712 ("net/mlx5: Add multi dest support")

This says Fixes, should I quickly toss it into net so it makes
tomorrow's PR?  The commit message is pretty useless :(

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-13  0:32   ` Jakub Kicinski
@ 2023-07-13  6:33     ` Leon Romanovsky
  2023-07-13 17:04       ` Jakub Kicinski
  0 siblings, 1 reply; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-13  6:33 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller

On Wed, Jul 12, 2023 at 05:32:59PM -0700, Jakub Kicinski wrote:
> On Tue, 11 Jul 2023 12:29:07 +0300 Leon Romanovsky wrote:
> > From: Jianbo Liu <jianbol@nvidia.com>
> > 
> > The rule destination must be compared with the old_dest passed in.
> > 
> > Fixes: 74491de93712 ("net/mlx5: Add multi dest support")
> 
> This says Fixes, should I quickly toss it into net so it makes
> tomorrow's PR?

This is a fix, but it is useful for this series only, which actually
needs to modify flow steering rule destinations on the fly.

There is no other code in mlx5 which needs this fix.

Thanks

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-13  6:33     ` Leon Romanovsky
@ 2023-07-13 17:04       ` Jakub Kicinski
  2023-07-13 17:43         ` Leon Romanovsky
  0 siblings, 1 reply; 26+ messages in thread
From: Jakub Kicinski @ 2023-07-13 17:04 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Thu, 13 Jul 2023 09:33:45 +0300 Leon Romanovsky wrote:
> > This says Fixes, should I quickly toss it into net so it makes
> > tomorrow's PR?  
> 
> This is a fix, but it is useful for this series only, which actually
> needs to modify flow steering rule destinations on the fly.
> 
> There is no other code in mlx5 which needs this fix.

Reads like "can't be triggered with current code", in which case 
the right thing to do is to add "can't be triggered with current
code" to the commit message, rather than the Fixes tag.

I had a look thru the series yesterday, and it looks good to me
(tho I'm no ipsec expert). Thanks for putting in the work!

Could you add some info about how the code in the series can be
exercised / example configurations? And please CC Simon, it'd be
great to get him / someone at Corigine to review.

And obviously Steffen, why did you not CC Steffen?! :o

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-13 17:04       ` Jakub Kicinski
@ 2023-07-13 17:43         ` Leon Romanovsky
  2023-07-13 18:05           ` Jakub Kicinski
  0 siblings, 1 reply; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-13 17:43 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Thu, Jul 13, 2023 at 10:04:01AM -0700, Jakub Kicinski wrote:
> On Thu, 13 Jul 2023 09:33:45 +0300 Leon Romanovsky wrote:
> > > This says Fixes, should I quickly toss it into net so it makes
> > > tomorrow's PR?  
> > 
> > This is a fix, but it is useful for this series only, which actually
> > needs to modify flow steering rule destinations on the fly.
> > 
> > There is no other code in mlx5 which needs this fix.
> 
> Reads like "can't be triggered with current code", in which case 
> the right thing to do is to add "can't be triggered with current
> code" to the commit message, rather than the Fixes tag.

The code is wrong, hence the Fixes line, but I can remove it.

> 
> I had a look thru the series yesterday, and it looks good to me
> (tho I'm no ipsec expert). Thanks for putting in the work!
> 
> Could you add some info about how the code in the series can be
> exercised / example configurations? And please CC Simon, it'd be
> great to get him / someone at Corigine to review.
> 
> And obviously Steffen, why did you not CC Steffen?! :o

It works exactly like "regular" IPsec, nothing special, except
now users can switch to switchdev before adding IPsec rules.

 devlink dev eswitch set pci/0000:06:00.0 mode switchdev

Same configurations as here:
https://lore.kernel.org/netdev/cover.1670005543.git.leonro@nvidia.com/
Packet offload mode:
  ip xfrm state offload packet dev <if-name> dir <in|out>
  ip xfrm policy .... offload packet dev <if-name>
Crypto offload mode:
  ip xfrm state offload crypto dev <if-name> dir <in|out>
or (backward compatibility)
  ip xfrm state offload dev <if-name> dir <in|out>
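
For completeness, a minimal end-to-end example of the packet offload
case after switching to switchdev mode as above (addresses, SPI, key
and interface name below are just examples):

  ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
      proto esp spi 0x1 reqid 1 mode transport \
      aead 'rfc4106(gcm(aes))' 0x6162636465666768696a6b6c6d6e6f7071727374 128 \
      offload packet dev eth2 dir out
  ip xfrm policy add src 192.168.1.1 dst 192.168.1.2 dir out \
      tmpl src 192.168.1.1 dst 192.168.1.2 proto esp reqid 1 mode transport \
      offload packet dev eth2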

I didn't add Steffen as it is more a flow steering magic series than an
IPsec one :).

I'll resubmit on Sunday.

Thanks

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-13 17:43         ` Leon Romanovsky
@ 2023-07-13 18:05           ` Jakub Kicinski
  2023-07-13 18:58             ` Leon Romanovsky
  0 siblings, 1 reply; 26+ messages in thread
From: Jakub Kicinski @ 2023-07-13 18:05 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Thu, 13 Jul 2023 20:43:17 +0300 Leon Romanovsky wrote:
> > Reads like "can't be triggered with current code", in which case 
> > the right thing to do is to add "can't be triggered with current
> > code" to the commit message, rather than the Fixes tag.  
> 
> The code is wrong, hence the Fixes line, but I can remove it.

Yes, perhaps after death we will inhabit a world with clear,
non-conflicting rules, where law can be followed to the letter
and "truth" and "good" are clearly and objectively defined.

Until the sweet release, tho, let's apply common sense, and
not add Fixes tags to patches which can't possibly be of interest 
to backporters.

Please and thank you...

> > I had a look thru the series yesterday, and it looks good to me
> > (tho I'm no ipsec expert). Thanks for putting in the work!
> > 
> > Could you add some info about how the code in the series can be
> > exercised / example configurations? And please CC Simon, it'd be
> > great to get him / someone at Corigine to review.
> > 
> > And obviously Steffen, why did you not CC Steffen?! :o  
> 
> It works exactly like "regular" IPsec, nothing special, except
> now users can switch to switchdev before adding IPsec rules.
> 
>  devlink dev eswitch set pci/0000:06:00.0 mode switchdev
> 
> Same configurations as here:
> https://lore.kernel.org/netdev/cover.1670005543.git.leonro@nvidia.com/
> Packet offload mode:
>   ip xfrm state offload packet dev <if-name> dir <in|out>
>   ip xfrm policy .... offload packet dev <if-name>
> Crypto offload mode:
>   ip xfrm state offload crypto dev <if-name> dir <in|out>
> or (backward compatibility)
>   ip xfrm state offload dev <if-name> dir <in|out>

I see, so all policy based IPsec?
Does the order of processing in the device match the kernel?
TC packet rewrites or IPsec comes first?

> I didn't add Steffen as it is more a flow steering magic series than an
> IPsec one :).
> 
> I'll resubmit on Sunday.

Thanks!

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-13 18:05           ` Jakub Kicinski
@ 2023-07-13 18:58             ` Leon Romanovsky
  2023-07-14  3:17               ` Jakub Kicinski
  0 siblings, 1 reply; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-13 18:58 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Thu, Jul 13, 2023 at 11:05:56AM -0700, Jakub Kicinski wrote:
> On Thu, 13 Jul 2023 20:43:17 +0300 Leon Romanovsky wrote:
> > > Reads like "can't be triggered with current code", in which case 
> > > the right thing to do is to add "can't be triggered with current
> > > code" to the commit message, rather than the Fixes tag.  
> > 
> > The code is wrong, hence the Fixes line, but I can remove it.
> 
> Yes, perhaps after death we will inhabit a world with clear,
> non-conflicting rules, where law can be followed to the letter
> and "truth" and "good" are clearly and objectively defined.
> 
> Until the sweat release, tho, let's apply common sense, and 
> not add Fixes tags to patches which can't possibly be of interest 
> to backporters.
> 
> Please and thank you...

Sure

> 
> > > I had a look thru the series yesterday, and it looks good to me
> > > (tho I'm no ipsec expert). Thanks for putting in the work!
> > > 
> > > Could you add some info about how the code in the series can be
> > > exercised / example configurations? And please CC Simon, it'd be
> > > great to get him / someone at Corigine to review.
> > > 
> > > And obviously Steffen, why did you not CC Steffen?! :o  
> > 
> > It works exactly like "regular" IPsec, nothing special, except
> > now users can switch to switchdev before adding IPsec rules.
> > 
> >  devlink dev eswitch set pci/0000:06:00.0 mode switchdev
> > 
> > Same configurations as here:
> > https://lore.kernel.org/netdev/cover.1670005543.git.leonro@nvidia.com/
> > Packet offload mode:
> >   ip xfrm state offload packet dev <if-name> dir <in|out>
> >   ip xfrm policy .... offload packet dev <if-name>
> > Crypto offload mode:
> >   ip xfrm state offload crypto dev <if-name> dir <in|out>
> > or (backward compatibility)
> >   ip xfrm state offload dev <if-name> dir <in|out>
> 
> I see, so all policy based IPsec?

Yes, it is.

> Does the order of processing in the device match the kernel?

Yes, and this is why this fix was needed: to make sure that we update
destinations properly.

> TC packet rewrites or IPsec comes first?

In theory, we support any order, but in real life I don't think that TC
before IPsec is really valuable.

> 
> > I didn't add Steffen as it is more flow steering magic series
> > and not IPsec :).
> > 
> > I'll resubmit on Sunday.
> 
> Thanks!

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-13 18:58             ` Leon Romanovsky
@ 2023-07-14  3:17               ` Jakub Kicinski
  2023-07-14 18:40                 ` Leon Romanovsky
  0 siblings, 1 reply; 26+ messages in thread
From: Jakub Kicinski @ 2023-07-14  3:17 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Thu, 13 Jul 2023 21:58:33 +0300 Leon Romanovsky wrote:
> > TC packet rewrites or IPsec comes first?  
> 
> In theory, we support any order, but in real life I don't think that TC
> before IPsec is really valuable.

I asked the question poorly. To be clearer, you're saying that:

a)  host <-> TC <-> IPsec <-> "wire"/switch
  or
b)  host <-> IPsec <-> TC <-> "wire"/switch

?

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-14  3:17               ` Jakub Kicinski
@ 2023-07-14 18:40                 ` Leon Romanovsky
  2023-07-14 19:16                   ` Jakub Kicinski
  0 siblings, 1 reply; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-14 18:40 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Thu, Jul 13, 2023 at 08:17:27PM -0700, Jakub Kicinski wrote:
> On Thu, 13 Jul 2023 21:58:33 +0300 Leon Romanovsky wrote:
> > > TC packet rewrites or IPsec comes first?  
> > 
> > In theory, we support any order, but in real life I don't think that TC
> > before IPsec is really valuable.
> 
> I asked the question poorly. To be clearer, you're saying that:
> 
> a)  host <-> TC <-> IPsec <-> "wire"/switch
>   or
> b)  host <-> IPsec <-> TC <-> "wire"/switch
> 
> ?

It depends on the configuration order: if the user configures TC first,
it will be a); if he/she configures IPsec first, it will be b).

I just think that option b) is what really matters.
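
For example (interface name and match are just examples), a) is what
you get by installing a TC rule on the uplink before any xfrm
configuration:

  tc filter add dev eth2 egress protocol ip flower dst_ip 192.168.1.2 \
      action pass

and running the xfrm state/policy commands first gives you b).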

Thanks

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-14 18:40                 ` Leon Romanovsky
@ 2023-07-14 19:16                   ` Jakub Kicinski
  2023-07-14 20:32                     ` Leon Romanovsky
  0 siblings, 1 reply; 26+ messages in thread
From: Jakub Kicinski @ 2023-07-14 19:16 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Fri, 14 Jul 2023 21:40:13 +0300 Leon Romanovsky wrote:
> > > In theory, we support any order, but in real life I don't think that TC
> > > before IPsec is really valuable.  
> > 
> > I asked the question poorly. To be clearer, you're saying that:
> > 
> > a)  host <-> TC <-> IPsec <-> "wire"/switch
> >   or
> > b)  host <-> IPsec <-> TC <-> "wire"/switch
> > 
> > ?  
> 
> It depends on the configuration order: if the user configures TC first,
> it will be a); if he/she configures IPsec first, it will be b).
> 
> I just think that option b) is what really matters.

And only b) matches what happens in the kernel with policy based IPsec,
right? So can we reject a) from happening? IIUC what you're saying -
the result depending on order of configuration may be a major source
of surprises / hard to debug problems for the user.

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-14 19:16                   ` Jakub Kicinski
@ 2023-07-14 20:32                     ` Leon Romanovsky
  2023-07-15  3:30                       ` Jakub Kicinski
  0 siblings, 1 reply; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-14 20:32 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Fri, Jul 14, 2023 at 12:16:33PM -0700, Jakub Kicinski wrote:
> On Fri, 14 Jul 2023 21:40:13 +0300 Leon Romanovsky wrote:
> > > > In theory, we support any order, but in real life I don't think that TC
> > > > before IPsec is really valuable.  
> > > 
> > > I asked the question poorly. To be clearer, you're saying that:
> > > 
> > > a)  host <-> TC <-> IPsec <-> "wire"/switch
> > >   or
> > > b)  host <-> IPsec <-> TC <-> "wire"/switch
> > > 
> > > ?  
> > 
> > It depends on the configuration order: if the user configures TC first,
> > it will be a); if he/she configures IPsec first, it will be b).
> > 
> > I just think that option b) is what really matters.
> 
> And only b) matches what happens in the kernel with policy based IPsec,
> right? 

Can you please clarify what you mean by "policy based IPsec"?

> So can we reject a) from happening? 

Technically yes.

> IIUC what you're saying -
> the result depending on order of configuration may be a major source
> of surprises / hard to debug problems for the user.

When I reviewed the patches, I came to exactly the opposite conclusion :)

My rationale was that users who configure IPsec and TC are advanced
users who know their data flow, and if they find option a) valuable,
they can do it.

For example, a) allows limiting the amount of data sent to the IPsec
engine.
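
A minimal sketch of that idea (device, match and rate are just
examples):

  tc filter add dev eth2 egress protocol ip flower dst_ip 192.168.1.2 \
      action police rate 100mbit burst 16k conform-exceed drop

so only the conforming part of the traffic ever reaches the IPsec
engine.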

I believe both a) and b) should be supported.

Thanks

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-14 20:32                     ` Leon Romanovsky
@ 2023-07-15  3:30                       ` Jakub Kicinski
  2023-07-16 10:39                         ` Leon Romanovsky
  0 siblings, 1 reply; 26+ messages in thread
From: Jakub Kicinski @ 2023-07-15  3:30 UTC (permalink / raw)
  To: Leon Romanovsky
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Fri, 14 Jul 2023 23:32:58 +0300 Leon Romanovsky wrote:
> On Fri, Jul 14, 2023 at 12:16:33PM -0700, Jakub Kicinski wrote:
> > On Fri, 14 Jul 2023 21:40:13 +0300 Leon Romanovsky wrote:  
> > > It depends on the configuration order: if the user configures TC
> > > first, it will be a); if he/she configures IPsec first, it will be b).
> > > 
> > > I just think that option b) is what really matters.
> > 
> > And only b) matches what happens in the kernel with policy based IPsec,
> > right?   
> 
> Can you please clarify what you mean by "policy based IPsec"?

I mean without a separate xfrm netdev on which you can install TC
rules of its own.

> > IIUC what you're saying -
> > the result depending on order of configuration may be a major source
> > of surprises / hard to debug problems for the user.  
> 
> When I reviewed the patches, I came to exactly the opposite conclusion :)
> 
> My rationale was that users who configure IPsec and TC are advanced
> users who know their data flow, and if they find option a) valuable,
> they can do it.
> 
> For example, a) allows limiting the amount of data sent to the IPsec
> engine.
> 
> I believe both a) and b) should be supported.

What does it take to switch between the modes?
Even if we want both modes we should have an explicit switch, I reckon.
Or at least a way to read back what mode we ended up in.
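
Strawman, just to show the kind of knob I have in mind (the param name
and values are made up, nothing like this exists today):

  # hypothetical devlink param, invented here purely for illustration
  devlink dev param set pci/0000:06:00.0 name ipsec_tc_order \
      value tc_first cmode runtime
  devlink dev param show pci/0000:06:00.0 name ipsec_tc_order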


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-15  3:30                       ` Jakub Kicinski
@ 2023-07-16 10:39                         ` Leon Romanovsky
  2023-07-19  9:29                           ` Leon Romanovsky
  0 siblings, 1 reply; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-16 10:39 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Fri, Jul 14, 2023 at 08:30:32PM -0700, Jakub Kicinski wrote:
> On Fri, 14 Jul 2023 23:32:58 +0300 Leon Romanovsky wrote:
> > On Fri, Jul 14, 2023 at 12:16:33PM -0700, Jakub Kicinski wrote:
> > > On Fri, 14 Jul 2023 21:40:13 +0300 Leon Romanovsky wrote:  
> > > > It depends on the configuration order: if the user configures TC
> > > > first, it will be a); if he/she configures IPsec first, it will be b).
> > > > 
> > > > I just think that option b) is what really matters.
> > > 
> > > And only b) matches what happens in the kernel with policy based IPsec,
> > > right?   
> > 
> > Can you please clarify what you mean by "policy based IPsec"?
> 
> I mean without a separate xfrm netdev on which you can install TC
> rules of its own.

I call it software IPsec.

> 
> > > IIUC what you're saying -
> > > the result depending on order of configuration may be a major source
> > > of surprises / hard to debug problems for the user.  
> > 
> > When I reviewed the patches, I came to exactly the opposite conclusion :)
> > 
> > My rationale was that users who configure IPsec and TC are advanced
> > users who know their data flow, and if they find option a) valuable,
> > they can do it.
> > 
> > For example, a) allows limiting the amount of data sent to the IPsec
> > engine.
> > 
> > I believe both a) and b) should be supported.
> 
> What does it take to switch between the modes?
> Even if we want both modes we should have an explicit switch, I reckon.
> Or at least a way to read back what mode we ended up in.

I had several internal discussions about how TC and IPsec should work
together, and will need some time to think about a proper implementation.

For now I'll add a patch which makes TC and IPsec mutually exclusive.

Thanks

> 

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH net-next 09/12] net/mlx5: Compare with old_dest param to modify rule destination
  2023-07-16 10:39                         ` Leon Romanovsky
@ 2023-07-19  9:29                           ` Leon Romanovsky
  0 siblings, 0 replies; 26+ messages in thread
From: Leon Romanovsky @ 2023-07-19  9:29 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Saeed Mahameed, Jianbo Liu, Eric Dumazet, Mark Bloch, netdev,
	Paolo Abeni, David S . Miller, Simon Horman

On Sun, Jul 16, 2023 at 01:39:47PM +0300, Leon Romanovsky wrote:
> On Fri, Jul 14, 2023 at 08:30:32PM -0700, Jakub Kicinski wrote:
> > On Fri, 14 Jul 2023 23:32:58 +0300 Leon Romanovsky wrote:
> > > On Fri, Jul 14, 2023 at 12:16:33PM -0700, Jakub Kicinski wrote:
> > > > On Fri, 14 Jul 2023 21:40:13 +0300 Leon Romanovsky wrote:  
> > > > > It depends on the configuration order: if the user configures TC
> > > > > first, it will be a); if he/she configures IPsec first, it will be b).
> > > > > 
> > > > > I just think that option b) is what really matters.
> > > > 
> > > > And only b) matches what happens in the kernel with policy based IPsec,
> > > > right?   
> > > 
> > > Can you please clarify what you mean by "policy based IPsec"?
> > 
> > I mean without a separate xfrm netdev on which you can install TC
> > rules of its own.
> 
> I call it software IPsec.
> 
> > 
> > > > IIUC what you're saying -
> > > > the result depending on order of configuration may be a major source
> > > > of surprises / hard to debug problems for the user.  
> > > 
> > > When I reviewed the patches, I came to exactly the opposite conclusion :)
> > > 
> > > My rationale was that users who configure IPsec and TC are advanced
> > > users who know their data flow, and if they find option a) valuable,
> > > they can do it.
> > > 
> > > For example, a) allows limiting the amount of data sent to the IPsec
> > > engine.
> > > 
> > > I believe both a) and b) should be supported.
> > 
> > What does it take to switch between the modes?
> > Even if we want both modes we should have an explicit switch, I reckon.
> > Or at least a way to read back what mode we ended up in.
> 
> I had several internal discussions about how TC and IPsec should work
> together, and will need some time to think about a proper implementation.
> 
> For now I'll add a patch which makes TC and IPsec mutually exclusive.

Even this so-called trivial patch is not so trivial in the current mlx5
implementation. Jianbo is working on it.

Thanks

> 
> Thanks
> 
> > 
> 

^ permalink raw reply	[flat|nested] 26+ messages in thread
