From: Leon Romanovsky <leon@kernel.org>
To: "David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: Leon Romanovsky <leonro@nvidia.com>,
	Steffen Klassert <steffen.klassert@secunet.com>,
	Herbert Xu <herbert@gondor.apana.org.au>,
	netdev@vger.kernel.org, Saeed Mahameed <saeedm@nvidia.com>,
	Raed Salem <raeds@nvidia.com>, Emeel Hakim <ehakim@nvidia.com>,
	Simon Horman <simon.horman@corigine.com>
Subject: [PATCH net-next v1 07/10] net/mlx5e: Listen to ARP events to update IPsec L2 headers in tunnel mode
Date: Thu, 13 Apr 2023 15:29:25 +0300
Message-ID: <b08025ba8fe3e117adebbbb69032e3d97de506bb.1681388425.git.leonro@nvidia.com>
In-Reply-To: <cover.1681388425.git.leonro@nvidia.com>

From: Leon Romanovsky <leonro@nvidia.com>

In IPsec packet offload mode, all header manipulations are performed by
the hardware, which is responsible for adding and removing the L2 header
with the source and destination MACs.

CX-7 devices don't support offload of the in-kernel routing
functionality, so the HW needs external help to fill in the peer's MAC
address, as it isn't available to the HW.

As a solution, let's listen to neighbour (ARP) updates and reconfigure
the IPsec rules on the fly once new MAC address information arrives.
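
For illustration, a minimal sketch of the netevent notifier pattern the
hunks below build on (the my_ipsec_* names are hypothetical placeholders;
SA lookup, locking and error handling are omitted):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/netevent.h>
#include <net/neighbour.h>

/* Simplified callback: react only to resolved neighbour updates and
 * snapshot the new MAC so a worker can later reprogram the HW rule.
 */
static int my_ipsec_netevent_cb(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct neighbour *n = ptr;
	u8 addr[ETH_ALEN];

	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
		return NOTIFY_DONE;

	neigh_ha_snapshot(addr, n, n->dev);
	/* ... match "addr" against the offloaded tunnel SAs and queue a
	 * work item that rewrites smac/dmac and modifies the flow rule ...
	 */
	return NOTIFY_DONE;
}

static struct notifier_block my_ipsec_nb = {
	.notifier_call = my_ipsec_netevent_cb,
};

/* Registered/unregistered at init/cleanup time with
 * register_netevent_notifier(&my_ipsec_nb) and
 * unregister_netevent_notifier(&my_ipsec_nb).
 */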

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       | 132 +++++++++++++++++-
 .../mellanox/mlx5/core/en_accel/ipsec.h       |   5 +
 2 files changed, 130 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 36f3ffd54355..b64281fd4142 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -35,12 +35,14 @@
 #include <crypto/aead.h>
 #include <linux/inetdevice.h>
 #include <linux/netdevice.h>
+#include <net/netevent.h>
 
 #include "en.h"
 #include "ipsec.h"
 #include "ipsec_rxtx.h"
 
 #define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
+#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1
 
 static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
 {
@@ -251,7 +253,7 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	struct neighbour *n;
 	u8 addr[ETH_ALEN];
 
-	if (attrs->mode != XFRM_MODE_TUNNEL &&
+	if (attrs->mode != XFRM_MODE_TUNNEL ||
 	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
 		return;
 
@@ -267,6 +269,8 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
 			if (IS_ERR(n))
 				return;
 			neigh_event_send(n, NULL);
+			attrs->drop = true;
+			break;
 		}
 		neigh_ha_snapshot(addr, n, netdev);
 		ether_addr_copy(attrs->smac, addr);
@@ -279,6 +283,8 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
 			if (IS_ERR(n))
 				return;
 			neigh_event_send(n, NULL);
+			attrs->drop = true;
+			break;
 		}
 		neigh_ha_snapshot(addr, n, netdev);
 		ether_addr_copy(attrs->dmac, addr);
@@ -507,34 +513,81 @@ static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
 	sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
 }
 
+static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
+{
+	struct mlx5e_ipsec_work *work =
+		container_of(_work, struct mlx5e_ipsec_work, work);
+	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
+	struct mlx5e_ipsec_netevent_data *data = work->data;
+	struct mlx5_accel_esp_xfrm_attrs *attrs;
+
+	attrs = &sa_entry->attrs;
+
+	switch (attrs->dir) {
+	case XFRM_DEV_OFFLOAD_IN:
+		ether_addr_copy(attrs->smac, data->addr);
+		break;
+	case XFRM_DEV_OFFLOAD_OUT:
+		ether_addr_copy(attrs->dmac, data->addr);
+		break;
+	default:
+		WARN_ON_ONCE(true);
+	}
+	attrs->drop = false;
+	mlx5e_accel_ipsec_fs_modify(sa_entry);
+}
+
 static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
 	struct xfrm_state *x = sa_entry->x;
 	struct mlx5e_ipsec_work *work;
+	void *data = NULL;
 
 	switch (x->xso.type) {
 	case XFRM_DEV_OFFLOAD_CRYPTO:
 		if (!(x->props.flags & XFRM_STATE_ESN))
 			return 0;
 		break;
+	case XFRM_DEV_OFFLOAD_PACKET:
+		if (x->props.mode != XFRM_MODE_TUNNEL)
+			return 0;
+		break;
 	default:
-		return 0;
+		break;
 	}
 
 	work = kzalloc(sizeof(*work), GFP_KERNEL);
 	if (!work)
 		return -ENOMEM;
 
-	work->data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
-	if (!work->data) {
-		kfree(work);
-		return -ENOMEM;
+	switch (x->xso.type) {
+	case XFRM_DEV_OFFLOAD_CRYPTO:
+		data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+		if (!data)
+			goto free_work;
+
+		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
+		break;
+	case XFRM_DEV_OFFLOAD_PACKET:
+		data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
+			       GFP_KERNEL);
+		if (!data)
+			goto free_work;
+
+		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
+		break;
+	default:
+		break;
 	}
 
-	INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
+	work->data = data;
 	work->sa_entry = sa_entry;
 	sa_entry->work = work;
 	return 0;
+
+free_work:
+	kfree(work);
+	return -ENOMEM;
 }
 
 static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -629,6 +682,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
 	if (sa_entry->dwork)
 		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
 				   MLX5_IPSEC_RESCHED);
+
+	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
+	    x->props.mode == XFRM_MODE_TUNNEL)
+		xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+			    MLX5E_IPSEC_TUNNEL_SA);
+
 out:
 	x->xso.offload_handle = (unsigned long)sa_entry;
 	return 0;
@@ -651,6 +710,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
 static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 {
 	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
 	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
 	struct mlx5e_ipsec_sa_entry *old;
 
@@ -659,6 +719,12 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 
 	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
 	WARN_ON(old != sa_entry);
+
+	if (attrs->mode == XFRM_MODE_TUNNEL &&
+	    attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+		/* Make sure that no ARP requests are running in parallel */
+		flush_workqueue(ipsec->wq);
+
 }
 
 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
@@ -683,6 +749,46 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
 	kfree(sa_entry);
 }
 
+static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
+				      unsigned long event, void *ptr)
+{
+	struct mlx5_accel_esp_xfrm_attrs *attrs;
+	struct mlx5e_ipsec_netevent_data *data;
+	struct mlx5e_ipsec_sa_entry *sa_entry;
+	struct mlx5e_ipsec *ipsec;
+	struct neighbour *n = ptr;
+	struct net_device *netdev;
+	struct xfrm_state *x;
+	unsigned long idx;
+
+	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
+		return NOTIFY_DONE;
+
+	ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
+	xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
+		attrs = &sa_entry->attrs;
+
+		if (attrs->family == AF_INET) {
+			if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
+			    !neigh_key_eq32(n, &attrs->daddr.a4))
+				continue;
+		} else {
+			if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
+			    !neigh_key_eq128(n, &attrs->daddr.a4))
+				continue;
+		}
+
+		x = sa_entry->x;
+		netdev = x->xso.real_dev;
+		data = sa_entry->work->data;
+
+		neigh_ha_snapshot(data->addr, n, netdev);
+		queue_work(ipsec->wq, &sa_entry->work->work);
+	}
+
+	return NOTIFY_DONE;
+}
+
 void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 {
 	struct mlx5e_ipsec *ipsec;
@@ -711,6 +817,13 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 			goto err_aso;
 	}
 
+	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
+		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
+		ret = register_netevent_notifier(&ipsec->netevent_nb);
+		if (ret)
+			goto clear_aso;
+	}
+
 	ret = mlx5e_accel_ipsec_fs_init(ipsec);
 	if (ret)
 		goto err_fs_init;
@@ -721,6 +834,9 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 	return;
 
 err_fs_init:
+	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+		unregister_netevent_notifier(&ipsec->netevent_nb);
+clear_aso:
 	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
 		mlx5e_ipsec_aso_cleanup(ipsec);
 err_aso:
@@ -739,6 +855,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
 		return;
 
 	mlx5e_accel_ipsec_fs_cleanup(ipsec);
+	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
+		unregister_netevent_notifier(&ipsec->netevent_nb);
 	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
 		mlx5e_ipsec_aso_cleanup(ipsec);
 	destroy_workqueue(ipsec->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 77384ffa4451..d06c896eadb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -144,6 +144,10 @@ struct mlx5e_ipsec_work {
 	void *data;
 };
 
+struct mlx5e_ipsec_netevent_data {
+	u8 addr[ETH_ALEN];
+};
+
 struct mlx5e_ipsec_dwork {
 	struct delayed_work dwork;
 	struct mlx5e_ipsec_sa_entry *sa_entry;
@@ -169,6 +173,7 @@ struct mlx5e_ipsec {
 	struct mlx5e_ipsec_tx *tx;
 	struct mlx5e_ipsec_aso *aso;
 	struct notifier_block nb;
+	struct notifier_block netevent_nb;
 	struct mlx5_ipsec_fs *roce;
 };
 
-- 
2.39.2



Thread overview: 30+ messages
2023-04-13 12:29 [PATCH net-next v1 00/10] Support tunnel mode in mlx5 IPsec packet offload Leon Romanovsky
2023-04-13 12:29 ` [PATCH net-next v1 01/10] net/mlx5e: Add IPsec packet offload tunnel bits Leon Romanovsky
2023-04-13 12:29 ` [PATCH net-next v1 02/10] net/mlx5e: Check IPsec packet offload tunnel capabilities Leon Romanovsky
2023-04-13 12:29 ` [PATCH net-next v1 03/10] net/mlx5e: Configure IPsec SA tables to support tunnel mode Leon Romanovsky
2023-04-13 12:29 ` [PATCH net-next v1 04/10] net/mlx5e: Prepare IPsec packet reformat code for " Leon Romanovsky
2023-04-14 22:40   ` Samudrala, Sridhar
2023-04-15  8:49     ` Leon Romanovsky
2023-04-17 13:32   ` Simon Horman
2023-04-13 12:29 ` [PATCH net-next v1 05/10] net/mlx5e: Support IPsec RX packet offload in " Leon Romanovsky
2023-04-17 13:33   ` Simon Horman
2023-04-13 12:29 ` [PATCH net-next v1 06/10] net/mlx5e: Support IPsec TX " Leon Romanovsky
2023-04-17 13:23   ` Simon Horman
2023-04-18  6:48     ` Leon Romanovsky
2023-04-18  7:09       ` Simon Horman
2023-04-18  7:58         ` Leon Romanovsky
2023-04-13 12:29 ` Leon Romanovsky [this message]
2023-04-17 13:34   ` [PATCH net-next v1 07/10] net/mlx5e: Listen to ARP events to update IPsec L2 headers " Simon Horman
2023-04-13 12:29 ` [PATCH net-next v1 08/10] net/mlx5: Allow blocking encap changes in eswitch Leon Romanovsky
2023-04-17 13:34   ` Simon Horman
2023-04-13 12:29 ` [PATCH net-next v1 09/10] net/mlx5e: Create IPsec table with tunnel support only when encap is disabled Leon Romanovsky
2023-04-17 13:35   ` Simon Horman
2023-04-13 12:29 ` [PATCH net-next v1 10/10] net/mlx5e: Accept tunnel mode for IPsec packet offload Leon Romanovsky
2023-04-17 13:36   ` Simon Horman
2023-04-16 14:41 ` [PATCH net-next v1 00/10] Support tunnel mode in mlx5 " Samudrala, Sridhar
2023-04-17  4:05 ` Jakub Kicinski
2023-04-17 13:10   ` Simon Horman
2023-04-17 13:38     ` Simon Horman
2023-04-17 17:58       ` Leon Romanovsky
2023-04-17 19:25       ` Jakub Kicinski
2023-04-18  2:00 ` patchwork-bot+netdevbpf
