From: Saeed Mahameed <saeedm@mellanox.com>
To: "David S. Miller" <davem@davemloft.net>
Cc: "netdev@vger.kernel.org" <netdev@vger.kernel.org>,
	Vlad Buslov <vladbu@mellanox.com>,
	Jianbo Liu <jianbol@mellanox.com>, Roi Dayan <roid@mellanox.com>,
	Saeed Mahameed <saeedm@mellanox.com>
Subject: [net-next 03/11] net/mlx5e: Extend neigh hash entry with rcu
Date: Wed, 21 Aug 2019 23:28:38 +0000
Message-ID: <20190821232806.21847-4-saeedm@mellanox.com>
In-Reply-To: <20190821232806.21847-1-saeedm@mellanox.com>

From: Vlad Buslov <vladbu@mellanox.com>

To remove the dependency on the rtnl lock and to allow unlocked iteration over
the list of neigh hash entries, extend nhe with an rcu head. Change operations
on the neigh list to their rcu counterparts and free neigh hash entries only
after an rcu grace period.
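
For illustration only (not part of this patch), the generic shape of such an
rcu-protected list -- publish/unlink under the writer's lock with the _rcu
list helpers, defer the free with kfree_rcu(), walk under rcu_read_lock()
only -- is roughly the following; "struct foo" and "foo_lock" are made-up
names:

  #include <linux/rculist.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct foo {
          struct list_head list;
          struct rcu_head rcu;            /* needed by kfree_rcu() */
  };

  static LIST_HEAD(foo_list);
  static DEFINE_SPINLOCK(foo_lock);

  static void foo_add(struct foo *f)
  {
          spin_lock_bh(&foo_lock);
          list_add_rcu(&f->list, &foo_list);      /* publish to rcu readers */
          spin_unlock_bh(&foo_lock);
  }

  static void foo_del(struct foo *f)
  {
          spin_lock_bh(&foo_lock);
          list_del_rcu(&f->list);         /* unlink; readers may still see it */
          spin_unlock_bh(&foo_lock);
          kfree_rcu(f, rcu);              /* freed only after a grace period */
  }

  static void foo_walk(void)
  {
          struct foo *f;

          rcu_read_lock();
          list_for_each_entry_rcu(f, &foo_list, list)
                  ;                       /* per-entry read-side work goes here */
          rcu_read_unlock();
  }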

Introduce the mlx5e_get_next_nhe() helper that is used to iterate over the rcu
neigh list with a reference to the returned nhe taken.
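
The usage added below in mlx5e_rep_neigh_stats_work() then boils down to this
sketch (rpriv is the rep priv from the surrounding function; the call to
mlx5e_tc_update_neigh_used_value() stands for any per-entry work):

  struct mlx5e_neigh_hash_entry *nhe = NULL;

  /* Each call returns the next entry with a reference held and releases
   * the reference on the entry passed in, so the current nhe stays valid
   * across the loop body without holding rtnl or the encap lock.
   */
  while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
          mlx5e_tc_update_neigh_used_value(nhe);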

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Jianbo Liu <jianbol@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
---
 .../net/ethernet/mellanox/mlx5/core/en_rep.c  | 68 ++++++++++++-------
 .../net/ethernet/mellanox/mlx5/core/en_rep.h  |  2 +
 2 files changed, 46 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 23087f9abe74..a294dc6b5a0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -535,28 +535,56 @@ static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
 {
 	if (refcount_dec_and_test(&nhe->refcnt)) {
 		mlx5e_rep_neigh_entry_remove(nhe);
-		kfree(nhe);
+		kfree_rcu(nhe, rcu);
 	}
 }
 
+static struct mlx5e_neigh_hash_entry *
+mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
+		   struct mlx5e_neigh_hash_entry *nhe)
+{
+	struct mlx5e_neigh_hash_entry *next = NULL;
+
+	rcu_read_lock();
+
+	for (next = nhe ?
+		     list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+					   &nhe->neigh_list,
+					   struct mlx5e_neigh_hash_entry,
+					   neigh_list) :
+		     list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
+					    struct mlx5e_neigh_hash_entry,
+					    neigh_list);
+	     next;
+	     next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+					  &next->neigh_list,
+					  struct mlx5e_neigh_hash_entry,
+					  neigh_list))
+		if (mlx5e_rep_neigh_entry_hold(next))
+			break;
+
+	rcu_read_unlock();
+
+	if (nhe)
+		mlx5e_rep_neigh_entry_release(nhe);
+
+	return next;
+}
+
 static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
 {
 	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
 						    neigh_update.neigh_stats_work.work);
 	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_neigh_hash_entry *nhe;
+	struct mlx5e_neigh_hash_entry *nhe = NULL;
 
 	rtnl_lock();
 	if (!list_empty(&rpriv->neigh_update.neigh_list))
 		mlx5e_rep_queue_neigh_stats_work(priv);
 
-	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list) {
-		if (mlx5e_rep_neigh_entry_hold(nhe)) {
-			mlx5e_tc_update_neigh_used_value(nhe);
-			mlx5e_rep_neigh_entry_release(nhe);
-		}
-	}
+	while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
+		mlx5e_tc_update_neigh_used_value(nhe);
 
 	rtnl_unlock();
 }
@@ -883,13 +911,9 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 		m_neigh.family = n->ops->family;
 		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
 
-		/* We are in atomic context and can't take RTNL mutex, so use
-		 * spin_lock_bh to lookup the neigh table. bh is used since
-		 * netevent can be called from a softirq context.
-		 */
-		spin_lock_bh(&neigh_update->encap_lock);
+		rcu_read_lock();
 		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
-		spin_unlock_bh(&neigh_update->encap_lock);
+		rcu_read_unlock();
 		if (!nhe)
 			return NOTIFY_DONE;
 
@@ -910,19 +934,15 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 #endif
 			return NOTIFY_DONE;
 
-		/* We are in atomic context and can't take RTNL mutex,
-		 * so use spin_lock_bh to walk the neigh list and look for
-		 * the relevant device. bh is used since netevent can be
-		 * called from a softirq context.
-		 */
-		spin_lock_bh(&neigh_update->encap_lock);
-		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
+		rcu_read_lock();
+		list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
+					neigh_list) {
 			if (p->dev == nhe->m_neigh.dev) {
 				found = true;
 				break;
 			}
 		}
-		spin_unlock_bh(&neigh_update->encap_lock);
+		rcu_read_unlock();
 		if (!found)
 			return NOTIFY_DONE;
 
@@ -995,7 +1015,7 @@ static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
 	if (err)
 		return err;
 
-	list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+	list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
 
 	return err;
 }
@@ -1006,7 +1026,7 @@ static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
 
 	spin_lock_bh(&rpriv->neigh_update.encap_lock);
 
-	list_del(&nhe->neigh_list);
+	list_del_rcu(&nhe->neigh_list);
 
 	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
 			       &nhe->rhash_node,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index f5bc9772be98..d057e401b0de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -138,6 +138,8 @@ struct mlx5e_neigh_hash_entry {
 	 * 'used' value and avoid neigh deleting by the kernel.
 	 */
 	unsigned long reported_lastuse;
+
+	struct rcu_head rcu;
 };
 
 enum {
-- 
2.21.0


Thread overview: 13+ messages
2019-08-21 23:28 [pull request][net-next 00/11] Mellanox, mlx5 tc flow handling for concurrent execution (Part 3/3) Saeed Mahameed
2019-08-21 23:28 ` [net-next 01/11] net/mlx5e: Extract code that queues neigh update work into function Saeed Mahameed
2019-08-21 23:28 ` [net-next 02/11] net/mlx5e: Always take reference to neigh entry Saeed Mahameed
2019-08-21 23:28 ` Saeed Mahameed [this message]
2019-08-21 23:28 ` [net-next 04/11] net/mlx5e: Refactor mlx5e_neigh_update_table->encap_lock Saeed Mahameed
2019-08-21 23:28 ` [net-next 05/11] net/mlx5e: Protect neigh hash encap list with spinlock and rcu Saeed Mahameed
2019-08-21 23:28 ` [net-next 06/11] net/mlx5e: Refactor neigh used value update for concurrent execution Saeed Mahameed
2019-08-21 23:28 ` [net-next 07/11] net/mlx5e: Refactor neigh " Saeed Mahameed
2019-08-21 23:28 ` [net-next 08/11] net/mlx5e: Only access fully initialized flows in neigh update Saeed Mahameed
2019-08-21 23:28 ` [net-next 09/11] net/mlx5e: Add tc flower tracepoints Saeed Mahameed
2019-08-21 23:28 ` [net-next 10/11] net/mlx5e: Add trace point for neigh used value update Saeed Mahameed
2019-08-21 23:28 ` [net-next 11/11] net/mlx5e: Add trace point for neigh update Saeed Mahameed
2019-08-22  3:23 ` [pull request][net-next 00/11] Mellanox, mlx5 tc flow handling for concurrent execution (Part 3/3) David Miller