From: Leon Romanovsky <leon@kernel.org>
To: "David S . Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>
Cc: Leon Romanovsky <leonro@nvidia.com>,
	Ido Schimmel <idosch@nvidia.com>, Jiri Pirko <jiri@nvidia.com>,
	netdev <netdev@vger.kernel.org>
Subject: [RFC PATCH 05/16] devlink: Be explicit with devlink port protection
Date: Mon,  8 Nov 2021 19:05:27 +0200
Message-ID: <e7989c996889c1ceb70a0a288f7b0a0e8ffda24b.1636390483.git.leonro@nvidia.com>
In-Reply-To: <cover.1636390483.git.leonro@nvidia.com>

From: Leon Romanovsky <leonro@nvidia.com>

Devlink port flows use the devlink instance lock to protect port_list
from parallel additions and deletions. Instead of relying on that global
lock, introduce a dedicated lock with a much clearer scope that protects
only port_list.
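
For illustration only, not part of the patch: a minimal userspace sketch
of the pattern being applied here, i.e. a dedicated mutex that guards
exactly one list instead of one coarse per-instance lock. It uses
pthreads in place of the kernel mutex API, and the struct and function
names are hypothetical stand-ins for struct devlink and
struct devlink_port.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical stand-ins for struct devlink and struct devlink_port. */
struct port {
	unsigned int index;
	struct port *next;
};

struct instance {
	struct port *ports;             /* singly linked port list */
	pthread_mutex_t port_list_lock; /* protects ports only */
};

/* Like devlink_port_get_by_index() after this patch: walk the list
 * under its dedicated lock and return the match, if any. As in the
 * patch, the lock is dropped before returning, so the caller must
 * otherwise guarantee the entry stays alive.
 */
static struct port *port_get_by_index(struct instance *ins,
				      unsigned int index)
{
	struct port *p, *res = NULL;

	pthread_mutex_lock(&ins->port_list_lock);
	for (p = ins->ports; p; p = p->next) {
		if (p->index == index) {
			res = p;
			break;
		}
	}
	pthread_mutex_unlock(&ins->port_list_lock);
	return res;
}

/* Like devlink_port_register()/unregister(): mutate the list only
 * while holding the dedicated lock.
 */
static void port_add(struct instance *ins, struct port *p)
{
	pthread_mutex_lock(&ins->port_list_lock);
	p->next = ins->ports;
	ins->ports = p;
	pthread_mutex_unlock(&ins->port_list_lock);
}

int main(void)
{
	struct instance ins = {
		.ports = NULL,
		.port_list_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct port p = { .index = 1, .next = NULL };

	port_add(&ins, &p);
	return port_get_by_index(&ins, 1) == &p ? 0 : 1;
}

Because the list is reached only via port_list_lock, flows touching
unrelated devlink objects no longer contend on the big instance lock,
which is the point of this series ("Allow parallel devlink execution").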

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 net/core/devlink.c | 61 ++++++++++++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 21 deletions(-)

diff --git a/net/core/devlink.c b/net/core/devlink.c
index b41ab8751635..d88e882616bc 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -41,6 +41,7 @@ struct devlink_dev_stats {
 struct devlink {
 	u32 index;
 	struct list_head port_list;
+	struct mutex port_list_lock; /* protects port_list */
 	struct list_head rate_list;
 	struct list_head sb_list;
 	struct list_head dpipe_table_list;
@@ -228,13 +229,17 @@ static struct devlink *devlink_get_from_attrs(struct net *net,
 static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
 						      unsigned int port_index)
 {
-	struct devlink_port *devlink_port;
+	struct devlink_port *devlink_port, *res = NULL;
 
+	mutex_lock(&devlink->port_list_lock);
 	list_for_each_entry(devlink_port, &devlink->port_list, list) {
-		if (devlink_port->index == port_index)
-			return devlink_port;
+		if (devlink_port->index == port_index) {
+			res = devlink_port;
+			break;
+		}
 	}
-	return NULL;
+	mutex_unlock(&devlink->port_list_lock);
+	return res;
 }
 
 static struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
@@ -1341,7 +1346,7 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			goto retry;
 
-		mutex_lock(&devlink->lock);
+		mutex_lock(&devlink->port_list_lock);
 		list_for_each_entry(devlink_port, &devlink->port_list, list) {
 			if (idx < start) {
 				idx++;
@@ -1353,13 +1358,13 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
 						   cb->nlh->nlmsg_seq,
 						   NLM_F_MULTI, cb->extack);
 			if (err) {
-				mutex_unlock(&devlink->lock);
+				mutex_unlock(&devlink->port_list_lock);
 				devlink_put(devlink);
 				goto out;
 			}
 			idx++;
 		}
-		mutex_unlock(&devlink->lock);
+		mutex_unlock(&devlink->port_list_lock);
 retry:
 		devlink_put(devlink);
 	}
@@ -2567,8 +2572,9 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
 {
 	struct devlink_port *devlink_port;
 	u16 tc_index;
-	int err;
+	int err = 0;
 
+	mutex_lock(&devlink->port_list_lock);
 	list_for_each_entry(devlink_port, &devlink->port_list, list) {
 		for (tc_index = 0;
 		     tc_index < devlink_sb->ingress_tc_count; tc_index++) {
@@ -2585,7 +2591,7 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
 							      portid, seq,
 							      NLM_F_MULTI);
 			if (err)
-				return err;
+				goto out;
 			(*p_idx)++;
 		}
 		for (tc_index = 0;
@@ -2603,11 +2609,13 @@ static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
 							      portid, seq,
 							      NLM_F_MULTI);
 			if (err)
-				return err;
+				goto out;
 			(*p_idx)++;
 		}
 	}
-	return 0;
+out:
+	mutex_unlock(&devlink->port_list_lock);
+	return err;
 }
 
 static int
@@ -4938,7 +4946,7 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			goto retry;
 
-		mutex_lock(&devlink->lock);
+		mutex_lock(&devlink->port_list_lock);
 		list_for_each_entry(devlink_port, &devlink->port_list, list) {
 			list_for_each_entry(param_item,
 					    &devlink_port->param_list, list) {
@@ -4956,14 +4964,14 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
 				if (err == -EOPNOTSUPP) {
 					err = 0;
 				} else if (err) {
-					mutex_unlock(&devlink->lock);
+					mutex_unlock(&devlink->port_list_lock);
 					devlink_put(devlink);
 					goto out;
 				}
 				idx++;
 			}
 		}
-		mutex_unlock(&devlink->lock);
+		mutex_unlock(&devlink->port_list_lock);
 retry:
 		devlink_put(devlink);
 	}
@@ -5481,6 +5489,7 @@ static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
 		(*idx)++;
 	}
 
+	mutex_lock(&devlink->port_list_lock);
 	list_for_each_entry(port, &devlink->port_list, list) {
 		err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, idx,
 							    start);
@@ -5489,6 +5498,7 @@ static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
 	}
 
 out:
+	mutex_unlock(&devlink->port_list_lock);
 	mutex_unlock(&devlink->lock);
 	return err;
 }
@@ -7254,7 +7264,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			goto retry_port;
 
-		mutex_lock(&devlink->lock);
+		mutex_lock(&devlink->port_list_lock);
 		list_for_each_entry(port, &devlink->port_list, list) {
 			mutex_lock(&port->reporters_lock);
 			list_for_each_entry(reporter, &port->reporter_list, list) {
@@ -7269,7 +7279,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
 					cb->nlh->nlmsg_seq, NLM_F_MULTI);
 				if (err) {
 					mutex_unlock(&port->reporters_lock);
-					mutex_unlock(&devlink->lock);
+					mutex_unlock(&devlink->port_list_lock);
 					devlink_put(devlink);
 					goto out;
 				}
@@ -7277,7 +7287,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
 			}
 			mutex_unlock(&port->reporters_lock);
 		}
-		mutex_unlock(&devlink->lock);
+		mutex_unlock(&devlink->port_list_lock);
 retry_port:
 		devlink_put(devlink);
 	}
@@ -8978,7 +8988,10 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
 	devlink->ops = ops;
 	xa_init_flags(&devlink->snapshot_ids, XA_FLAGS_ALLOC);
 	write_pnet(&devlink->_net, net);
+
 	INIT_LIST_HEAD(&devlink->port_list);
+	mutex_init(&devlink->port_list_lock);
+
 	INIT_LIST_HEAD(&devlink->rate_list);
 	INIT_LIST_HEAD(&devlink->sb_list);
 	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
@@ -9021,8 +9034,10 @@ static void devlink_notify_register(struct devlink *devlink)
 	struct devlink_region *region;
 
 	devlink_notify(devlink, DEVLINK_CMD_NEW);
+	mutex_lock(&devlink->port_list_lock);
 	list_for_each_entry(devlink_port, &devlink->port_list, list)
 		devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+	mutex_unlock(&devlink->port_list_lock);
 
 	list_for_each_entry(policer_item, &devlink->trap_policer_list, list)
 		devlink_trap_policer_notify(devlink, policer_item,
@@ -9077,8 +9092,11 @@ static void devlink_notify_unregister(struct devlink *devlink)
 		devlink_trap_policer_notify(devlink, policer_item,
 					    DEVLINK_CMD_TRAP_POLICER_DEL);
 
+	mutex_lock(&devlink->port_list_lock);
 	list_for_each_entry_reverse(devlink_port, &devlink->port_list, list)
 		devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
+	mutex_unlock(&devlink->port_list_lock);
+
 	devlink_notify(devlink, DEVLINK_CMD_DEL);
 }
 
@@ -9129,6 +9147,7 @@ void devlink_free(struct devlink *devlink)
 	ASSERT_DEVLINK_NOT_REGISTERED(devlink);
 
 	mutex_destroy(&devlink->reporters_lock);
+	mutex_destroy(&devlink->port_list_lock);
 	mutex_destroy(&devlink->lock);
 	WARN_ON(!list_empty(&devlink->trap_policer_list));
 	WARN_ON(!list_empty(&devlink->trap_group_list));
@@ -9211,9 +9230,9 @@ void devlink_port_register(struct devlink *devlink,
 	INIT_LIST_HEAD(&devlink_port->region_list);
 	INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
 
-	mutex_lock(&devlink->lock);
+	mutex_lock(&devlink->port_list_lock);
 	list_add_tail(&devlink_port->list, &devlink->port_list);
-	mutex_unlock(&devlink->lock);
+	mutex_unlock(&devlink->port_list_lock);
 
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
 	devlink_port_type_warn_schedule(devlink_port);
@@ -9232,9 +9251,9 @@ void devlink_port_unregister(struct devlink_port *devlink_port)
 	devlink_port_type_warn_cancel(devlink_port);
 	devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
 
-	mutex_lock(&devlink->lock);
+	mutex_lock(&devlink->port_list_lock);
 	list_del(&devlink_port->list);
-	mutex_unlock(&devlink->lock);
+	mutex_unlock(&devlink->port_list_lock);
 
 	WARN_ON(!list_empty(&devlink_port->reporter_list));
 	WARN_ON(!list_empty(&devlink_port->region_list));
-- 
2.33.1


Thread overview: 17+ messages
2021-11-08 17:05 [RFC PATCH 00/16] Allow parallel devlink execution Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 01/16] devlink: Remove misleading internal_flags from health reporter dump Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 02/16] devlink: Delete useless checks of holding devlink lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 03/16] devlink: Simplify devlink resources unregister call Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 04/16] devlink: Clean registration of devlink port Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 05/16] devlink: Be explicit with devlink port protection Leon Romanovsky [this message]
2021-11-08 17:05 ` [RFC PATCH 06/16] devlink: Reshuffle resource registration logic Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 07/16] devlink: Inline sb related functions Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 08/16] devlink: Protect resource list with specific lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 09/16] devlink: Protect all traps lists with specialized lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 10/16] devlink: Separate region list protection to be done with specialized lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 11/16] devlink: Protect all rate operations with specialized lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 12/16] devlink: Protect all sb operations with specialized lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 13/16] devlink: Convert dpipe to use dpipe_lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 14/16] devlink: Require devlink lock during device reload Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 15/16] devlink: Use xarray locking mechanism instead big devlink lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 16/16] devlink: Open devlink to parallel operations Leon Romanovsky
