From: Leon Romanovsky <leon@kernel.org>
To: "David S . Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>
Cc: Leon Romanovsky <leonro@nvidia.com>,
	Ido Schimmel <idosch@nvidia.com>, Jiri Pirko <jiri@nvidia.com>,
	netdev <netdev@vger.kernel.org>
Subject: [RFC PATCH 12/16] devlink: Protect all sb operations with specialized lock
Date: Mon,  8 Nov 2021 19:05:34 +0200
Message-ID: <8b8ef7aac1827f75714ea48c5d9b3898b49e4103.1636390483.git.leonro@nvidia.com>
In-Reply-To: <cover.1636390483.git.leonro@nvidia.com>

From: Leon Romanovsky <leonro@nvidia.com>

Separate the sb list protection from the main devlink instance lock
and rely on a specialized lock instead. This removes one more user of
the big devlink lock on the way to allowing parallel execution of
devlink commands.
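
The resulting pattern in the sb dump callbacks and in
devlink_sb_register()/devlink_sb_unregister() is (a minimal sketch;
the hunks below keep the existing iteration and error handling):

	/* sb_list is now guarded by its own mutex, not devlink->lock */
	mutex_lock(&devlink->sb_list_lock);
	list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
		...
	}
	mutex_unlock(&devlink->sb_list_lock);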

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 net/core/devlink.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/net/core/devlink.c b/net/core/devlink.c
index 008826bc108d..19f1802f1e5d 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -45,6 +45,7 @@ struct devlink {
 	struct list_head rate_list;
 	struct mutex rate_list_lock; /* protects rate_list */
 	struct list_head sb_list;
+	struct mutex sb_list_lock; /* protects sb_list */
 	struct list_head dpipe_table_list;
 	struct list_head resource_list;
 	struct mutex resource_list_lock; /* protects resource_list */
@@ -1985,7 +1986,7 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
 			goto retry;
 
-		mutex_lock(&devlink->lock);
+		mutex_lock(&devlink->sb_list_lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			if (idx < start) {
 				idx++;
@@ -1997,13 +1998,13 @@ static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
 						 cb->nlh->nlmsg_seq,
 						 NLM_F_MULTI);
 			if (err) {
-				mutex_unlock(&devlink->lock);
+				mutex_unlock(&devlink->sb_list_lock);
 				devlink_put(devlink);
 				goto out;
 			}
 			idx++;
 		}
-		mutex_unlock(&devlink->lock);
+		mutex_unlock(&devlink->sb_list_lock);
 retry:
 		devlink_put(devlink);
 	}
@@ -2138,7 +2139,7 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
 		    !devlink->ops->sb_pool_get)
 			goto retry;
 
-		mutex_lock(&devlink->lock);
+		mutex_lock(&devlink->sb_list_lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
 						   devlink_sb,
@@ -2147,12 +2148,12 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
 			if (err == -EOPNOTSUPP) {
 				err = 0;
 			} else if (err) {
-				mutex_unlock(&devlink->lock);
+				mutex_unlock(&devlink->sb_list_lock);
 				devlink_put(devlink);
 				goto out;
 			}
 		}
-		mutex_unlock(&devlink->lock);
+		mutex_unlock(&devlink->sb_list_lock);
 retry:
 		devlink_put(devlink);
 	}
@@ -2359,7 +2360,7 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
 		    !devlink->ops->sb_port_pool_get)
 			goto retry;
 
-		mutex_lock(&devlink->lock);
+		mutex_lock(&devlink->sb_list_lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_port_pool_get_dumpit(msg, start, &idx,
 							devlink, devlink_sb,
@@ -2368,12 +2369,12 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
 			if (err == -EOPNOTSUPP) {
 				err = 0;
 			} else if (err) {
-				mutex_unlock(&devlink->lock);
+				mutex_unlock(&devlink->sb_list_lock);
 				devlink_put(devlink);
 				goto out;
 			}
 		}
-		mutex_unlock(&devlink->lock);
+		mutex_unlock(&devlink->sb_list_lock);
 retry:
 		devlink_put(devlink);
 	}
@@ -2611,7 +2612,7 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
 		    !devlink->ops->sb_tc_pool_bind_get)
 			goto retry;
 
-		mutex_lock(&devlink->lock);
+		mutex_lock(&devlink->sb_list_lock);
 		list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
 			err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
 							   devlink,
@@ -2621,12 +2622,12 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
 			if (err == -EOPNOTSUPP) {
 				err = 0;
 			} else if (err) {
-				mutex_unlock(&devlink->lock);
+				mutex_unlock(&devlink->sb_list_lock);
 				devlink_put(devlink);
 				goto out;
 			}
 		}
-		mutex_unlock(&devlink->lock);
+		mutex_unlock(&devlink->sb_list_lock);
 retry:
 		devlink_put(devlink);
 	}
@@ -9022,6 +9023,8 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
 	mutex_init(&devlink->rate_list_lock);
 
 	INIT_LIST_HEAD(&devlink->sb_list);
+	mutex_init(&devlink->sb_list_lock);
+
 	INIT_LIST_HEAD_RCU(&devlink->dpipe_table_list);
 
 	INIT_LIST_HEAD(&devlink->resource_list);
@@ -9201,6 +9204,7 @@ void devlink_free(struct devlink *devlink)
 	mutex_destroy(&devlink->traps_lock);
 	mutex_destroy(&devlink->region_list_lock);
 	mutex_destroy(&devlink->rate_list_lock);
+	mutex_destroy(&devlink->sb_list_lock);
 	mutex_destroy(&devlink->lock);
 	WARN_ON(!list_empty(&devlink->trap_policer_list));
 	WARN_ON(!list_empty(&devlink->trap_group_list));
@@ -9715,9 +9719,9 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
 	devlink_sb->ingress_tc_count = ingress_tc_count;
 	devlink_sb->egress_tc_count = egress_tc_count;
 
-	mutex_lock(&devlink->lock);
+	mutex_lock(&devlink->sb_list_lock);
 	list_add_tail(&devlink_sb->list, &devlink->sb_list);
-	mutex_unlock(&devlink->lock);
+	mutex_unlock(&devlink->sb_list_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(devlink_sb_register);
@@ -9729,9 +9733,9 @@ void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
 	devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
 	WARN_ON(!devlink_sb);
 
-	mutex_lock(&devlink->lock);
+	mutex_lock(&devlink->sb_list_lock);
 	list_del(&devlink_sb->list);
-	mutex_unlock(&devlink->lock);
+	mutex_unlock(&devlink->sb_list_lock);
 	kfree(devlink_sb);
 }
 EXPORT_SYMBOL_GPL(devlink_sb_unregister);
-- 
2.33.1


Thread overview: 17+ messages
2021-11-08 17:05 [RFC PATCH 00/16] Allow parallel devlink execution Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 01/16] devlink: Remove misleading internal_flags from health reporter dump Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 02/16] devlink: Delete useless checks of holding devlink lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 03/16] devlink: Simplify devlink resources unregister call Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 04/16] devlink: Clean registration of devlink port Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 05/16] devlink: Be explicit with devlink port protection Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 06/16] devlink: Reshuffle resource registration logic Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 07/16] devlink: Inline sb related functions Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 08/16] devlink: Protect resource list with specific lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 09/16] devlink: Protect all traps lists with specialized lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 10/16] devlink: Separate region list protection to be done " Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 11/16] devlink: Protect all rate operations " Leon Romanovsky
2021-11-08 17:05 ` Leon Romanovsky [this message]
2021-11-08 17:05 ` [RFC PATCH 13/16] devlink: Convert dpipe to use dpipe_lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 14/16] devlink: Require devlink lock during device reload Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 15/16] devlink: Use xarray locking mechanism instead big devlink lock Leon Romanovsky
2021-11-08 17:05 ` [RFC PATCH 16/16] devlink: Open devlink to parallel operations Leon Romanovsky
