All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing
@ 2018-02-19  9:58 Kirill Tkhai
  2018-02-19  9:58 ` [PATCH 1/3] net: Kill net_mutex Kirill Tkhai
                   ` (4 more replies)
  0 siblings, 5 replies; 10+ messages in thread
From: Kirill Tkhai @ 2018-02-19  9:58 UTC (permalink / raw)
  To: davem, nicolas.dichtel, vyasevic, ktkhai, paulmck, vyasevich,
	mark.rutland, gregkh, leonro, avagin, fw, roman.kapl, netdev

[1/3] kills net_mutex and makes net_sem be taken for write instead.
      This is made to take less locks (1 instead of 2) for the time
      before all pernet_operations are converted.

[2-3/3] simplifies dead net cleanup queueing, and makes llist api
        be used for that.

---

Kirill Tkhai (3):
      net: Kill net_mutex
      net: Make cleanup_list and net::cleanup_list of llist type
      net: Queue net_cleanup_work only if there is first net added


 include/linux/rtnetlink.h   |    1 -
 include/net/net_namespace.h |   12 +++++--
 net/core/net_namespace.c    |   75 ++++++++++++++++++++++---------------------
 3 files changed, 47 insertions(+), 41 deletions(-)

--
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH 1/3] net: Kill net_mutex
  2018-02-19  9:58 [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
@ 2018-02-19  9:58 ` Kirill Tkhai
  2018-02-20 23:18   ` Stephen Hemminger
  2018-02-19  9:58 ` [PATCH 2/3] net: Make cleanup_list and net::cleanup_list of llist type Kirill Tkhai
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 10+ messages in thread
From: Kirill Tkhai @ 2018-02-19  9:58 UTC (permalink / raw)
  To: davem, nicolas.dichtel, vyasevic, ktkhai, paulmck, vyasevich,
	mark.rutland, gregkh, leonro, avagin, fw, roman.kapl, netdev

We take net_mutex, when there are !async pernet_operations
registered, and read locking of net_sem is not enough. But
we may get rid of taking the mutex, and just change the logic
to write lock net_sem in such cases. This obviously reduces
the number of lock operations we do.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 include/linux/rtnetlink.h   |    1 -
 include/net/net_namespace.h |   11 ++++++---
 net/core/net_namespace.c    |   53 ++++++++++++++++++++++++++-----------------
 3 files changed, 39 insertions(+), 26 deletions(-)

diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index e9ee9ad0a681..3573b4bf2fdf 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -35,7 +35,6 @@ extern int rtnl_trylock(void);
 extern int rtnl_is_locked(void);
 
 extern wait_queue_head_t netdev_unregistering_wq;
-extern struct mutex net_mutex;
 extern struct rw_semaphore net_sem;
 
 #ifdef CONFIG_PROVE_LOCKING
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 9158ec1ad06f..115b01b92f4d 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -60,8 +60,11 @@ struct net {
 
 	struct list_head	list;		/* list of network namespaces */
 	struct list_head	cleanup_list;	/* namespaces on death row */
-	struct list_head	exit_list;	/* Use only net_mutex */
-
+	struct list_head	exit_list;	/* To linked to call pernet exit
+						 * methods on dead net (net_sem
+						 * read locked), or to unregister
+						 * pernet ops (net_sem wr locked).
+						 */
 	struct user_namespace   *user_ns;	/* Owning user namespace */
 	struct ucounts		*ucounts;
 	spinlock_t		nsid_lock;
@@ -89,7 +92,7 @@ struct net {
 	/* core fib_rules */
 	struct list_head	rules_ops;
 
-	struct list_head	fib_notifier_ops;  /* protected by net_mutex */
+	struct list_head	fib_notifier_ops;  /* protected by net_sem */
 
 	struct net_device       *loopback_dev;          /* The loopback */
 	struct netns_core	core;
@@ -316,7 +319,7 @@ struct pernet_operations {
 	/*
 	 * Indicates above methods are allowed to be executed in parallel
 	 * with methods of any other pernet_operations, i.e. they are not
-	 * need synchronization via net_mutex.
+	 * need write locked net_sem.
 	 */
 	bool async;
 };
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index bcab9a938d6f..e89a516620dd 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -29,8 +29,6 @@
 
 static LIST_HEAD(pernet_list);
 static struct list_head *first_device = &pernet_list;
-/* Used only if there are !async pernet_operations registered */
-DEFINE_MUTEX(net_mutex);
 
 LIST_HEAD(net_namespace_list);
 EXPORT_SYMBOL_GPL(net_namespace_list);
@@ -407,6 +405,7 @@ struct net *copy_net_ns(unsigned long flags,
 {
 	struct ucounts *ucounts;
 	struct net *net;
+	unsigned write;
 	int rv;
 
 	if (!(flags & CLONE_NEWNET))
@@ -424,20 +423,26 @@ struct net *copy_net_ns(unsigned long flags,
 	refcount_set(&net->passive, 1);
 	net->ucounts = ucounts;
 	get_user_ns(user_ns);
-
-	rv = down_read_killable(&net_sem);
+again:
+	write = READ_ONCE(nr_sync_pernet_ops);
+	if (write)
+		rv = down_write_killable(&net_sem);
+	else
+		rv = down_read_killable(&net_sem);
 	if (rv < 0)
 		goto put_userns;
-	if (nr_sync_pernet_ops) {
-		rv = mutex_lock_killable(&net_mutex);
-		if (rv < 0)
-			goto up_read;
+
+	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
+		up_read(&net_sem);
+		goto again;
 	}
 	rv = setup_net(net, user_ns);
-	if (nr_sync_pernet_ops)
-		mutex_unlock(&net_mutex);
-up_read:
-	up_read(&net_sem);
+
+	if (write)
+		up_write(&net_sem);
+	else
+		up_read(&net_sem);
+
 	if (rv < 0) {
 put_userns:
 		put_user_ns(user_ns);
@@ -485,15 +490,23 @@ static void cleanup_net(struct work_struct *work)
 	struct net *net, *tmp, *last;
 	struct list_head net_kill_list;
 	LIST_HEAD(net_exit_list);
+	unsigned write;
 
 	/* Atomically snapshot the list of namespaces to cleanup */
 	spin_lock_irq(&cleanup_list_lock);
 	list_replace_init(&cleanup_list, &net_kill_list);
 	spin_unlock_irq(&cleanup_list_lock);
+again:
+	write = READ_ONCE(nr_sync_pernet_ops);
+	if (write)
+		down_write(&net_sem);
+	else
+		down_read(&net_sem);
 
-	down_read(&net_sem);
-	if (nr_sync_pernet_ops)
-		mutex_lock(&net_mutex);
+	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
+		up_read(&net_sem);
+		goto again;
+	}
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
@@ -528,14 +541,14 @@ static void cleanup_net(struct work_struct *work)
 	list_for_each_entry_reverse(ops, &pernet_list, list)
 		ops_exit_list(ops, &net_exit_list);
 
-	if (nr_sync_pernet_ops)
-		mutex_unlock(&net_mutex);
-
 	/* Free the net generic variables */
 	list_for_each_entry_reverse(ops, &pernet_list, list)
 		ops_free_list(ops, &net_exit_list);
 
-	up_read(&net_sem);
+	if (write)
+		up_write(&net_sem);
+	else
+		up_read(&net_sem);
 
 	/* Ensure there are no outstanding rcu callbacks using this
 	 * network namespace.
@@ -563,8 +576,6 @@ static void cleanup_net(struct work_struct *work)
 void net_ns_barrier(void)
 {
 	down_write(&net_sem);
-	mutex_lock(&net_mutex);
-	mutex_unlock(&net_mutex);
 	up_write(&net_sem);
 }
 EXPORT_SYMBOL(net_ns_barrier);

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 2/3] net: Make cleanup_list and net::cleanup_list of llist type
  2018-02-19  9:58 [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
  2018-02-19  9:58 ` [PATCH 1/3] net: Kill net_mutex Kirill Tkhai
@ 2018-02-19  9:58 ` Kirill Tkhai
  2018-02-20 19:42   ` Cong Wang
  2018-02-19  9:58 ` [PATCH 3/3] net: Queue net_cleanup_work only if there is first net added Kirill Tkhai
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 10+ messages in thread
From: Kirill Tkhai @ 2018-02-19  9:58 UTC (permalink / raw)
  To: davem, nicolas.dichtel, vyasevic, ktkhai, paulmck, vyasevich,
	mark.rutland, gregkh, leonro, avagin, fw, roman.kapl, netdev

This simplifies cleanup queueing and makes cleanup lists
to use llist primitives. Since llist has its own cmpxchg()
ordering, cleanup_list_lock is no longer needed.

Also, struct llist_node is smaller than struct list_head,
so we save some bytes in struct net with this patch.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 include/net/net_namespace.h |    3 ++-
 net/core/net_namespace.c    |   20 ++++++--------------
 2 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 115b01b92f4d..d4417495773a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -59,12 +59,13 @@ struct net {
 	atomic64_t		cookie_gen;
 
 	struct list_head	list;		/* list of network namespaces */
-	struct list_head	cleanup_list;	/* namespaces on death row */
 	struct list_head	exit_list;	/* To linked to call pernet exit
 						 * methods on dead net (net_sem
 						 * read locked), or to unregister
 						 * pernet ops (net_sem wr locked).
 						 */
+	struct llist_node	cleanup_list;	/* namespaces on death row */
+
 	struct user_namespace   *user_ns;	/* Owning user namespace */
 	struct ucounts		*ucounts;
 	spinlock_t		nsid_lock;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e89a516620dd..abf8a46e94e2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -481,21 +481,18 @@ static void unhash_nsid(struct net *net, struct net *last)
 	spin_unlock_bh(&net->nsid_lock);
 }
 
-static DEFINE_SPINLOCK(cleanup_list_lock);
-static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
+static LLIST_HEAD(cleanup_list);
 
 static void cleanup_net(struct work_struct *work)
 {
 	const struct pernet_operations *ops;
 	struct net *net, *tmp, *last;
-	struct list_head net_kill_list;
+	struct llist_node *net_kill_list;
 	LIST_HEAD(net_exit_list);
 	unsigned write;
 
 	/* Atomically snapshot the list of namespaces to cleanup */
-	spin_lock_irq(&cleanup_list_lock);
-	list_replace_init(&cleanup_list, &net_kill_list);
-	spin_unlock_irq(&cleanup_list_lock);
+	net_kill_list = llist_del_all(&cleanup_list);
 again:
 	write = READ_ONCE(nr_sync_pernet_ops);
 	if (write)
@@ -510,7 +507,7 @@ static void cleanup_net(struct work_struct *work)
 
 	/* Don't let anyone else find us. */
 	rtnl_lock();
-	list_for_each_entry(net, &net_kill_list, cleanup_list)
+	llist_for_each_entry(net, net_kill_list, cleanup_list)
 		list_del_rcu(&net->list);
 	/* Cache last net. After we unlock rtnl, no one new net
 	 * added to net_namespace_list can assign nsid pointer
@@ -525,7 +522,7 @@ static void cleanup_net(struct work_struct *work)
 	last = list_last_entry(&net_namespace_list, struct net, list);
 	rtnl_unlock();
 
-	list_for_each_entry(net, &net_kill_list, cleanup_list) {
+	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 		unhash_nsid(net, last);
 		list_add_tail(&net->exit_list, &net_exit_list);
 	}
@@ -585,12 +582,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
 void __put_net(struct net *net)
 {
 	/* Cleanup the network namespace in process context */
-	unsigned long flags;
-
-	spin_lock_irqsave(&cleanup_list_lock, flags);
-	list_add(&net->cleanup_list, &cleanup_list);
-	spin_unlock_irqrestore(&cleanup_list_lock, flags);
-
+	llist_add(&net->cleanup_list, &cleanup_list);
 	queue_work(netns_wq, &net_cleanup_work);
 }
 EXPORT_SYMBOL_GPL(__put_net);

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH 3/3] net: Queue net_cleanup_work only if there is first net added
  2018-02-19  9:58 [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
  2018-02-19  9:58 ` [PATCH 1/3] net: Kill net_mutex Kirill Tkhai
  2018-02-19  9:58 ` [PATCH 2/3] net: Make cleanup_list and net::cleanup_list of llist type Kirill Tkhai
@ 2018-02-19  9:58 ` Kirill Tkhai
  2018-02-19 13:52 ` [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
  2018-02-20 18:24 ` David Miller
  4 siblings, 0 replies; 10+ messages in thread
From: Kirill Tkhai @ 2018-02-19  9:58 UTC (permalink / raw)
  To: davem, nicolas.dichtel, vyasevic, ktkhai, paulmck, vyasevich,
	mark.rutland, gregkh, leonro, avagin, fw, roman.kapl, netdev

When llist_add() returns false, cleanup_net() hasn't made its
llist_del_all(), while the work has already been scheduled
by the first queuer. So, we may skip queue_work() in this case.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 net/core/net_namespace.c |    4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index abf8a46e94e2..27a55236ad64 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -582,8 +582,8 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
 void __put_net(struct net *net)
 {
 	/* Cleanup the network namespace in process context */
-	llist_add(&net->cleanup_list, &cleanup_list);
-	queue_work(netns_wq, &net_cleanup_work);
+	if (llist_add(&net->cleanup_list, &cleanup_list))
+		queue_work(netns_wq, &net_cleanup_work);
 }
 EXPORT_SYMBOL_GPL(__put_net);
 

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing
  2018-02-19  9:58 [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
                   ` (2 preceding siblings ...)
  2018-02-19  9:58 ` [PATCH 3/3] net: Queue net_cleanup_work only if there is first net added Kirill Tkhai
@ 2018-02-19 13:52 ` Kirill Tkhai
  2018-02-20 18:24 ` David Miller
  4 siblings, 0 replies; 10+ messages in thread
From: Kirill Tkhai @ 2018-02-19 13:52 UTC (permalink / raw)
  To: davem, nicolas.dichtel, vyasevic, paulmck, vyasevich,
	mark.rutland, gregkh, leonro, avagin, fw, roman.kapl, netdev

The subject has to contain [net-next]. Sorry, I forgot to add it.

Thanks.

On 19.02.2018 12:58, Kirill Tkhai wrote:
> [1/3] kills net_mutex and makes net_sem be taken for write instead.
>       This is made to take less locks (1 instead of 2) for the time
>       before all pernet_operations are converted.
> 
> [2-3/3] simplifies dead net cleanup queueing, and makes llist api
>         be used for that.
> 
> ---
> 
> Kirill Tkhai (3):
>       net: Kill net_mutex
>       net: Make cleanup_list and net::cleanup_list of llist type
>       net: Queue net_cleanup_work only if there is first net added
> 
> 
>  include/linux/rtnetlink.h   |    1 -
>  include/net/net_namespace.h |   12 +++++--
>  net/core/net_namespace.c    |   75 ++++++++++++++++++++++---------------------
>  3 files changed, 47 insertions(+), 41 deletions(-)
> 
> --
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> 

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing
  2018-02-19  9:58 [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
                   ` (3 preceding siblings ...)
  2018-02-19 13:52 ` [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
@ 2018-02-20 18:24 ` David Miller
  4 siblings, 0 replies; 10+ messages in thread
From: David Miller @ 2018-02-20 18:24 UTC (permalink / raw)
  To: ktkhai
  Cc: nicolas.dichtel, vyasevic, paulmck, vyasevich, mark.rutland,
	gregkh, leonro, avagin, fw, roman.kapl, netdev

From: Kirill Tkhai <ktkhai@virtuozzo.com>
Date: Mon, 19 Feb 2018 12:58:28 +0300

> [1/3] kills net_mutex and makes net_sem be taken for write instead.
>       This is made to take less locks (1 instead of 2) for the time
>       before all pernet_operations are converted.
> 
> [2-3/3] simplifies dead net cleanup queueing, and makes llist api
>         be used for that.

Series applied, thank you.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/3] net: Make cleanup_list and net::cleanup_list of llist type
  2018-02-19  9:58 ` [PATCH 2/3] net: Make cleanup_list and net::cleanup_list of llist type Kirill Tkhai
@ 2018-02-20 19:42   ` Cong Wang
  2018-02-21  8:30     ` Kirill Tkhai
  0 siblings, 1 reply; 10+ messages in thread
From: Cong Wang @ 2018-02-20 19:42 UTC (permalink / raw)
  To: Kirill Tkhai
  Cc: David Miller, Nicolas Dichtel, vyasevic, Paul E. McKenney,
	Vladislav Yasevich, mark.rutland, Greg KH, leonro, avagin,
	Florian Westphal, roman.kapl, Linux Kernel Network Developers

On Mon, Feb 19, 2018 at 1:58 AM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>  void __put_net(struct net *net)
>  {
>         /* Cleanup the network namespace in process context */
> -       unsigned long flags;
> -
> -       spin_lock_irqsave(&cleanup_list_lock, flags);
> -       list_add(&net->cleanup_list, &cleanup_list);
> -       spin_unlock_irqrestore(&cleanup_list_lock, flags);
> -
> +       llist_add(&net->cleanup_list, &cleanup_list);
>         queue_work(netns_wq, &net_cleanup_work);
>  }

Is llist safe against IRQ too?

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/3] net: Kill net_mutex
  2018-02-19  9:58 ` [PATCH 1/3] net: Kill net_mutex Kirill Tkhai
@ 2018-02-20 23:18   ` Stephen Hemminger
  2018-02-21 10:16     ` Kirill Tkhai
  0 siblings, 1 reply; 10+ messages in thread
From: Stephen Hemminger @ 2018-02-20 23:18 UTC (permalink / raw)
  To: Kirill Tkhai
  Cc: davem, nicolas.dichtel, vyasevic, paulmck, vyasevich,
	mark.rutland, gregkh, leonro, avagin, fw, roman.kapl, netdev

On Mon, 19 Feb 2018 12:58:38 +0300
Kirill Tkhai <ktkhai@virtuozzo.com> wrote:

> +	struct list_head	exit_list;	/* To linked to call pernet exit
> +						 * methods on dead net (net_sem
> +						 * read locked), or to unregister
> +						 * pernet ops (net_sem wr locked).
> +						 */

Sorry, that comment is completely unparseable.
Either you know what it does, and therefore comment is unnecessary
Or change comment to a valid explanation of the semantics of the list.

Maybe comments about locking model are best left to where
it is used in the code.

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 2/3] net: Make cleanup_list and net::cleanup_list of llist type
  2018-02-20 19:42   ` Cong Wang
@ 2018-02-21  8:30     ` Kirill Tkhai
  0 siblings, 0 replies; 10+ messages in thread
From: Kirill Tkhai @ 2018-02-21  8:30 UTC (permalink / raw)
  To: Cong Wang
  Cc: David Miller, Nicolas Dichtel, vyasevic, Paul E. McKenney,
	Vladislav Yasevich, mark.rutland, Greg KH, leonro, avagin,
	Florian Westphal, roman.kapl, Linux Kernel Network Developers

On 20.02.2018 22:42, Cong Wang wrote:
> On Mon, Feb 19, 2018 at 1:58 AM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
>>  void __put_net(struct net *net)
>>  {
>>         /* Cleanup the network namespace in process context */
>> -       unsigned long flags;
>> -
>> -       spin_lock_irqsave(&cleanup_list_lock, flags);
>> -       list_add(&net->cleanup_list, &cleanup_list);
>> -       spin_unlock_irqrestore(&cleanup_list_lock, flags);
>> -
>> +       llist_add(&net->cleanup_list, &cleanup_list);
>>         queue_work(netns_wq, &net_cleanup_work);
>>  }
> 
> Is llist safe against IRQ too?

Yes, it's safe, and it's aimed at cases like this. There is no "locked"
state like a spinlock has; there is a single cmpxchg().

You may find examples it's used in ./kernel directory.

Kirill

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH 1/3] net: Kill net_mutex
  2018-02-20 23:18   ` Stephen Hemminger
@ 2018-02-21 10:16     ` Kirill Tkhai
  0 siblings, 0 replies; 10+ messages in thread
From: Kirill Tkhai @ 2018-02-21 10:16 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: davem, nicolas.dichtel, vyasevic, paulmck, vyasevich,
	mark.rutland, gregkh, leonro, avagin, fw, roman.kapl, netdev

Hi, Stephen,

On 21.02.2018 02:18, Stephen Hemminger wrote:
> On Mon, 19 Feb 2018 12:58:38 +0300
> Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> 
>> +	struct list_head	exit_list;	/* To linked to call pernet exit
>> +						 * methods on dead net (net_sem
>> +						 * read locked), or to unregister
>> +						 * pernet ops (net_sem wr locked).
>> +						 */
> 
> Sorry, that comment is completely unparseable.
> Either you know what it does, and therefore comment is unnecessary
> Or change comment to a valid explanation of the semantics of the list.
> 
> Maybe comments about locking model are best left to where
> it is used in the code.

Let's improve it :) It's used to call pernet exit methods, and net ns logic
guarantees we never call exit methods for the same net in parallel. How
about writing this directly without mention of net_sem? Something like this:

/* To link net to call pernet exit methods */

Or maybe you have better variant?

Thanks,
Kirill

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2018-02-21 10:16 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-19  9:58 [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
2018-02-19  9:58 ` [PATCH 1/3] net: Kill net_mutex Kirill Tkhai
2018-02-20 23:18   ` Stephen Hemminger
2018-02-21 10:16     ` Kirill Tkhai
2018-02-19  9:58 ` [PATCH 2/3] net: Make cleanup_list and net::cleanup_list of llist type Kirill Tkhai
2018-02-20 19:42   ` Cong Wang
2018-02-21  8:30     ` Kirill Tkhai
2018-02-19  9:58 ` [PATCH 3/3] net: Queue net_cleanup_work only if there is first net added Kirill Tkhai
2018-02-19 13:52 ` [PATCH 0/3] net: Get rid of net_mutex and simplify cleanup_list queueing Kirill Tkhai
2018-02-20 18:24 ` David Miller

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.