* [PATCH net-next v2] net: allow to call netif_reset_xps_queues() under cpus_read_lock
@ 2018-08-09  3:07 Andrei Vagin
From: Andrei Vagin @ 2018-08-09  3:07 UTC (permalink / raw)
  To: David S. Miller
  Cc: netdev, Andrei Vagin, Nambiar, Amritha, Michael S. Tsirkin, Jason Wang

From: Andrei Vagin <avagin@gmail.com>

static_key_slow_inc() takes cpus_read_lock() internally. In the
virtio_net driver, XPS queues are initialized after setting the queue:cpu
affinity in virtnet_set_affinity(), which already runs under
cpus_read_lock(). Lockdep therefore prints a recursive-locking warning
when cpus_read_lock() is acquired a second time.

This patch adds the ability to call __netif_set_xps_queue() under
cpus_read_lock().

============================================
WARNING: possible recursive locking detected
4.18.0-rc3-next-20180703+ #1 Not tainted
--------------------------------------------
swapper/0/1 is trying to acquire lock:
00000000cf973d46 (cpu_hotplug_lock.rw_sem){++++}, at: static_key_slow_inc+0xe/0x20

but task is already holding lock:
00000000cf973d46 (cpu_hotplug_lock.rw_sem){++++}, at: init_vqs+0x513/0x5a0

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(cpu_hotplug_lock.rw_sem);
  lock(cpu_hotplug_lock.rw_sem);

 *** DEADLOCK ***

 May be due to missing lock nesting notation

3 locks held by swapper/0/1:
 #0: 00000000244bc7da (&dev->mutex){....}, at: __driver_attach+0x5a/0x110
 #1: 00000000cf973d46 (cpu_hotplug_lock.rw_sem){++++}, at: init_vqs+0x513/0x5a0
 #2: 000000005cd8463f (xps_map_mutex){+.+.}, at: __netif_set_xps_queue+0x8d/0xc60

v2: move cpus_read_lock() out of __netif_set_xps_queue()
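
As an illustration, a minimal userspace sketch of that split follows; the
names and the pthread rwlock below only stand in for cpu_hotplug_lock and
are not the kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Caller must hold hotplug_lock for reading (cf. __netif_set_xps_queue()). */
static int __set_xps_queue(int queue, int cpu)
{
	printf("map queue %d to cpu %d\n", queue, cpu);
	return 0;
}

/* Convenience wrapper for callers that do not hold the lock yet
 * (cf. netif_set_xps_queue()).
 */
static int set_xps_queue(int queue, int cpu)
{
	int ret;

	pthread_rwlock_rdlock(&hotplug_lock);
	ret = __set_xps_queue(queue, cpu);
	pthread_rwlock_unlock(&hotplug_lock);
	return ret;
}

int main(void)
{
	/* A caller that already holds the read lock, like
	 * virtnet_set_affinity() under cpus_read_lock(), uses the
	 * double-underscore variant and avoids taking the lock twice.
	 */
	pthread_rwlock_rdlock(&hotplug_lock);
	__set_xps_queue(0, 0);
	pthread_rwlock_unlock(&hotplug_lock);

	/* Unlocked callers keep using the wrapper. */
	return set_xps_queue(1, 1);
}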

Cc: "Nambiar, Amritha" <amritha.nambiar@intel.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Fixes: 8af2c06ff4b1 ("net-sysfs: Add interface for Rx queue(s) map per Tx queue")

Signed-off-by: Andrei Vagin <avagin@gmail.com>
---
 drivers/net/virtio_net.c |  4 +++-
 net/core/dev.c           | 20 +++++++++++++++-----
 net/core/net-sysfs.c     |  4 ++++
 3 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 62311dde6e71..39a7f4452587 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1903,9 +1903,11 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
 
 	i = 0;
 	for_each_online_cpu(cpu) {
+		const unsigned long *mask = cpumask_bits(cpumask_of(cpu));
+
 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
+		__netif_set_xps_queue(vi->dev, mask, i, false);
 		i++;
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index f68122f0ab02..325fc5088370 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2176,6 +2176,7 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 	if (!static_key_false(&xps_needed))
 		return;
 
+	cpus_read_lock();
 	mutex_lock(&xps_map_mutex);
 
 	if (static_key_false(&xps_rxqs_needed)) {
@@ -2199,10 +2200,11 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 
 out_no_maps:
 	if (static_key_enabled(&xps_rxqs_needed))
-		static_key_slow_dec(&xps_rxqs_needed);
+		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
 
-	static_key_slow_dec(&xps_needed);
+	static_key_slow_dec_cpuslocked(&xps_needed);
 	mutex_unlock(&xps_map_mutex);
+	cpus_read_unlock();
 }
 
 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
@@ -2250,6 +2252,7 @@ static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
 	return new_map;
 }
 
+/* Must be called under cpus_read_lock */
 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 			  u16 index, bool is_rxqs_map)
 {
@@ -2317,9 +2320,9 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	if (!new_dev_maps)
 		goto out_no_new_maps;
 
-	static_key_slow_inc(&xps_needed);
+	static_key_slow_inc_cpuslocked(&xps_needed);
 	if (is_rxqs_map)
-		static_key_slow_inc(&xps_rxqs_needed);
+		static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
 
 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
 	     j < nr_ids;) {
@@ -2448,11 +2451,18 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	kfree(new_dev_maps);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
 
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
 			u16 index)
 {
-	return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
+	int ret;
+
+	cpus_read_lock();
+	ret =  __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
+	cpus_read_unlock();
+
+	return ret;
 }
 EXPORT_SYMBOL(netif_set_xps_queue);
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 0a95bcf64cdc..bd67c4d0fcfd 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -26,6 +26,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
+#include <linux/cpu.h>
 
 #include "net-sysfs.h"
 
@@ -1400,7 +1401,10 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
 		return err;
 	}
 
+	cpus_read_lock();
 	err = __netif_set_xps_queue(dev, mask, index, true);
+	cpus_read_unlock();
+
 	kfree(mask);
 	return err ? : len;
 }
-- 
2.17.1


* Re: [PATCH net-next v2] net: allow to call netif_reset_xps_queues() under cpus_read_lock
From: Jason Wang @ 2018-08-09  9:15 UTC (permalink / raw)
  To: Andrei Vagin, David S. Miller
  Cc: netdev, Andrei Vagin, Nambiar, Amritha, Michael S. Tsirkin



On 2018-08-09 11:07, Andrei Vagin wrote:
> From: Andrei Vagin <avagin@gmail.com>
>
> static_key_slow_inc() takes cpus_read_lock() internally. In the
> virtio_net driver, XPS queues are initialized after setting the queue:cpu
> affinity in virtnet_set_affinity(), which already runs under
> cpus_read_lock(). Lockdep therefore prints a recursive-locking warning
> when cpus_read_lock() is acquired a second time.
>
> This patch adds the ability to call __netif_set_xps_queue() under
> cpus_read_lock().
 ...

Acked-by: Jason Wang <jasowang@redhat.com>


* Re: [PATCH net-next v2] net: allow to call netif_reset_xps_queues() under cpus_read_lock
From: David Miller @ 2018-08-09 21:26 UTC (permalink / raw)
  To: avagin; +Cc: netdev, avagin, amritha.nambiar, mst, jasowang

From: Andrei Vagin <avagin@openvz.org>
Date: Wed,  8 Aug 2018 20:07:35 -0700

> From: Andrei Vagin <avagin@gmail.com>
> 
> static_key_slow_inc() takes cpus_read_lock() internally. In the
> virtio_net driver, XPS queues are initialized after setting the queue:cpu
> affinity in virtnet_set_affinity(), which already runs under
> cpus_read_lock(). Lockdep therefore prints a recursive-locking warning
> when cpus_read_lock() is acquired a second time.
> 
> This patch adds the ability to call __netif_set_xps_queue() under
> cpus_read_lock().
 ...

Applied, thank you.


* Re: [PATCH net-next v2] net: allow to call netif_reset_xps_queues() under cpus_read_lock
From: Michael S. Tsirkin @ 2018-08-09 22:13 UTC (permalink / raw)
  To: Andrei Vagin
  Cc: David S. Miller, netdev, Andrei Vagin, Nambiar, Amritha, Jason Wang

On Wed, Aug 08, 2018 at 08:07:35PM -0700, Andrei Vagin wrote:
> From: Andrei Vagin <avagin@gmail.com>
> 
> static_key_slow_inc() takes cpus_read_lock() internally. In the
> virtio_net driver, XPS queues are initialized after setting the queue:cpu
> affinity in virtnet_set_affinity(), which already runs under
> cpus_read_lock(). Lockdep therefore prints a recursive-locking warning
> when cpus_read_lock() is acquired a second time.
> 
> This patch adds the ability to call __netif_set_xps_queue() under
> cpus_read_lock().
 ...
> v2: move cpus_read_lock() out of __netif_set_xps_queue()

FYI, your changelog should go after the "---" below, not before it.
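I.e. keep the tags above the "---" separator and put the per-version notes
just below it, where git am strips them from the commit message; roughly:

	Signed-off-by: Andrei Vagin <avagin@gmail.com>
	---
	v2: move cpus_read_lock() out of __netif_set_xps_queue()

	 drivers/net/virtio_net.c |  4 +++-
	 ...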

> Cc: "Nambiar, Amritha" <amritha.nambiar@intel.com>
> Cc: "Michael S. Tsirkin" <mst@redhat.com>
> Cc: Jason Wang <jasowang@redhat.com>
> Fixes: 8af2c06ff4b1 ("net-sysfs: Add interface for Rx queue(s) map per Tx queue")
> 
> Signed-off-by: Andrei Vagin <avagin@gmail.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>
