* [PATCH net-next 0/2] virtio_net: Expand affinity to arbitrary numbers of cpu and vq
@ 2018-08-10  0:28 Caleb Raitto
  2018-08-10  0:28 ` [PATCH net-next 1/2] virtio_net: Make vp_set_vq_affinity() take a mask Caleb Raitto
  2018-08-10  0:28 ` [PATCH net-next 2/2] virtio_net: Stripe queue affinities across cores Caleb Raitto
  0 siblings, 2 replies; 4+ messages in thread
From: Caleb Raitto @ 2018-08-10  0:28 UTC (permalink / raw)
  To: herbert, mst, davem
  Cc: arei.gonglei, jasowang, netdev, linux-crypto, Caleb Raitto

From: Caleb Raitto <caraitto@google.com>

Virtio-net pins each virtual queue's rx and tx interrupt to a cpu, but
only if there are exactly as many queues as cpus.

Expand this heuristic to also configure a reasonable affinity when the
number of cpus != the number of virtual queues.

Patch 1 allows vqs to take an affinity mask with more than 1 cpu.
Patch 2 generalizes the algorithm in virtnet_set_affinity beyond
the case where #cpus == #vqs.
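
For illustration, a minimal userspace model of the striping policy in
patch 2 (an approximation for this cover letter only: it assumes cpus
are numbered contiguously from 0, whereas the kernel code walks
cpu_online_mask and handles holes):

#include <stdio.h>

int main(void)
{
	int num_cpu = 16, num_vq = 5;	/* example sizes */
	int stride = num_cpu / num_vq ? num_cpu / num_vq : 1;
	int stragglers = num_cpu >= num_vq ? num_cpu % num_vq : 0;
	int cpu = 0;

	for (int i = 0; i < num_vq; i++) {
		/* the first "stragglers" queues take one extra cpu */
		int group_size = stride + (i < stragglers ? 1 : 0);

		printf("queue pair %2d -> cpus", i);
		for (int j = 0; j < group_size; j++) {
			printf(" %d", cpu);
			cpu = (cpu + 1) % num_cpu; /* wrap, like cpumask_next_wrap() */
		}
		printf("\n");
	}
	return 0;
}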

Tested:

# 16 vCPU, 16 queue pairs, Debian 9 with a recent net-next kernel, GCE

# Disable the GCE scripts that set affinities during startup.
#
# Add the following to
# /etc/default/instance_configs.cfg.template and reboot:
[InstanceSetup]
set_multiqueue = false

$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list;  done
0-15
0
0
1
1
2
2
3
3
4
4
5
5
6
6
7
7
8
8
9
9
10
10
11
11
12
12
13
13
14
14
15
15
0-15
0-15
0-15
0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 15` ; do sudo grep ".*" tx-$i/xps_cpus; done
0001
0002
0004
0008
0010
0020
0040
0080
0100
0200
0400
0800
1000
2000
4000
8000
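
# For reference: each xps_cpus value above is a hexadecimal cpu bitmask
# (bit N = cpu N), so 0001 maps tx-0 to cpu 0 and 8000 maps tx-15 to
# cpu 15. A quick way to decode a mask, assuming bash (an example, not
# part of the original test log):
$ mask=0x8000; for i in `seq 0 15`; \
do [ $(( (mask >> i) & 1 )) -eq 1 ] && echo cpu$i; done
cpu15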

# 16 vCPU, 15 queue pairs
$ sudo ethtool -L eth0 combined 15

$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list;  done
0-15
0-1
0-1
2
2
3
3
4
4
5
5
6
6
7
7
8
8
9
9
10
10
11
11
12
12
13
13
14
14
15
15
15
15
0-15
0-15
0-15
0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 14` ; do sudo grep ".*" tx-$i/xps_cpus; done
0003
0004
0008
0010
0020
0040
0080
0100
0200
0400
0800
1000
2000
4000
8000

# 16 vCPU, 8 queue pairs
$ sudo ethtool -L eth0 combined 8

$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list;  done
0-15
0-1
0-1
2-3
2-3
4-5
4-5
6-7
6-7
8-9
8-9
10-11
10-11
12-13
12-13
14-15
14-15
9
9
10
10
11
11
12
12
13
13
14
14
15
15
15
15
0-15
0-15
0-15
0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 7` ; do sudo grep ".*" tx-$i/xps_cpus; done
0003
000c
0030
00c0
0300
0c00
3000
c000

# 15 vCPU, 16 queue pairs
$ sudo ethtool -L eth0 combined 16
$ sudo sh -c "echo 0 > /sys/devices/system/cpu/cpu15/online"

$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list;  done
0-15
0
0
1
1
2
2
3
3
4
4
5
5
6
6
7
7
8
8
9
9
10
10
11
11
12
12
13
13
14
14
0
0
0-15
0-15
0-15
0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 15` ; do sudo grep ".*" tx-$i/xps_cpus; done
0001
0002
0004
0008
0010
0020
0040
0080
0100
0200
0400
0800
1000
2000
4000
0001

# 8 vCPU, 16 queue pairs
$ for i in `seq 8 15`; \
do sudo sh -c "echo 0 > /sys/devices/system/cpu/cpu$i/online"; done

$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list;  done
0-15
0
0
1
1
2
2
3
3
4
4
5
5
6
6
7
7
0
0
1
1
2
2
3
3
4
4
5
5
6
6
7
7
0-15
0-15
0-15
0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 15` ; do sudo grep ".*" tx-$i/xps_cpus; done
0001
0002
0004
0008
0010
0020
0040
0080
0001
0002
0004
0008
0010
0020
0040
0080

Caleb Raitto (2):
  virtio_net: Make vp_set_vq_affinity() take a mask.
  virtio_net: Stripe queue affinities across cores.

 drivers/crypto/virtio/virtio_crypto_core.c |  4 +-
 drivers/net/virtio_net.c                   | 46 ++++++++++++++--------
 drivers/virtio/virtio_pci_common.c         |  7 ++--
 drivers/virtio/virtio_pci_common.h         |  2 +-
 include/linux/virtio_config.h              |  7 ++--
 5 files changed, 39 insertions(+), 27 deletions(-)

-- 
2.18.0.597.ga71716f1ad-goog


* [PATCH net-next 1/2] virtio_net: Make vp_set_vq_affinity() take a mask.
  2018-08-10  0:28 [PATCH net-next 0/2] virtio_net: Expand affinity to arbitrary numbers of cpu and vq Caleb Raitto
@ 2018-08-10  0:28 ` Caleb Raitto
  2018-08-10  0:48   ` Gonglei (Arei)
  2018-08-10  0:28 ` [PATCH net-next 2/2] virtio_net: Stripe queue affinities across cores Caleb Raitto
  1 sibling, 1 reply; 4+ messages in thread
From: Caleb Raitto @ 2018-08-10  0:28 UTC (permalink / raw)
  To: herbert, mst, davem
  Cc: arei.gonglei, jasowang, netdev, linux-crypto, Caleb Raitto

From: Caleb Raitto <caraitto@google.com>

Make vp_set_vq_affinity() take a cpumask instead of a single CPU.

If there are fewer queues than cores, queue affinity should be able to
map to multiple cores.
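
A minimal sketch of the new calling convention (a hypothetical caller,
for illustration only; the real call sites are updated in the diff
below):

	cpumask_var_t mask;

	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		cpumask_set_cpu(0, mask);
		cpumask_set_cpu(1, mask);
		virtqueue_set_affinity(vq, mask);	/* hint: cpus 0-1 */
		free_cpumask_var(mask);
	}
	virtqueue_set_affinity(vq, NULL);		/* NULL clears the hint */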

Link: https://patchwork.ozlabs.org/patch/948149/
Suggested-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Caleb Raitto <caraitto@google.com>
---
 drivers/crypto/virtio/virtio_crypto_core.c | 4 ++--
 drivers/net/virtio_net.c                   | 8 ++++----
 drivers/virtio/virtio_pci_common.c         | 7 +++----
 drivers/virtio/virtio_pci_common.h         | 2 +-
 include/linux/virtio_config.h              | 7 ++++---
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 83326986c113..7c7198553699 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -146,7 +146,7 @@ static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
 
 	if (vi->affinity_hint_set) {
 		for (i = 0; i < vi->max_data_queues; i++)
-			virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
 
 		vi->affinity_hint_set = false;
 	}
@@ -173,7 +173,7 @@ static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
 	 *
 	 */
 	for_each_online_cpu(cpu) {
-		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
 		if (++i >= vcrypto->max_data_queues)
 			break;
 	}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 39a7f4452587..43fabc0eb4d2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1878,8 +1878,8 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 
 	if (vi->affinity_hint_set) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
-			virtqueue_set_affinity(vi->rq[i].vq, -1);
-			virtqueue_set_affinity(vi->sq[i].vq, -1);
+			virtqueue_set_affinity(vi->rq[i].vq, NULL);
+			virtqueue_set_affinity(vi->sq[i].vq, NULL);
 		}
 
 		vi->affinity_hint_set = false;
@@ -1905,8 +1905,8 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
 	for_each_online_cpu(cpu) {
 		const unsigned long *mask = cpumask_bits(cpumask_of(cpu));
 
-		virtqueue_set_affinity(vi->rq[i].vq, cpu);
-		virtqueue_set_affinity(vi->sq[i].vq, cpu);
+		virtqueue_set_affinity(vi->rq[i].vq, cpumask_of(cpu));
+		virtqueue_set_affinity(vi->sq[i].vq, cpumask_of(cpu));
 		__netif_set_xps_queue(vi->dev, mask, i, false);
 		i++;
 	}
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 705aebd74e56..465a6f5142cc 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -421,7 +421,7 @@ const char *vp_bus_name(struct virtio_device *vdev)
  * - OR over all affinities for shared MSI
  * - ignore the affinity request if we're using INTX
  */
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
 {
 	struct virtio_device *vdev = vq->vdev;
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -435,11 +435,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 	if (vp_dev->msix_enabled) {
 		mask = vp_dev->msix_affinity_masks[info->msix_vector];
 		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
-		if (cpu == -1)
+		if (!cpu_mask)
 			irq_set_affinity_hint(irq, NULL);
 		else {
-			cpumask_clear(mask);
-			cpumask_set_cpu(cpu, mask);
+			cpumask_copy(mask, cpu_mask);
 			irq_set_affinity_hint(irq, mask);
 		}
 	}
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 135ee3cf7175..02271002c2f3 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -141,7 +141,7 @@ const char *vp_bus_name(struct virtio_device *vdev);
  * - OR over all affinities for shared MSI
  * - ignore the affinity request if we're using INTX
  */
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);
 
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
 
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 5559a2d31c46..32baf8e26735 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -79,7 +79,8 @@ struct virtio_config_ops {
 	u64 (*get_features)(struct virtio_device *vdev);
 	int (*finalize_features)(struct virtio_device *vdev);
 	const char *(*bus_name)(struct virtio_device *vdev);
-	int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
+	int (*set_vq_affinity)(struct virtqueue *vq,
+			       const struct cpumask *cpu_mask);
 	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
 			int index);
 };
@@ -236,11 +237,11 @@ const char *virtio_bus_name(struct virtio_device *vdev)
  *
  */
 static inline
-int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
+int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
 {
 	struct virtio_device *vdev = vq->vdev;
 	if (vdev->config->set_vq_affinity)
-		return vdev->config->set_vq_affinity(vq, cpu);
+		return vdev->config->set_vq_affinity(vq, cpu_mask);
 	return 0;
 }
 
-- 
2.18.0.597.ga71716f1ad-goog


* [PATCH net-next 2/2] virtio_net: Stripe queue affinities across cores.
  2018-08-10  0:28 [PATCH net-next 0/2] virtio_net: Expand affinity to arbitrary numbers of cpu and vq Caleb Raitto
  2018-08-10  0:28 ` [PATCH net-next 1/2] virtio_net: Make vp_set_vq_affinity() take a mask Caleb Raitto
@ 2018-08-10  0:28 ` Caleb Raitto
  1 sibling, 0 replies; 4+ messages in thread
From: Caleb Raitto @ 2018-08-10  0:28 UTC (permalink / raw)
  To: herbert, mst, davem
  Cc: arei.gonglei, jasowang, netdev, linux-crypto, Caleb Raitto,
	Willem de Bruijn

From: Caleb Raitto <caraitto@google.com>

Always set the affinity hint, even if #cpu != #vq.

Handle the case where #cpu > #vq (including when #cpu % #vq != 0) and
when #vq > #cpu (including when #vq % #cpu != 0).
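
As an illustration of the assignment below: with 16 online cpus and
5 queue pairs, stride = 16 / 5 = 3 and stragglers = 16 % 5 = 1, so
queue pair 0 is given 4 cpus (0-3) and queue pairs 1-4 get 3 cpus each;
with 8 cpus and 16 queue pairs, stride = 1 and the assignment wraps, so
queue pairs 8-15 reuse cpus 0-7 (matching the test output in the cover
letter).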

Signed-off-by: Caleb Raitto <caraitto@google.com>
Signed-off-by: Willem de Bruijn <willemb@google.com>
Acked-by: Jon Olson <jonolson@google.com>
---
 drivers/net/virtio_net.c | 42 ++++++++++++++++++++++++++--------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 43fabc0eb4d2..eb00ae6ee475 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,6 +31,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <linux/netdevice.h>
+#include <linux/kernel.h>
 #include <linux/pci.h>
 #include <net/route.h>
 #include <net/xdp.h>
@@ -1888,30 +1889,41 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 
 static void virtnet_set_affinity(struct virtnet_info *vi)
 {
-	int i;
-	int cpu;
+	cpumask_var_t mask;
+	int stragglers;
+	int group_size;
+	int i, j, cpu;
+	int num_cpu;
+	int stride;
 
-	/* In multiqueue mode, when the number of cpu is equal to the number of
-	 * queue pairs, we let the queue pairs to be private to one cpu by
-	 * setting the affinity hint to eliminate the contention.
-	 */
-	if (vi->curr_queue_pairs == 1 ||
-	    vi->max_queue_pairs != num_online_cpus()) {
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 		virtnet_clean_affinity(vi, -1);
 		return;
 	}
 
-	i = 0;
-	for_each_online_cpu(cpu) {
-		const unsigned long *mask = cpumask_bits(cpumask_of(cpu));
+	num_cpu = num_online_cpus();
+	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
+	stragglers = num_cpu >= vi->curr_queue_pairs ?
+			num_cpu % vi->curr_queue_pairs :
+			0;
+	cpu = cpumask_next(-1, cpu_online_mask);
 
-		virtqueue_set_affinity(vi->rq[i].vq, cpumask_of(cpu));
-		virtqueue_set_affinity(vi->sq[i].vq, cpumask_of(cpu));
-		__netif_set_xps_queue(vi->dev, mask, i, false);
-		i++;
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
+		group_size = stride + (i < stragglers ? 1 : 0);
+
+		for (j = 0; j < group_size; j++) {
+			cpumask_set_cpu(cpu, mask);
+			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
+						nr_cpu_ids, false);
+		}
+		virtqueue_set_affinity(vi->rq[i].vq, mask);
+		virtqueue_set_affinity(vi->sq[i].vq, mask);
+		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
+		cpumask_clear(mask);
 	}
 
 	vi->affinity_hint_set = true;
+	free_cpumask_var(mask);
 }
 
 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
-- 
2.18.0.597.ga71716f1ad-goog


* RE: [PATCH net-next 1/2] virtio_net: Make vp_set_vq_affinity() take a mask.
  2018-08-10  0:28 ` [PATCH net-next 1/2] virtio_net: Make vp_set_vq_affinity() take a mask Caleb Raitto
@ 2018-08-10  0:48   ` Gonglei (Arei)
  0 siblings, 0 replies; 4+ messages in thread
From: Gonglei (Arei) @ 2018-08-10  0:48 UTC (permalink / raw)
  To: Caleb Raitto, herbert, mst, davem
  Cc: jasowang, netdev, linux-crypto, Caleb Raitto



> -----Original Message-----
> From: Caleb Raitto [mailto:caleb.raitto@gmail.com]
> Sent: Friday, August 10, 2018 8:29 AM
> To: herbert@gondor.apana.org.au; mst@redhat.com; davem@davemloft.net
> Cc: Gonglei (Arei) <arei.gonglei@huawei.com>; jasowang@redhat.com;
> netdev@vger.kernel.org; linux-crypto@vger.kernel.org; Caleb Raitto
> <caraitto@google.com>
> Subject: [PATCH net-next 1/2] virtio_net: Make vp_set_vq_affinity() take a
> mask.
> 
> From: Caleb Raitto <caraitto@google.com>
> 
> Make vp_set_vq_affinity() take a cpumask instead of taking a single CPU.
> 
> If there are fewer queues than cores, queue affinity should be able to
> map to multiple cores.
> 

Nice work. You might want to rename this patch's subject, though:

s/virtio_net/virtio/ ?

Apart from that:

Acked-by: Gonglei <arei.gonglei@huawei.com>


Regards,
-Gonglei

