bpf.vger.kernel.org archive mirror
* [PATCH netdev] virtio-net: support XDP_TX when not more queues
@ 2021-01-13  8:08 Xuan Zhuo
  2021-01-16  1:47 ` Jakub Kicinski
  2021-02-10 21:40 ` Michael S. Tsirkin
  0 siblings, 2 replies; 5+ messages in thread
From: Xuan Zhuo @ 2021-01-13  8:08 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, David S. Miller, Jakub Kicinski,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, virtualization, bpf, dust.li

The number of queues implemented by many virtio backends is limited,
especially on machines with a large number of CPUs. In such cases it
is often impossible to allocate a separate queue for XDP_TX.

This patch allows XDP_TX to run by reusing an existing SQ, with
__netif_tx_lock() held, when there are not enough queues.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 12 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ba8e637..7a3b2a7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -195,6 +195,9 @@ struct virtnet_info {
 	/* # of XDP queue pairs currently used by the driver */
 	u16 xdp_queue_pairs;
 
+	/* xdp_queue_pairs may be 0 even when XDP is loaded, so track it here. */
+	bool xdp_enabled;
+
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 	return 0;
 }
 
-static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
+static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
 {
 	unsigned int qp;
+	struct netdev_queue *txq;
+
+	if (vi->curr_queue_pairs > nr_cpu_ids) {
+		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+	} else {
+		qp = smp_processor_id() % vi->curr_queue_pairs;
+		txq = netdev_get_tx_queue(vi->dev, qp);
+		__netif_tx_lock(txq, raw_smp_processor_id());
+	}
 
-	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
 	return &vi->sq[qp];
 }
 
+static void virtnet_put_xdp_sq(struct virtnet_info *vi)
+{
+	unsigned int qp;
+	struct netdev_queue *txq;
+
+	if (vi->curr_queue_pairs <= nr_cpu_ids) {
+		qp = smp_processor_id() % vi->curr_queue_pairs;
+		txq = netdev_get_tx_queue(vi->dev, qp);
+		__netif_tx_unlock(txq);
+	}
+}
+
 static int virtnet_xdp_xmit(struct net_device *dev,
 			    int n, struct xdp_frame **frames, u32 flags)
 {
@@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	if (!xdp_prog)
 		return -ENXIO;
 
-	sq = virtnet_xdp_sq(vi);
+	sq = virtnet_get_xdp_sq(vi);
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
 		ret = -EINVAL;
@@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	sq->stats.kicks += kicks;
 	u64_stats_update_end(&sq->stats.syncp);
 
+	virtnet_put_xdp_sq(vi);
 	return ret;
 }
 
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
-	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
+	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
 }
 
 /* We copy the packet for XDP in the following cases:
@@ -1457,12 +1481,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		xdp_do_flush();
 
 	if (xdp_xmit & VIRTIO_XDP_TX) {
-		sq = virtnet_xdp_sq(vi);
+		sq = virtnet_get_xdp_sq(vi);
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			sq->stats.kicks++;
 			u64_stats_update_end(&sq->stats.syncp);
 		}
+		virtnet_put_xdp_sq(vi);
 	}
 
 	return received;
@@ -2416,12 +2441,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		xdp_qp = nr_cpu_ids;
 
 	/* XDP requires extra queues for XDP_TX */
-	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
-		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
-		netdev_warn(dev, "request %i queues but max is %i\n",
-			    curr_qp + xdp_qp, vi->max_queue_pairs);
-		return -ENOMEM;
-	}
+	if (curr_qp + xdp_qp > vi->max_queue_pairs)
+		xdp_qp = 0;
 
 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
 	if (!prog && !old_prog)
@@ -2453,12 +2474,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	vi->xdp_queue_pairs = xdp_qp;
 
+	vi->xdp_enabled = false;
 	if (prog) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
 			if (i == 0 && !old_prog)
 				virtnet_clear_guest_offloads(vi);
 		}
+		vi->xdp_enabled = true;
 	}
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -2526,7 +2549,7 @@ static int virtnet_set_features(struct net_device *dev,
 	int err;
 
 	if ((dev->features ^ features) & NETIF_F_LRO) {
-		if (vi->xdp_queue_pairs)
+		if (vi->xdp_enabled)
 			return -EBUSY;
 
 		if (features & NETIF_F_LRO)
-- 
1.8.3.1



* Re: [PATCH netdev] virtio-net: support XDP_TX when not more queues
  2021-01-13  8:08 [PATCH netdev] virtio-net: support XDP_TX when not more queues Xuan Zhuo
@ 2021-01-16  1:47 ` Jakub Kicinski
  2021-02-10 21:40 ` Michael S. Tsirkin
  1 sibling, 0 replies; 5+ messages in thread
From: Jakub Kicinski @ 2021-01-16  1:47 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, virtualization, bpf, dust.li

On Wed, 13 Jan 2021 16:08:57 +0800 Xuan Zhuo wrote:
> The number of queues implemented by many virtio backends is limited,
> especially on machines with a large number of CPUs. In such cases it
> is often impossible to allocate a separate queue for XDP_TX.
> 
> This patch allows XDP_TX to run by reusing an existing SQ, with
> __netif_tx_lock() held, when there are not enough queues.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>

Since reviews are not coming in, let me share some of mine.

nit: please put [PATCH net-next] not [PATCH netdev]

> -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
> +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
>  {
>  	unsigned int qp;
> +	struct netdev_queue *txq;

nit: please order variable declaration lines longest to shortest
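That is, reverse Christmas tree order:

	struct netdev_queue *txq;
	unsigned int qp;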

> +
> +	if (vi->curr_queue_pairs > nr_cpu_ids) {
> +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> +	} else {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;
> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_lock(txq, raw_smp_processor_id());
> +	}
>  
> -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>  	return &vi->sq[qp];
>  }
>  
> +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
> +{
> +	unsigned int qp;
> +	struct netdev_queue *txq;

nit: longest to shortest

> +
> +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;

Feels a little wasteful to do the modulo calculation twice per packet.

> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_unlock(txq);
> +	}
> +}
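
One possible shape to avoid the duplicated modulo (a sketch only, not
part of this patch): have the put helper take the sq returned by the
get helper and derive the queue index from it, so the modulo runs once:

static void virtnet_put_xdp_sq(struct virtnet_info *vi,
			       struct send_queue *sq)
{
	/* sq - vi->sq recovers the queue index chosen at get time */
	if (vi->curr_queue_pairs <= nr_cpu_ids)
		__netif_tx_unlock(netdev_get_tx_queue(vi->dev, sq - vi->sq));
}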

> +	vi->xdp_enabled = false;
>  	if (prog) {
>  		for (i = 0; i < vi->max_queue_pairs; i++) {
>  			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
>  			if (i == 0 && !old_prog)
>  				virtnet_clear_guest_offloads(vi);
>  		}
> +		vi->xdp_enabled = true;

Is xdp_enabled really needed? Can't we do the headroom calculation
based on the program pointer not being NULL? Either way, xdp_enabled
should not temporarily switch true -> false -> true when the program
is swapped.
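
For reference, the pointer-based check could look roughly like this
(a sketch, assuming rq[0] always carries the currently attached
program, which virtnet_xdp_set() maintains for every queue):

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	/* NULL check only; no dereference, so no RCU read lock needed */
	return rcu_access_pointer(vi->rq[0].xdp_prog) ?
	       VIRTIO_XDP_HEADROOM : 0;
}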

>  	}
>  
>  	for (i = 0; i < vi->max_queue_pairs; i++) {



* Re: [PATCH netdev] virtio-net: support XDP_TX when not more queues
  2021-01-13  8:08 [PATCH netdev] virtio-net: support XDP_TX when not more queues Xuan Zhuo
  2021-01-16  1:47 ` Jakub Kicinski
@ 2021-02-10 21:40 ` Michael S. Tsirkin
  2021-02-11 11:00   ` Jesper Dangaard Brouer
  2021-02-18  2:18   ` Jason Wang
  1 sibling, 2 replies; 5+ messages in thread
From: Michael S. Tsirkin @ 2021-02-10 21:40 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Jason Wang, David S. Miller, Jakub Kicinski,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, virtualization, bpf, dust.li

On Wed, Jan 13, 2021 at 04:08:57PM +0800, Xuan Zhuo wrote:
> The number of queues implemented by many virtio backends is limited,
> especially on machines with a large number of CPUs. In such cases it
> is often impossible to allocate a separate queue for XDP_TX.
> 
> This patch allows XDP_TX to run by reusing an existing SQ, with
> __netif_tx_lock() held, when there are not enough queues.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>

I'd like to get some advice from XDP experts on whether this is
OK - previously my understanding was that it is preferable to
disable XDP for such devices rather than take locks on the XDP
fast path.

> ---
>  drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++++++------------
>  1 file changed, 35 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index ba8e637..7a3b2a7 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -195,6 +195,9 @@ struct virtnet_info {
>  	/* # of XDP queue pairs currently used by the driver */
>  	u16 xdp_queue_pairs;
>  
> +	/* xdp_queue_pairs may be 0 even when XDP is loaded, so track it here. */
> +	bool xdp_enabled;
> +
>  	/* I like... big packets and I cannot lie! */
>  	bool big_packets;
>  
> @@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
>  	return 0;
>  }
>  
> -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
> +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
>  {
>  	unsigned int qp;
> +	struct netdev_queue *txq;
> +
> +	if (vi->curr_queue_pairs > nr_cpu_ids) {
> +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> +	} else {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;
> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_lock(txq, raw_smp_processor_id());
> +	}
>  
> -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>  	return &vi->sq[qp];
>  }
>  
> +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
> +{
> +	unsigned int qp;
> +	struct netdev_queue *txq;
> +
> +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
> +		qp = smp_processor_id() % vi->curr_queue_pairs;
> +		txq = netdev_get_tx_queue(vi->dev, qp);
> +		__netif_tx_unlock(txq);
> +	}
> +}
> +
>  static int virtnet_xdp_xmit(struct net_device *dev,
>  			    int n, struct xdp_frame **frames, u32 flags)
>  {
> @@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>  	if (!xdp_prog)
>  		return -ENXIO;
>  
> -	sq = virtnet_xdp_sq(vi);
> +	sq = virtnet_get_xdp_sq(vi);
>  
>  	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
>  		ret = -EINVAL;
> @@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>  	sq->stats.kicks += kicks;
>  	u64_stats_update_end(&sq->stats.syncp);
>  
> +	virtnet_put_xdp_sq(vi);
>  	return ret;
>  }
>  
>  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
>  {
> -	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
> +	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
>  }
>  
>  /* We copy the packet for XDP in the following cases:
> @@ -1457,12 +1481,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>  		xdp_do_flush();
>  
>  	if (xdp_xmit & VIRTIO_XDP_TX) {
> -		sq = virtnet_xdp_sq(vi);
> +		sq = virtnet_get_xdp_sq(vi);
>  		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
>  			u64_stats_update_begin(&sq->stats.syncp);
>  			sq->stats.kicks++;
>  			u64_stats_update_end(&sq->stats.syncp);
>  		}
> +		virtnet_put_xdp_sq(vi);
>  	}
>  
>  	return received;
> @@ -2416,12 +2441,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>  		xdp_qp = nr_cpu_ids;
>  
>  	/* XDP requires extra queues for XDP_TX */
> -	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
> -		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
> -		netdev_warn(dev, "request %i queues but max is %i\n",
> -			    curr_qp + xdp_qp, vi->max_queue_pairs);
> -		return -ENOMEM;
> -	}
> +	if (curr_qp + xdp_qp > vi->max_queue_pairs)
> +		xdp_qp = 0;
>  
>  	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
>  	if (!prog && !old_prog)
> @@ -2453,12 +2474,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>  	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
>  	vi->xdp_queue_pairs = xdp_qp;
>  
> +	vi->xdp_enabled = false;
>  	if (prog) {
>  		for (i = 0; i < vi->max_queue_pairs; i++) {
>  			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
>  			if (i == 0 && !old_prog)
>  				virtnet_clear_guest_offloads(vi);
>  		}
> +		vi->xdp_enabled = true;
>  	}
>  
>  	for (i = 0; i < vi->max_queue_pairs; i++) {
> @@ -2526,7 +2549,7 @@ static int virtnet_set_features(struct net_device *dev,
>  	int err;
>  
>  	if ((dev->features ^ features) & NETIF_F_LRO) {
> -		if (vi->xdp_queue_pairs)
> +		if (vi->xdp_enabled)
>  			return -EBUSY;
>  
>  		if (features & NETIF_F_LRO)
> -- 
> 1.8.3.1



* Re: [PATCH netdev] virtio-net: support XDP_TX when not more queues
  2021-02-10 21:40 ` Michael S. Tsirkin
@ 2021-02-11 11:00   ` Jesper Dangaard Brouer
  2021-02-18  2:18   ` Jason Wang
  1 sibling, 0 replies; 5+ messages in thread
From: Jesper Dangaard Brouer @ 2021-02-11 11:00 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: brouer, Xuan Zhuo, netdev, Jason Wang, David S. Miller,
	Jakub Kicinski, Alexei Starovoitov, Daniel Borkmann,
	Jesper Dangaard Brouer, John Fastabend, virtualization, bpf,
	dust.li, Marek Majtyka

On Wed, 10 Feb 2021 16:40:41 -0500
"Michael S. Tsirkin" <mst@redhat.com> wrote:

> On Wed, Jan 13, 2021 at 04:08:57PM +0800, Xuan Zhuo wrote:
> > The number of queues implemented by many virtio backends is limited,
> > especially on machines with a large number of CPUs. In such cases it
> > is often impossible to allocate a separate queue for XDP_TX.
> > 
> > This patch allows XDP_TX to run by reusing an existing SQ, with
> > __netif_tx_lock() held, when there are not enough queues.

I'm a little puzzled about the choice of using the netdevice TXQ
lock __netif_tx_lock() / __netif_tx_unlock().
Can you explain more about this choice?
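
For reference, __netif_tx_lock() takes the same per-queue _xmit_lock
that the regular transmit path uses, roughly this (paraphrased from
include/linux/netdevice.h):

static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}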

> > 
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > Reviewed-by: Dust Li <dust.li@linux.alibaba.com>  
> 
> I'd like to get some advice from XDP experts on whether this is
> OK - previously my understanding was that it is preferable to
> disable XDP for such devices rather than take locks on the XDP
> fast path.

I think it is acceptable, because the ndo_xdp_xmit / virtnet_xdp_xmit
takes a bulk of packets (currently 16).

Some drivers already do this.
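
The bulk comes from the XDP_REDIRECT flush path: paraphrasing
bq_xmit_all() in kernel/bpf/devmap.c, up to DEV_MAP_BULK_SIZE (16)
queued frames are handed to the driver in a single call, so the tx
lock is taken once per bulk rather than once per frame:

	/* paraphrased from bq_xmit_all(); error handling elided */
	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);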

It would have been nice if we could set a feature flag that allows
users to see that this driver uses locking in the XDP transmit
(ndo_xdp_xmit) function call... but it seems like a pipe dream :-P

Code related to the locking:

> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index ba8e637..7a3b2a7 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
[...]
> > @@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
> >  	return 0;
> >  }
> >  
> > -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
> > +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
> >  {
> >  	unsigned int qp;
> > +	struct netdev_queue *txq;
> > +
> > +	if (vi->curr_queue_pairs > nr_cpu_ids) {
> > +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> > +	} else {
> > +		qp = smp_processor_id() % vi->curr_queue_pairs;
> > +		txq = netdev_get_tx_queue(vi->dev, qp);
> > +		__netif_tx_lock(txq, raw_smp_processor_id());
> > +	}
> >  
> > -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
> >  	return &vi->sq[qp];
> >  }
> >  
> > +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
> > +{
> > +	unsigned int qp;
> > +	struct netdev_queue *txq;
> > +
> > +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
> > +		qp = smp_processor_id() % vi->curr_queue_pairs;
> > +		txq = netdev_get_tx_queue(vi->dev, qp);
> > +		__netif_tx_unlock(txq);
> > +	}
> > +}
> > +
> >  static int virtnet_xdp_xmit(struct net_device *dev,
> >  			    int n, struct xdp_frame **frames, u32 flags)
> >  {
> > @@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >  	if (!xdp_prog)
> >  		return -ENXIO;
> >  
> > -	sq = virtnet_xdp_sq(vi);
> > +	sq = virtnet_get_xdp_sq(vi);
> >  
> >  	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
> >  		ret = -EINVAL;
> > @@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >  	sq->stats.kicks += kicks;
> >  	u64_stats_update_end(&sq->stats.syncp);
> >  
> > +	virtnet_put_xdp_sq(vi);
> >  	return ret;
> >  }
> >  



-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer



* Re: [PATCH netdev] virtio-net: support XDP_TX when not more queues
  2021-02-10 21:40 ` Michael S. Tsirkin
  2021-02-11 11:00   ` Jesper Dangaard Brouer
@ 2021-02-18  2:18   ` Jason Wang
  1 sibling, 0 replies; 5+ messages in thread
From: Jason Wang @ 2021-02-18  2:18 UTC (permalink / raw)
  To: Michael S. Tsirkin, Xuan Zhuo
  Cc: netdev, David S. Miller, Jakub Kicinski, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	virtualization, bpf, dust.li


On 2021/2/11 5:40 AM, Michael S. Tsirkin wrote:
> On Wed, Jan 13, 2021 at 04:08:57PM +0800, Xuan Zhuo wrote:
>> The number of queues implemented by many virtio backends is limited,
>> especially on machines with a large number of CPUs. In such cases it
>> is often impossible to allocate a separate queue for XDP_TX.
>>
>> This patch allows XDP_TX to run by reusing an existing SQ, with
>> __netif_tx_lock() held, when there are not enough queues.
>>
>> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
> I'd like to get some advice from XDP experts on whether this is
> OK - previously my understanding was that it is preferable to
> disable XDP for such devices rather than take locks on the XDP
> fast path.


I think this is acceptable for devices where changing the number of
queues is not easy. For virtio-net, it would probably require a lot
of changes on the management side.

Another example is TUN, which uses a TX lock for XDP.
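
For example, tun_xdp_xmit() in drivers/net/tun.c produces the whole
bulk into its ring under one spinlock, roughly (paraphrased, error
handling elided):

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++)
		__ptr_ring_produce(&tfile->tx_ring,
				   tun_xdp_to_ptr(frames[i]));
	spin_unlock(&tfile->tx_ring.producer_lock);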

Thanks


>
>> ---
>>   drivers/net/virtio_net.c | 47 +++++++++++++++++++++++++++++++++++------------
>>   1 file changed, 35 insertions(+), 12 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index ba8e637..7a3b2a7 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -195,6 +195,9 @@ struct virtnet_info {
>>   	/* # of XDP queue pairs currently used by the driver */
>>   	u16 xdp_queue_pairs;
>>   
>> +	/* xdp_queue_pairs may be 0 even when XDP is loaded, so track it here. */
>> +	bool xdp_enabled;
>> +
>>   	/* I like... big packets and I cannot lie! */
>>   	bool big_packets;
>>   
>> @@ -481,14 +484,34 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
>>   	return 0;
>>   }
>>   
>> -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
>> +static struct send_queue *virtnet_get_xdp_sq(struct virtnet_info *vi)
>>   {
>>   	unsigned int qp;
>> +	struct netdev_queue *txq;
>> +
>> +	if (vi->curr_queue_pairs > nr_cpu_ids) {
>> +		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>> +	} else {
>> +		qp = smp_processor_id() % vi->curr_queue_pairs;
>> +		txq = netdev_get_tx_queue(vi->dev, qp);
>> +		__netif_tx_lock(txq, raw_smp_processor_id());
>> +	}
>>   
>> -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
>>   	return &vi->sq[qp];
>>   }
>>   
>> +static void virtnet_put_xdp_sq(struct virtnet_info *vi)
>> +{
>> +	unsigned int qp;
>> +	struct netdev_queue *txq;
>> +
>> +	if (vi->curr_queue_pairs <= nr_cpu_ids) {
>> +		qp = smp_processor_id() % vi->curr_queue_pairs;
>> +		txq = netdev_get_tx_queue(vi->dev, qp);
>> +		__netif_tx_unlock(txq);
>> +	}
>> +}
>> +
>>   static int virtnet_xdp_xmit(struct net_device *dev,
>>   			    int n, struct xdp_frame **frames, u32 flags)
>>   {
>> @@ -512,7 +535,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>>   	if (!xdp_prog)
>>   		return -ENXIO;
>>   
>> -	sq = virtnet_xdp_sq(vi);
>> +	sq = virtnet_get_xdp_sq(vi);
>>   
>>   	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
>>   		ret = -EINVAL;
>> @@ -560,12 +583,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>>   	sq->stats.kicks += kicks;
>>   	u64_stats_update_end(&sq->stats.syncp);
>>   
>> +	virtnet_put_xdp_sq(vi);
>>   	return ret;
>>   }
>>   
>>   static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
>>   {
>> -	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
>> +	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
>>   }
>>   
>>   /* We copy the packet for XDP in the following cases:
>> @@ -1457,12 +1481,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
>>   		xdp_do_flush();
>>   
>>   	if (xdp_xmit & VIRTIO_XDP_TX) {
>> -		sq = virtnet_xdp_sq(vi);
>> +		sq = virtnet_get_xdp_sq(vi);
>>   		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
>>   			u64_stats_update_begin(&sq->stats.syncp);
>>   			sq->stats.kicks++;
>>   			u64_stats_update_end(&sq->stats.syncp);
>>   		}
>> +		virtnet_put_xdp_sq(vi);
>>   	}
>>   
>>   	return received;
>> @@ -2416,12 +2441,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>>   		xdp_qp = nr_cpu_ids;
>>   
>>   	/* XDP requires extra queues for XDP_TX */
>> -	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
>> -		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
>> -		netdev_warn(dev, "request %i queues but max is %i\n",
>> -			    curr_qp + xdp_qp, vi->max_queue_pairs);
>> -		return -ENOMEM;
>> -	}
>> +	if (curr_qp + xdp_qp > vi->max_queue_pairs)
>> +		xdp_qp = 0;
>>   
>>   	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
>>   	if (!prog && !old_prog)
>> @@ -2453,12 +2474,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
>>   	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
>>   	vi->xdp_queue_pairs = xdp_qp;
>>   
>> +	vi->xdp_enabled = false;
>>   	if (prog) {
>>   		for (i = 0; i < vi->max_queue_pairs; i++) {
>>   			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
>>   			if (i == 0 && !old_prog)
>>   				virtnet_clear_guest_offloads(vi);
>>   		}
>> +		vi->xdp_enabled = true;
>>   	}
>>   
>>   	for (i = 0; i < vi->max_queue_pairs; i++) {
>> @@ -2526,7 +2549,7 @@ static int virtnet_set_features(struct net_device *dev,
>>   	int err;
>>   
>>   	if ((dev->features ^ features) & NETIF_F_LRO) {
>> -		if (vi->xdp_queue_pairs)
>> +		if (vi->xdp_enabled)
>>   			return -EBUSY;
>>   
>>   		if (features & NETIF_F_LRO)
>> -- 
>> 1.8.3.1


