All of lore.kernel.org
 help / color / mirror / Atom feed
* Re: [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
       [not found] <1617787566.555242-6-xuanzhuo@linux.alibaba.com>
@ 2021-04-07  9:30   ` Jason Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Jason Wang @ 2021-04-07  9:30 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Michael S. Tsirkin, David S. Miller, Jakub Kicinski,
	Björn Töpel, Magnus Karlsson, Jonathan Lemon,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, KP Singh, virtualization, bpf, Dust Li, netdev


在 2021/4/7 下午5:26, Xuan Zhuo 写道:
>>> +	__free_old_xmit(sq, false, &stats);
>> Let's use a separate patch for this kind of factoring.
>>
> It is also possible to encounter xsk here, so it should not be a separate patch.
>
> Thanks.


You can do the factoring first and add the xsk stuff on top.

Thanks


>


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
@ 2021-04-07  9:30   ` Jason Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Jason Wang @ 2021-04-07  9:30 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Song Liu, Martin KaFai Lau, Jesper Dangaard Brouer,
	Daniel Borkmann, Michael S. Tsirkin, Yonghong Song,
	John Fastabend, Alexei Starovoitov, Andrii Nakryiko, netdev,
	Björn Töpel, Dust Li, Jonathan Lemon, KP Singh,
	Jakub Kicinski, bpf, virtualization, David S. Miller,
	Magnus Karlsson


在 2021/4/7 下午5:26, Xuan Zhuo 写道:
>>> +	__free_old_xmit(sq, false, &stats);
>> Let's use a separate patch for this kind of factoring.
>>
> It is also possible to encounter xsk here, so it should not be a separate patch.
>
> Thanks.


You can do the factoring first and add the xsk stuff on top.

Thanks


>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
  2021-03-31  7:11 ` [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk Xuan Zhuo
@ 2021-04-06  7:16     ` Jason Wang
  2021-04-06  7:16     ` Jason Wang
  1 sibling, 0 replies; 8+ messages in thread
From: Jason Wang @ 2021-04-06  7:16 UTC (permalink / raw)
  To: Xuan Zhuo, netdev
  Cc: Michael S. Tsirkin, David S. Miller, Jakub Kicinski,
	Björn Töpel, Magnus Karlsson, Jonathan Lemon,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, KP Singh, virtualization, bpf, Dust Li


在 2021/3/31 下午3:11, Xuan Zhuo 写道:
> Based on the last two bits of ptr returned by virtqueue_get_buf, 01
> represents the packet sent by xdp, 10 is the packet sent by xsk, and 00
> is skb by default.
>
> If the xmit work of xsk has not been completed, but the ring is full,
> napi must first exit and wait for the ring to be available, so
> need_wakeup is set. If __free_old_xmit is called first by start_xmit, we
> can quickly wake up napi to execute xsk xmit work.
>
> When recycling, we need to count the number of bytes sent, so put xsk
> desc->len into the ptr pointer, because ptr does not point to meaningful
> objects in xsk.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>


This needs to be squashed into patch 4.


> ---
>   drivers/net/virtio_net.c | 171 ++++++++++++++++++++++++++-------------
>   1 file changed, 113 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index fac7d0020013..8318b89b2971 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -50,6 +50,9 @@ module_param(xsk_kick_thr, int, 0644);
>   #define VIRTIO_XDP_REDIR	BIT(1)
>   
>   #define VIRTIO_XDP_FLAG	BIT(0)
> +#define VIRTIO_XSK_FLAG	BIT(1)
> +
> +#define VIRTIO_XSK_PTR_SHIFT       4
>   
>   static struct virtio_net_hdr_mrg_rxbuf xsk_hdr;
>   
> @@ -147,6 +150,9 @@ struct send_queue {
>   
>   		/* save the desc for next xmit, when xmit fail. */
>   		struct xdp_desc last_desc;
> +
> +		/* xsk wait for tx inter or softirq */
> +		bool need_wakeup;
>   	} xsk;
>   };
>   
> @@ -266,6 +272,12 @@ struct padded_vnet_hdr {
>   
>   static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			   int budget, bool in_napi);
> +static void virtnet_xsk_complete(struct send_queue *sq, u32 num);
> +
> +static bool is_skb_ptr(void *ptr)
> +{
> +	return !((unsigned long)ptr & (VIRTIO_XDP_FLAG | VIRTIO_XSK_FLAG));
> +}
>   
>   static bool is_xdp_frame(void *ptr)
>   {
> @@ -277,11 +289,58 @@ static void *xdp_to_ptr(struct xdp_frame *ptr)
>   	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
>   }
>   
> +static void *xsk_to_ptr(struct xdp_desc *desc)
> +{
> +	/* save the desc len to ptr */
> +	u64 p = desc->len << VIRTIO_XSK_PTR_SHIFT;
> +
> +	return (void *)(p | VIRTIO_XSK_FLAG);
> +}
> +
> +static void ptr_to_xsk(void *ptr, struct xdp_desc *desc)
> +{
> +	desc->len = ((u64)ptr) >> VIRTIO_XSK_PTR_SHIFT;
> +}
> +
>   static struct xdp_frame *ptr_to_xdp(void *ptr)
>   {
>   	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>   }
>   
> +static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> +			    struct virtnet_sq_stats *stats)
> +{
> +	unsigned int xsknum = 0;
> +	unsigned int len;
> +	void *ptr;
> +
> +	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> +		if (is_skb_ptr(ptr)) {
> +			struct sk_buff *skb = ptr;
> +
> +			pr_debug("Sent skb %p\n", skb);
> +
> +			stats->bytes += skb->len;
> +			napi_consume_skb(skb, in_napi);
> +		} else if (is_xdp_frame(ptr)) {
> +			struct xdp_frame *frame = ptr_to_xdp(ptr);
> +
> +			stats->bytes += frame->len;
> +			xdp_return_frame(frame);
> +		} else {
> +			struct xdp_desc desc;
> +
> +			ptr_to_xsk(ptr, &desc);
> +			stats->bytes += desc.len;
> +			++xsknum;
> +		}
> +		stats->packets++;
> +	}
> +
> +	if (xsknum)
> +		virtnet_xsk_complete(sq, xsknum);
> +}
> +
>   /* Converting between virtqueue no. and kernel tx/rx queue no.
>    * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
>    */
> @@ -543,15 +602,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>   			    int n, struct xdp_frame **frames, u32 flags)
>   {
>   	struct virtnet_info *vi = netdev_priv(dev);
> +	struct virtnet_sq_stats stats = {};
>   	struct receive_queue *rq = vi->rq;
>   	struct bpf_prog *xdp_prog;
>   	struct send_queue *sq;
> -	unsigned int len;
> -	int packets = 0;
> -	int bytes = 0;
>   	int nxmit = 0;
>   	int kicks = 0;
> -	void *ptr;
>   	int ret;
>   	int i;
>   
> @@ -570,20 +626,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>   	}
>   
>   	/* Free up any pending old buffers before queueing new ones. */
> -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -		if (likely(is_xdp_frame(ptr))) {
> -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> -
> -			bytes += frame->len;
> -			xdp_return_frame(frame);
> -		} else {
> -			struct sk_buff *skb = ptr;
> -
> -			bytes += skb->len;
> -			napi_consume_skb(skb, false);
> -		}
> -		packets++;
> -	}
> +	__free_old_xmit(sq, false, &stats);


Let's use a separate patch for this kind of factoring.


>   
>   	for (i = 0; i < n; i++) {
>   		struct xdp_frame *xdpf = frames[i];
> @@ -600,8 +643,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>   	}
>   out:
>   	u64_stats_update_begin(&sq->stats.syncp);
> -	sq->stats.bytes += bytes;
> -	sq->stats.packets += packets;
> +	sq->stats.bytes += stats.bytes;
> +	sq->stats.packets += stats.packets;
>   	sq->stats.xdp_tx += n;
>   	sq->stats.xdp_tx_drops += n - nxmit;
>   	sq->stats.kicks += kicks;
> @@ -1426,37 +1469,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>   
>   static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
>   {
> -	unsigned int len;
> -	unsigned int packets = 0;
> -	unsigned int bytes = 0;
> -	void *ptr;
> -
> -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -		if (likely(!is_xdp_frame(ptr))) {
> -			struct sk_buff *skb = ptr;
> +	struct virtnet_sq_stats stats = {};
>   
> -			pr_debug("Sent skb %p\n", skb);
> -
> -			bytes += skb->len;
> -			napi_consume_skb(skb, in_napi);
> -		} else {
> -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> -
> -			bytes += frame->len;
> -			xdp_return_frame(frame);
> -		}
> -		packets++;
> -	}
> +	__free_old_xmit(sq, in_napi, &stats);
>   
>   	/* Avoid overhead when no packets have been processed
>   	 * happens when called speculatively from start_xmit.
>   	 */
> -	if (!packets)
> +	if (!stats.packets)
>   		return;
>   
>   	u64_stats_update_begin(&sq->stats.syncp);
> -	sq->stats.bytes += bytes;
> -	sq->stats.packets += packets;
> +	sq->stats.bytes += stats.bytes;
> +	sq->stats.packets += stats.packets;
>   	u64_stats_update_end(&sq->stats.syncp);
>   }
>   
> @@ -2575,6 +2600,28 @@ static void virtnet_xsk_check_space(struct send_queue *sq)
>   		netif_stop_subqueue(dev, qnum);
>   }
>   
> +static void virtnet_xsk_complete(struct send_queue *sq, u32 num)
> +{
> +	struct xsk_buff_pool *pool;
> +
> +	rcu_read_lock();
> +
> +	pool = rcu_dereference(sq->xsk.pool);
> +	if (!pool) {
> +		rcu_read_unlock();
> +		return;
> +	}
> +
> +	xsk_tx_completed(pool, num);
> +
> +	rcu_read_unlock();
> +
> +	if (sq->xsk.need_wakeup) {
> +		sq->xsk.need_wakeup = false;
> +		virtqueue_napi_schedule(&sq->napi, sq->vq);
> +	}
> +}
> +
>   static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			    struct xdp_desc *desc)
>   {
> @@ -2613,7 +2660,8 @@ static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			offset = 0;
>   	}
>   
> -	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, NULL, GFP_ATOMIC);
> +	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, xsk_to_ptr(desc),
> +				   GFP_ATOMIC);
>   	if (unlikely(err))
>   		sq->xsk.last_desc = *desc;
>   
> @@ -2623,13 +2671,13 @@ static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
>   static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   				  struct xsk_buff_pool *pool,
>   				  unsigned int budget,
> -				  bool in_napi, int *done)
> +				  bool in_napi, int *done,
> +				  struct virtnet_sq_stats *stats)
>   {
>   	struct xdp_desc desc;
>   	int err, packet = 0;
>   	int ret = -EAGAIN;
>   	int need_kick = 0;
> -	int kicks = 0;
>   
>   	if (sq->xsk.last_desc.addr) {
>   		err = virtnet_xsk_xmit(sq, pool, &sq->xsk.last_desc);
> @@ -2665,7 +2713,7 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   		if (need_kick > xsk_kick_thr) {
>   			if (virtqueue_kick_prepare(sq->vq) &&
>   			    virtqueue_notify(sq->vq))
> -				++kicks;
> +				++stats->kicks;
>   
>   			need_kick = 0;
>   		}
> @@ -2675,15 +2723,11 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   		if (need_kick) {
>   			if (virtqueue_kick_prepare(sq->vq) &&
>   			    virtqueue_notify(sq->vq))
> -				++kicks;
> -		}
> -		if (kicks) {
> -			u64_stats_update_begin(&sq->stats.syncp);
> -			sq->stats.kicks += kicks;
> -			u64_stats_update_end(&sq->stats.syncp);
> +				++stats->kicks;
>   		}
>   
>   		*done = packet;
> +		stats->xdp_tx += packet;
>   
>   		xsk_tx_release(pool);
>   	}
> @@ -2694,26 +2738,37 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			   int budget, bool in_napi)
>   {
> +	struct virtnet_sq_stats stats = {};
>   	int done = 0;
>   	int err;
>   
> -	free_old_xmit_skbs(sq, in_napi);
> +	sq->xsk.need_wakeup = false;
> +	__free_old_xmit(sq, in_napi, &stats);
>   
> -	err = virtnet_xsk_xmit_batch(sq, pool, budget, in_napi, &done);
> +	err = virtnet_xsk_xmit_batch(sq, pool, budget, in_napi, &done, &stats);
>   	/* -EAGAIN: done == budget
>   	 * -EBUSY: done < budget
>   	 *  0    : done < budget
>   	 */
>   	if (err == -EBUSY) {
> -		free_old_xmit_skbs(sq, in_napi);
> +		__free_old_xmit(sq, in_napi, &stats);
>   
>   		/* If the space is enough, let napi run again. */
>   		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
>   			done = budget;


So I don't see how NAPI can be rescheduled here.

Thanks


> +		else
> +			sq->xsk.need_wakeup = true;
>   	}
>   
>   	virtnet_xsk_check_space(sq);
>   
> +	u64_stats_update_begin(&sq->stats.syncp);
> +	sq->stats.packets += stats.packets;
> +	sq->stats.bytes += stats.bytes;
> +	sq->stats.kicks += stats.kicks;
> +	sq->stats.xdp_tx += stats.xdp_tx;
> +	u64_stats_update_end(&sq->stats.syncp);
> +
>   	return done;
>   }
>   
> @@ -2991,9 +3046,9 @@ static void free_unused_bufs(struct virtnet_info *vi)
>   	for (i = 0; i < vi->max_queue_pairs; i++) {
>   		struct virtqueue *vq = vi->sq[i].vq;
>   		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
> -			if (!is_xdp_frame(buf))
> +			if (is_skb_ptr(buf))
>   				dev_kfree_skb(buf);
> -			else
> +			else if (is_xdp_frame(buf))
>   				xdp_return_frame(ptr_to_xdp(buf));
>   		}
>   	}


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
@ 2021-04-06  7:16     ` Jason Wang
  0 siblings, 0 replies; 8+ messages in thread
From: Jason Wang @ 2021-04-06  7:16 UTC (permalink / raw)
  To: Xuan Zhuo, netdev
  Cc: Song Liu, Martin KaFai Lau, Jesper Dangaard Brouer,
	Daniel Borkmann, Michael S. Tsirkin, Yonghong Song,
	John Fastabend, Alexei Starovoitov, Andrii Nakryiko,
	Björn Töpel, Dust Li, Jonathan Lemon, KP Singh,
	Jakub Kicinski, bpf, virtualization, David S. Miller,
	Magnus Karlsson


在 2021/3/31 下午3:11, Xuan Zhuo 写道:
> Based on the last two bits of ptr returned by virtqueue_get_buf, 01
> represents the packet sent by xdp, 10 is the packet sent by xsk, and 00
> is skb by default.
>
> If the xmit work of xsk has not been completed, but the ring is full,
> napi must first exit and wait for the ring to be available, so
> need_wakeup is set. If __free_old_xmit is called first by start_xmit, we
> can quickly wake up napi to execute xsk xmit work.
>
> When recycling, we need to count the number of bytes sent, so put xsk
> desc->len into the ptr pointer, because ptr does not point to meaningful
> objects in xsk.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Dust Li <dust.li@linux.alibaba.com>


This needs to be squashed into patch 4.


> ---
>   drivers/net/virtio_net.c | 171 ++++++++++++++++++++++++++-------------
>   1 file changed, 113 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index fac7d0020013..8318b89b2971 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -50,6 +50,9 @@ module_param(xsk_kick_thr, int, 0644);
>   #define VIRTIO_XDP_REDIR	BIT(1)
>   
>   #define VIRTIO_XDP_FLAG	BIT(0)
> +#define VIRTIO_XSK_FLAG	BIT(1)
> +
> +#define VIRTIO_XSK_PTR_SHIFT       4
>   
>   static struct virtio_net_hdr_mrg_rxbuf xsk_hdr;
>   
> @@ -147,6 +150,9 @@ struct send_queue {
>   
>   		/* save the desc for next xmit, when xmit fail. */
>   		struct xdp_desc last_desc;
> +
> +		/* xsk wait for tx inter or softirq */
> +		bool need_wakeup;
>   	} xsk;
>   };
>   
> @@ -266,6 +272,12 @@ struct padded_vnet_hdr {
>   
>   static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			   int budget, bool in_napi);
> +static void virtnet_xsk_complete(struct send_queue *sq, u32 num);
> +
> +static bool is_skb_ptr(void *ptr)
> +{
> +	return !((unsigned long)ptr & (VIRTIO_XDP_FLAG | VIRTIO_XSK_FLAG));
> +}
>   
>   static bool is_xdp_frame(void *ptr)
>   {
> @@ -277,11 +289,58 @@ static void *xdp_to_ptr(struct xdp_frame *ptr)
>   	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
>   }
>   
> +static void *xsk_to_ptr(struct xdp_desc *desc)
> +{
> +	/* save the desc len to ptr */
> +	u64 p = desc->len << VIRTIO_XSK_PTR_SHIFT;
> +
> +	return (void *)(p | VIRTIO_XSK_FLAG);
> +}
> +
> +static void ptr_to_xsk(void *ptr, struct xdp_desc *desc)
> +{
> +	desc->len = ((u64)ptr) >> VIRTIO_XSK_PTR_SHIFT;
> +}
> +
>   static struct xdp_frame *ptr_to_xdp(void *ptr)
>   {
>   	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
>   }
>   
> +static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> +			    struct virtnet_sq_stats *stats)
> +{
> +	unsigned int xsknum = 0;
> +	unsigned int len;
> +	void *ptr;
> +
> +	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> +		if (is_skb_ptr(ptr)) {
> +			struct sk_buff *skb = ptr;
> +
> +			pr_debug("Sent skb %p\n", skb);
> +
> +			stats->bytes += skb->len;
> +			napi_consume_skb(skb, in_napi);
> +		} else if (is_xdp_frame(ptr)) {
> +			struct xdp_frame *frame = ptr_to_xdp(ptr);
> +
> +			stats->bytes += frame->len;
> +			xdp_return_frame(frame);
> +		} else {
> +			struct xdp_desc desc;
> +
> +			ptr_to_xsk(ptr, &desc);
> +			stats->bytes += desc.len;
> +			++xsknum;
> +		}
> +		stats->packets++;
> +	}
> +
> +	if (xsknum)
> +		virtnet_xsk_complete(sq, xsknum);
> +}
> +
>   /* Converting between virtqueue no. and kernel tx/rx queue no.
>    * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
>    */
> @@ -543,15 +602,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>   			    int n, struct xdp_frame **frames, u32 flags)
>   {
>   	struct virtnet_info *vi = netdev_priv(dev);
> +	struct virtnet_sq_stats stats = {};
>   	struct receive_queue *rq = vi->rq;
>   	struct bpf_prog *xdp_prog;
>   	struct send_queue *sq;
> -	unsigned int len;
> -	int packets = 0;
> -	int bytes = 0;
>   	int nxmit = 0;
>   	int kicks = 0;
> -	void *ptr;
>   	int ret;
>   	int i;
>   
> @@ -570,20 +626,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>   	}
>   
>   	/* Free up any pending old buffers before queueing new ones. */
> -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -		if (likely(is_xdp_frame(ptr))) {
> -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> -
> -			bytes += frame->len;
> -			xdp_return_frame(frame);
> -		} else {
> -			struct sk_buff *skb = ptr;
> -
> -			bytes += skb->len;
> -			napi_consume_skb(skb, false);
> -		}
> -		packets++;
> -	}
> +	__free_old_xmit(sq, false, &stats);


Let's use a separate patch for this kind of factoring.


>   
>   	for (i = 0; i < n; i++) {
>   		struct xdp_frame *xdpf = frames[i];
> @@ -600,8 +643,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>   	}
>   out:
>   	u64_stats_update_begin(&sq->stats.syncp);
> -	sq->stats.bytes += bytes;
> -	sq->stats.packets += packets;
> +	sq->stats.bytes += stats.bytes;
> +	sq->stats.packets += stats.packets;
>   	sq->stats.xdp_tx += n;
>   	sq->stats.xdp_tx_drops += n - nxmit;
>   	sq->stats.kicks += kicks;
> @@ -1426,37 +1469,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>   
>   static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
>   {
> -	unsigned int len;
> -	unsigned int packets = 0;
> -	unsigned int bytes = 0;
> -	void *ptr;
> -
> -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -		if (likely(!is_xdp_frame(ptr))) {
> -			struct sk_buff *skb = ptr;
> +	struct virtnet_sq_stats stats = {};
>   
> -			pr_debug("Sent skb %p\n", skb);
> -
> -			bytes += skb->len;
> -			napi_consume_skb(skb, in_napi);
> -		} else {
> -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> -
> -			bytes += frame->len;
> -			xdp_return_frame(frame);
> -		}
> -		packets++;
> -	}
> +	__free_old_xmit(sq, in_napi, &stats);
>   
>   	/* Avoid overhead when no packets have been processed
>   	 * happens when called speculatively from start_xmit.
>   	 */
> -	if (!packets)
> +	if (!stats.packets)
>   		return;
>   
>   	u64_stats_update_begin(&sq->stats.syncp);
> -	sq->stats.bytes += bytes;
> -	sq->stats.packets += packets;
> +	sq->stats.bytes += stats.bytes;
> +	sq->stats.packets += stats.packets;
>   	u64_stats_update_end(&sq->stats.syncp);
>   }
>   
> @@ -2575,6 +2600,28 @@ static void virtnet_xsk_check_space(struct send_queue *sq)
>   		netif_stop_subqueue(dev, qnum);
>   }
>   
> +static void virtnet_xsk_complete(struct send_queue *sq, u32 num)
> +{
> +	struct xsk_buff_pool *pool;
> +
> +	rcu_read_lock();
> +
> +	pool = rcu_dereference(sq->xsk.pool);
> +	if (!pool) {
> +		rcu_read_unlock();
> +		return;
> +	}
> +
> +	xsk_tx_completed(pool, num);
> +
> +	rcu_read_unlock();
> +
> +	if (sq->xsk.need_wakeup) {
> +		sq->xsk.need_wakeup = false;
> +		virtqueue_napi_schedule(&sq->napi, sq->vq);
> +	}
> +}
> +
>   static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			    struct xdp_desc *desc)
>   {
> @@ -2613,7 +2660,8 @@ static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			offset = 0;
>   	}
>   
> -	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, NULL, GFP_ATOMIC);
> +	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, xsk_to_ptr(desc),
> +				   GFP_ATOMIC);
>   	if (unlikely(err))
>   		sq->xsk.last_desc = *desc;
>   
> @@ -2623,13 +2671,13 @@ static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
>   static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   				  struct xsk_buff_pool *pool,
>   				  unsigned int budget,
> -				  bool in_napi, int *done)
> +				  bool in_napi, int *done,
> +				  struct virtnet_sq_stats *stats)
>   {
>   	struct xdp_desc desc;
>   	int err, packet = 0;
>   	int ret = -EAGAIN;
>   	int need_kick = 0;
> -	int kicks = 0;
>   
>   	if (sq->xsk.last_desc.addr) {
>   		err = virtnet_xsk_xmit(sq, pool, &sq->xsk.last_desc);
> @@ -2665,7 +2713,7 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   		if (need_kick > xsk_kick_thr) {
>   			if (virtqueue_kick_prepare(sq->vq) &&
>   			    virtqueue_notify(sq->vq))
> -				++kicks;
> +				++stats->kicks;
>   
>   			need_kick = 0;
>   		}
> @@ -2675,15 +2723,11 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   		if (need_kick) {
>   			if (virtqueue_kick_prepare(sq->vq) &&
>   			    virtqueue_notify(sq->vq))
> -				++kicks;
> -		}
> -		if (kicks) {
> -			u64_stats_update_begin(&sq->stats.syncp);
> -			sq->stats.kicks += kicks;
> -			u64_stats_update_end(&sq->stats.syncp);
> +				++stats->kicks;
>   		}
>   
>   		*done = packet;
> +		stats->xdp_tx += packet;
>   
>   		xsk_tx_release(pool);
>   	}
> @@ -2694,26 +2738,37 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
>   static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
>   			   int budget, bool in_napi)
>   {
> +	struct virtnet_sq_stats stats = {};
>   	int done = 0;
>   	int err;
>   
> -	free_old_xmit_skbs(sq, in_napi);
> +	sq->xsk.need_wakeup = false;
> +	__free_old_xmit(sq, in_napi, &stats);
>   
> -	err = virtnet_xsk_xmit_batch(sq, pool, budget, in_napi, &done);
> +	err = virtnet_xsk_xmit_batch(sq, pool, budget, in_napi, &done, &stats);
>   	/* -EAGAIN: done == budget
>   	 * -EBUSY: done < budget
>   	 *  0    : done < budget
>   	 */
>   	if (err == -EBUSY) {
> -		free_old_xmit_skbs(sq, in_napi);
> +		__free_old_xmit(sq, in_napi, &stats);
>   
>   		/* If the space is enough, let napi run again. */
>   		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
>   			done = budget;


So I don't see how NAPI can be rescheduled here.

Thanks


> +		else
> +			sq->xsk.need_wakeup = true;
>   	}
>   
>   	virtnet_xsk_check_space(sq);
>   
> +	u64_stats_update_begin(&sq->stats.syncp);
> +	sq->stats.packets += stats.packets;
> +	sq->stats.bytes += stats.bytes;
> +	sq->stats.kicks += stats.kicks;
> +	sq->stats.xdp_tx += stats.xdp_tx;
> +	u64_stats_update_end(&sq->stats.syncp);
> +
>   	return done;
>   }
>   
> @@ -2991,9 +3046,9 @@ static void free_unused_bufs(struct virtnet_info *vi)
>   	for (i = 0; i < vi->max_queue_pairs; i++) {
>   		struct virtqueue *vq = vi->sq[i].vq;
>   		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
> -			if (!is_xdp_frame(buf))
> +			if (is_skb_ptr(buf))
>   				dev_kfree_skb(buf);
> -			else
> +			else if (is_xdp_frame(buf))
>   				xdp_return_frame(ptr_to_xdp(buf));
>   		}
>   	}

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
@ 2021-03-31 13:34 kernel test robot
  0 siblings, 0 replies; 8+ messages in thread
From: kernel test robot @ 2021-03-31 13:34 UTC (permalink / raw)
  To: kbuild

[-- Attachment #1: Type: text/plain, Size: 2433 bytes --]

CC: kbuild-all(a)lists.01.org
In-Reply-To: <20210331071139.15473-9-xuanzhuo@linux.alibaba.com>
References: <20210331071139.15473-9-xuanzhuo@linux.alibaba.com>
TO: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
TO: netdev(a)vger.kernel.org
CC: "Michael S. Tsirkin" <mst@redhat.com>
CC: Jason Wang <jasowang@redhat.com>
CC: Jakub Kicinski <kuba@kernel.org>
CC: "Björn Töpel" <bjorn@kernel.org>
CC: Magnus Karlsson <magnus.karlsson@intel.com>
CC: Jonathan Lemon <jonathan.lemon@gmail.com>
CC: Alexei Starovoitov <ast@kernel.org>
CC: Daniel Borkmann <daniel@iogearbox.net>
CC: Jesper Dangaard Brouer <hawk@kernel.org>

Hi Xuan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Xuan-Zhuo/virtio-net-support-xdp-socket-zero-copy-xmit/20210331-151437
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 28110056f2d07a576ca045a38f80de051b13582a
:::::: branch date: 6 hours ago
:::::: commit date: 6 hours ago
config: i386-randconfig-m021-20210330 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>

New smatch warnings:
drivers/net/virtio_net.c:295 xsk_to_ptr() warn: should 'desc->len << 4' be a 64 bit type?

Old smatch warnings:
drivers/net/virtio_net.c:1972 virtnet_set_rx_mode() warn: is 'buf' large enough for 'struct virtio_net_ctrl_mac'? 0
drivers/net/virtio_net.c:2961 virtnet_config_changed_work() error: uninitialized symbol 'v'.

vim +295 drivers/net/virtio_net.c

5050471d35d131 Toshiaki Makita 2019-01-29  291  
2f0c0d42a79523 Xuan Zhuo       2021-03-31  292  static void *xsk_to_ptr(struct xdp_desc *desc)
2f0c0d42a79523 Xuan Zhuo       2021-03-31  293  {
2f0c0d42a79523 Xuan Zhuo       2021-03-31  294  	/* save the desc len to ptr */
2f0c0d42a79523 Xuan Zhuo       2021-03-31 @295  	u64 p = desc->len << VIRTIO_XSK_PTR_SHIFT;
2f0c0d42a79523 Xuan Zhuo       2021-03-31  296  
2f0c0d42a79523 Xuan Zhuo       2021-03-31  297  	return (void *)(p | VIRTIO_XSK_FLAG);
2f0c0d42a79523 Xuan Zhuo       2021-03-31  298  }
2f0c0d42a79523 Xuan Zhuo       2021-03-31  299  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 32483 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
  2021-03-31  7:11 ` [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk Xuan Zhuo
@ 2021-03-31 10:09     ` kernel test robot
  2021-04-06  7:16     ` Jason Wang
  1 sibling, 0 replies; 8+ messages in thread
From: kernel test robot @ 2021-03-31 10:09 UTC (permalink / raw)
  To: Xuan Zhuo, netdev
  Cc: kbuild-all, Michael S. Tsirkin, Jason Wang, Jakub Kicinski,
	Björn Töpel, Magnus Karlsson, Jonathan Lemon,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer

[-- Attachment #1: Type: text/plain, Size: 2079 bytes --]

Hi Xuan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Xuan-Zhuo/virtio-net-support-xdp-socket-zero-copy-xmit/20210331-151437
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 28110056f2d07a576ca045a38f80de051b13582a
config: i386-randconfig-r023-20210330 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/2f0c0d42a79523eef5e0ab4407ff99d94607603e
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Xuan-Zhuo/virtio-net-support-xdp-socket-zero-copy-xmit/20210331-151437
        git checkout 2f0c0d42a79523eef5e0ab4407ff99d94607603e
        # save the attached .config to linux build tree
        make W=1 ARCH=i386 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   drivers/net/virtio_net.c: In function 'xsk_to_ptr':
>> drivers/net/virtio_net.c:297:9: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
     297 |  return (void *)(p | VIRTIO_XSK_FLAG);
         |         ^
   drivers/net/virtio_net.c: In function 'ptr_to_xsk':
>> drivers/net/virtio_net.c:302:15: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
     302 |  desc->len = ((u64)ptr) >> VIRTIO_XSK_PTR_SHIFT;
         |               ^


vim +297 drivers/net/virtio_net.c

   291	
   292	static void *xsk_to_ptr(struct xdp_desc *desc)
   293	{
   294		/* save the desc len to ptr */
   295		u64 p = desc->len << VIRTIO_XSK_PTR_SHIFT;
   296	
 > 297		return (void *)(p | VIRTIO_XSK_FLAG);
   298	}
   299	
   300	static void ptr_to_xsk(void *ptr, struct xdp_desc *desc)
   301	{
 > 302		desc->len = ((u64)ptr) >> VIRTIO_XSK_PTR_SHIFT;
   303	}
   304	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 35538 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
@ 2021-03-31 10:09     ` kernel test robot
  0 siblings, 0 replies; 8+ messages in thread
From: kernel test robot @ 2021-03-31 10:09 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 2134 bytes --]

Hi Xuan,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Xuan-Zhuo/virtio-net-support-xdp-socket-zero-copy-xmit/20210331-151437
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 28110056f2d07a576ca045a38f80de051b13582a
config: i386-randconfig-r023-20210330 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/2f0c0d42a79523eef5e0ab4407ff99d94607603e
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Xuan-Zhuo/virtio-net-support-xdp-socket-zero-copy-xmit/20210331-151437
        git checkout 2f0c0d42a79523eef5e0ab4407ff99d94607603e
        # save the attached .config to linux build tree
        make W=1 ARCH=i386 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   drivers/net/virtio_net.c: In function 'xsk_to_ptr':
>> drivers/net/virtio_net.c:297:9: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
     297 |  return (void *)(p | VIRTIO_XSK_FLAG);
         |         ^
   drivers/net/virtio_net.c: In function 'ptr_to_xsk':
>> drivers/net/virtio_net.c:302:15: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
     302 |  desc->len = ((u64)ptr) >> VIRTIO_XSK_PTR_SHIFT;
         |               ^


vim +297 drivers/net/virtio_net.c

   291	
   292	static void *xsk_to_ptr(struct xdp_desc *desc)
   293	{
   294		/* save the desc len to ptr */
   295		u64 p = desc->len << VIRTIO_XSK_PTR_SHIFT;
   296	
 > 297		return (void *)(p | VIRTIO_XSK_FLAG);
   298	}
   299	
   300	static void ptr_to_xsk(void *ptr, struct xdp_desc *desc)
   301	{
 > 302		desc->len = ((u64)ptr) >> VIRTIO_XSK_PTR_SHIFT;
   303	}
   304	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 35538 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk
  2021-03-31  7:11 [PATCH net-next v3 0/8] virtio-net support xdp socket zero copy xmit Xuan Zhuo
@ 2021-03-31  7:11 ` Xuan Zhuo
  2021-03-31 10:09     ` kernel test robot
  2021-04-06  7:16     ` Jason Wang
  0 siblings, 2 replies; 8+ messages in thread
From: Xuan Zhuo @ 2021-03-31  7:11 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, David S. Miller, Jakub Kicinski,
	Björn Töpel, Magnus Karlsson, Jonathan Lemon,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, KP Singh, virtualization, bpf, Dust Li

Based on the last two bits of the ptr returned by virtqueue_get_buf, 01
represents a packet sent by xdp, 10 is a packet sent by xsk, and 00
is an skb by default.

If the xsk xmit work has not been completed but the ring is full, napi
must exit first and wait for the ring to become available, so
need_wakeup is set. If __free_old_xmit is then called by start_xmit, we
can quickly wake up napi to execute the xsk xmit work.

When recycling, we need to count the number of bytes sent, so we store
the xsk desc->len in the ptr pointer, because in the xsk case ptr does
not point to a meaningful object.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 171 ++++++++++++++++++++++++++-------------
 1 file changed, 113 insertions(+), 58 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fac7d0020013..8318b89b2971 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -50,6 +50,9 @@ module_param(xsk_kick_thr, int, 0644);
 #define VIRTIO_XDP_REDIR	BIT(1)
 
 #define VIRTIO_XDP_FLAG	BIT(0)
+#define VIRTIO_XSK_FLAG	BIT(1)
+
+#define VIRTIO_XSK_PTR_SHIFT       4
 
 static struct virtio_net_hdr_mrg_rxbuf xsk_hdr;
 
@@ -147,6 +150,9 @@ struct send_queue {
 
 		/* save the desc for next xmit, when xmit fail. */
 		struct xdp_desc last_desc;
+
+		/* xsk wait for tx inter or softirq */
+		bool need_wakeup;
 	} xsk;
 };
 
@@ -266,6 +272,12 @@ struct padded_vnet_hdr {
 
 static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
 			   int budget, bool in_napi);
+static void virtnet_xsk_complete(struct send_queue *sq, u32 num);
+
+static bool is_skb_ptr(void *ptr)
+{
+	return !((unsigned long)ptr & (VIRTIO_XDP_FLAG | VIRTIO_XSK_FLAG));
+}
 
 static bool is_xdp_frame(void *ptr)
 {
@@ -277,11 +289,58 @@ static void *xdp_to_ptr(struct xdp_frame *ptr)
 	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
 }
 
+static void *xsk_to_ptr(struct xdp_desc *desc)
+{
+	/* save the desc len to ptr */
+	u64 p = desc->len << VIRTIO_XSK_PTR_SHIFT;
+
+	return (void *)(p | VIRTIO_XSK_FLAG);
+}
+
+static void ptr_to_xsk(void *ptr, struct xdp_desc *desc)
+{
+	desc->len = ((u64)ptr) >> VIRTIO_XSK_PTR_SHIFT;
+}
+
 static struct xdp_frame *ptr_to_xdp(void *ptr)
 {
 	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
 }
 
+static void __free_old_xmit(struct send_queue *sq, bool in_napi,
+			    struct virtnet_sq_stats *stats)
+{
+	unsigned int xsknum = 0;
+	unsigned int len;
+	void *ptr;
+
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (is_skb_ptr(ptr)) {
+			struct sk_buff *skb = ptr;
+
+			pr_debug("Sent skb %p\n", skb);
+
+			stats->bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else if (is_xdp_frame(ptr)) {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			stats->bytes += frame->len;
+			xdp_return_frame(frame);
+		} else {
+			struct xdp_desc desc;
+
+			ptr_to_xsk(ptr, &desc);
+			stats->bytes += desc.len;
+			++xsknum;
+		}
+		stats->packets++;
+	}
+
+	if (xsknum)
+		virtnet_xsk_complete(sq, xsknum);
+}
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -543,15 +602,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 			    int n, struct xdp_frame **frames, u32 flags)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_sq_stats stats = {};
 	struct receive_queue *rq = vi->rq;
 	struct bpf_prog *xdp_prog;
 	struct send_queue *sq;
-	unsigned int len;
-	int packets = 0;
-	int bytes = 0;
 	int nxmit = 0;
 	int kicks = 0;
-	void *ptr;
 	int ret;
 	int i;
 
@@ -570,20 +626,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 
 	/* Free up any pending old buffers before queueing new ones. */
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(is_xdp_frame(ptr))) {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += frame->len;
-			xdp_return_frame(frame);
-		} else {
-			struct sk_buff *skb = ptr;
-
-			bytes += skb->len;
-			napi_consume_skb(skb, false);
-		}
-		packets++;
-	}
+	__free_old_xmit(sq, false, &stats);
 
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
@@ -600,8 +643,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 out:
 	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
+	sq->stats.bytes += stats.bytes;
+	sq->stats.packets += stats.packets;
 	sq->stats.xdp_tx += n;
 	sq->stats.xdp_tx_drops += n - nxmit;
 	sq->stats.kicks += kicks;
@@ -1426,37 +1469,19 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 
 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
-	unsigned int len;
-	unsigned int packets = 0;
-	unsigned int bytes = 0;
-	void *ptr;
-
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(!is_xdp_frame(ptr))) {
-			struct sk_buff *skb = ptr;
+	struct virtnet_sq_stats stats = {};
 
-			pr_debug("Sent skb %p\n", skb);
-
-			bytes += skb->len;
-			napi_consume_skb(skb, in_napi);
-		} else {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += frame->len;
-			xdp_return_frame(frame);
-		}
-		packets++;
-	}
+	__free_old_xmit(sq, in_napi, &stats);
 
 	/* Avoid overhead when no packets have been processed
 	 * happens when called speculatively from start_xmit.
 	 */
-	if (!packets)
+	if (!stats.packets)
 		return;
 
 	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
+	sq->stats.bytes += stats.bytes;
+	sq->stats.packets += stats.packets;
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
@@ -2575,6 +2600,28 @@ static void virtnet_xsk_check_space(struct send_queue *sq)
 		netif_stop_subqueue(dev, qnum);
 }
 
+static void virtnet_xsk_complete(struct send_queue *sq, u32 num)
+{
+	struct xsk_buff_pool *pool;
+
+	rcu_read_lock();
+
+	pool = rcu_dereference(sq->xsk.pool);
+	if (!pool) {
+		rcu_read_unlock();
+		return;
+	}
+
+	xsk_tx_completed(pool, num);
+
+	rcu_read_unlock();
+
+	if (sq->xsk.need_wakeup) {
+		sq->xsk.need_wakeup = false;
+		virtqueue_napi_schedule(&sq->napi, sq->vq);
+	}
+}
+
 static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
 			    struct xdp_desc *desc)
 {
@@ -2613,7 +2660,8 @@ static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
 			offset = 0;
 	}
 
-	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, NULL, GFP_ATOMIC);
+	err = virtqueue_add_outbuf(sq->vq, sq->sg, n, xsk_to_ptr(desc),
+				   GFP_ATOMIC);
 	if (unlikely(err))
 		sq->xsk.last_desc = *desc;
 
@@ -2623,13 +2671,13 @@ static int virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
 static int virtnet_xsk_xmit_batch(struct send_queue *sq,
 				  struct xsk_buff_pool *pool,
 				  unsigned int budget,
-				  bool in_napi, int *done)
+				  bool in_napi, int *done,
+				  struct virtnet_sq_stats *stats)
 {
 	struct xdp_desc desc;
 	int err, packet = 0;
 	int ret = -EAGAIN;
 	int need_kick = 0;
-	int kicks = 0;
 
 	if (sq->xsk.last_desc.addr) {
 		err = virtnet_xsk_xmit(sq, pool, &sq->xsk.last_desc);
@@ -2665,7 +2713,7 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
 		if (need_kick > xsk_kick_thr) {
 			if (virtqueue_kick_prepare(sq->vq) &&
 			    virtqueue_notify(sq->vq))
-				++kicks;
+				++stats->kicks;
 
 			need_kick = 0;
 		}
@@ -2675,15 +2723,11 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
 		if (need_kick) {
 			if (virtqueue_kick_prepare(sq->vq) &&
 			    virtqueue_notify(sq->vq))
-				++kicks;
-		}
-		if (kicks) {
-			u64_stats_update_begin(&sq->stats.syncp);
-			sq->stats.kicks += kicks;
-			u64_stats_update_end(&sq->stats.syncp);
+				++stats->kicks;
 		}
 
 		*done = packet;
+		stats->xdp_tx += packet;
 
 		xsk_tx_release(pool);
 	}
@@ -2694,26 +2738,37 @@ static int virtnet_xsk_xmit_batch(struct send_queue *sq,
 static int virtnet_xsk_run(struct send_queue *sq, struct xsk_buff_pool *pool,
 			   int budget, bool in_napi)
 {
+	struct virtnet_sq_stats stats = {};
 	int done = 0;
 	int err;
 
-	free_old_xmit_skbs(sq, in_napi);
+	sq->xsk.need_wakeup = false;
+	__free_old_xmit(sq, in_napi, &stats);
 
-	err = virtnet_xsk_xmit_batch(sq, pool, budget, in_napi, &done);
+	err = virtnet_xsk_xmit_batch(sq, pool, budget, in_napi, &done, &stats);
 	/* -EAGAIN: done == budget
 	 * -EBUSY: done < budget
 	 *  0    : done < budget
 	 */
 	if (err == -EBUSY) {
-		free_old_xmit_skbs(sq, in_napi);
+		__free_old_xmit(sq, in_napi, &stats);
 
 		/* If the space is enough, let napi run again. */
 		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 			done = budget;
+		else
+			sq->xsk.need_wakeup = true;
 	}
 
 	virtnet_xsk_check_space(sq);
 
+	u64_stats_update_begin(&sq->stats.syncp);
+	sq->stats.packets += stats.packets;
+	sq->stats.bytes += stats.bytes;
+	sq->stats.kicks += stats.kicks;
+	sq->stats.xdp_tx += stats.xdp_tx;
+	u64_stats_update_end(&sq->stats.syncp);
+
 	return done;
 }
 
@@ -2991,9 +3046,9 @@ static void free_unused_bufs(struct virtnet_info *vi)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		struct virtqueue *vq = vi->sq[i].vq;
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (!is_xdp_frame(buf))
+			if (is_skb_ptr(buf))
 				dev_kfree_skb(buf);
-			else
+			else if (is_xdp_frame(buf))
 				xdp_return_frame(ptr_to_xdp(buf));
 		}
 	}
-- 
2.31.0


^ permalink raw reply related	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2021-04-07  9:31 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <1617787566.555242-6-xuanzhuo@linux.alibaba.com>
2021-04-07  9:30 ` [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk Jason Wang
2021-04-07  9:30   ` Jason Wang
2021-03-31 13:34 kernel test robot
  -- strict thread matches above, loose matches on Subject: below --
2021-03-31  7:11 [PATCH net-next v3 0/8] virtio-net support xdp socket zero copy xmit Xuan Zhuo
2021-03-31  7:11 ` [PATCH net-next v3 8/8] virtio-net: free old xmit handle xsk Xuan Zhuo
2021-03-31 10:09   ` kernel test robot
2021-03-31 10:09     ` kernel test robot
2021-04-06  7:16   ` Jason Wang
2021-04-06  7:16     ` Jason Wang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.