From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "David S. Miller" <davem@davemloft.net>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Jason Wang" <jasowang@redhat.com>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>,
	"Jonathan Lemon" <jonathan.lemon@gmail.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Andrii Nakryiko" <andrii@kernel.org>,
	"Martin KaFai Lau" <kafai@fb.com>,
	"Song Liu" <songliubraving@fb.com>, "Yonghong Song" <yhs@fb.com>,
	"KP Singh" <kpsingh@kernel.org>,
	"Xuan Zhuo" <xuanzhuo@linux.alibaba.com>,
	virtualization@lists.linux-foundation.org, bpf@vger.kernel.org,
	"dust . li" <dust.li@linux.alibaba.com>
Subject: [PATCH net-next v5 08/15] virtio-net: split the receive_mergeable function
Date: Thu, 10 Jun 2021 16:22:02 +0800
Message-ID: <20210610082209.91487-9-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20210610082209.91487-1-xuanzhuo@linux.alibaba.com>

receive_mergeable() has become too complicated, so split it up here.
One reason is to make the function more readable; the other is that the
two new, independent functions will be called separately by subsequent
patches.
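
After the split, the success path of receive_mergeable() reduces to the
following shape (taken from the diff below; XDP and error handling omitted):

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
			       metasize, !!headroom);
	if (unlikely(!head_skb))
		goto err_skb;

	/* Append the remaining mergeable buffers to head_skb. */
	if (num_buf > 1)
		head_skb = merge_receive_follow_bufs(dev, vi, rq, head_skb,
						     num_buf, stats);
	if (head_skb)
		ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);

merge_receive_follow_bufs() collects the follow-up buffers into the head
skb, and merge_drop_follow_bufs() releases them on the error paths.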

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 181 ++++++++++++++++++++++++---------------
 1 file changed, 111 insertions(+), 70 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3fd87bf2b2ad..989aba600e63 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -733,6 +733,109 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	return NULL;
 }
 
+static void merge_drop_follow_bufs(struct net_device *dev,
+				   struct receive_queue *rq,
+				   u16 num_buf,
+				   struct virtnet_rq_stats *stats)
+{
+	struct page *page;
+	unsigned int len;
+	void *buf;
+
+	while (num_buf-- > 1) {
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
+			pr_debug("%s: rx error: %d buffers missing\n",
+				 dev->name, num_buf);
+			dev->stats.rx_length_errors++;
+			break;
+		}
+		stats->bytes += len;
+		page = virt_to_head_page(buf);
+		put_page(page);
+	}
+}
+
+static struct sk_buff *merge_receive_follow_bufs(struct net_device *dev,
+						 struct virtnet_info *vi,
+						 struct receive_queue *rq,
+						 struct sk_buff *head_skb,
+						 u16 num_buf,
+						 struct virtnet_rq_stats *stats)
+{
+	struct sk_buff *curr_skb;
+	unsigned int truesize;
+	unsigned int len, num;
+	struct page *page;
+	void *buf, *ctx;
+	int offset;
+
+	curr_skb = head_skb;
+	num = num_buf;
+
+	while (--num_buf) {
+		int num_skb_frags;
+
+		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+		if (unlikely(!buf)) {
+			pr_debug("%s: rx error: %d buffers out of %d missing\n",
+				 dev->name, num_buf, num);
+			dev->stats.rx_length_errors++;
+			goto err_buf;
+		}
+
+		stats->bytes += len;
+		page = virt_to_head_page(buf);
+
+		truesize = mergeable_ctx_to_truesize(ctx);
+		if (unlikely(len > truesize)) {
+			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+				 dev->name, len, (unsigned long)ctx);
+			dev->stats.rx_length_errors++;
+			goto err_skb;
+		}
+
+		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+
+			if (unlikely(!nskb))
+				goto err_skb;
+			if (curr_skb == head_skb)
+				skb_shinfo(curr_skb)->frag_list = nskb;
+			else
+				curr_skb->next = nskb;
+			curr_skb = nskb;
+			head_skb->truesize += nskb->truesize;
+			num_skb_frags = 0;
+		}
+		if (curr_skb != head_skb) {
+			head_skb->data_len += len;
+			head_skb->len += len;
+			head_skb->truesize += truesize;
+		}
+		offset = buf - page_address(page);
+		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+			put_page(page);
+			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+					     len, truesize);
+		} else {
+			skb_add_rx_frag(curr_skb, num_skb_frags, page,
+					offset, len, truesize);
+		}
+	}
+
+	return head_skb;
+
+err_skb:
+	put_page(page);
+	merge_drop_follow_bufs(dev, rq, num_buf, stats);
+err_buf:
+	stats->drops++;
+	dev_kfree_skb(head_skb);
+	return NULL;
+}
+
 static struct sk_buff *receive_small(struct net_device *dev,
 				     struct virtnet_info *vi,
 				     struct receive_queue *rq,
@@ -909,7 +1012,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
 	struct page *page = virt_to_head_page(buf);
 	int offset = buf - page_address(page);
-	struct sk_buff *head_skb, *curr_skb;
+	struct sk_buff *head_skb;
 	struct bpf_prog *xdp_prog;
 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
@@ -1054,65 +1157,15 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
 			       metasize, !!headroom);
-	curr_skb = head_skb;
-
-	if (unlikely(!curr_skb))
+	if (unlikely(!head_skb))
 		goto err_skb;
-	while (--num_buf) {
-		int num_skb_frags;
 
-		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
-		if (unlikely(!buf)) {
-			pr_debug("%s: rx error: %d buffers out of %d missing\n",
-				 dev->name, num_buf,
-				 virtio16_to_cpu(vi->vdev,
-						 hdr->num_buffers));
-			dev->stats.rx_length_errors++;
-			goto err_buf;
-		}
-
-		stats->bytes += len;
-		page = virt_to_head_page(buf);
-
-		truesize = mergeable_ctx_to_truesize(ctx);
-		if (unlikely(len > truesize)) {
-			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
-				 dev->name, len, (unsigned long)ctx);
-			dev->stats.rx_length_errors++;
-			goto err_skb;
-		}
-
-		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
-		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
-			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-
-			if (unlikely(!nskb))
-				goto err_skb;
-			if (curr_skb == head_skb)
-				skb_shinfo(curr_skb)->frag_list = nskb;
-			else
-				curr_skb->next = nskb;
-			curr_skb = nskb;
-			head_skb->truesize += nskb->truesize;
-			num_skb_frags = 0;
-		}
-		if (curr_skb != head_skb) {
-			head_skb->data_len += len;
-			head_skb->len += len;
-			head_skb->truesize += truesize;
-		}
-		offset = buf - page_address(page);
-		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
-			put_page(page);
-			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
-					     len, truesize);
-		} else {
-			skb_add_rx_frag(curr_skb, num_skb_frags, page,
-					offset, len, truesize);
-		}
-	}
+	if (num_buf > 1)
+		head_skb = merge_receive_follow_bufs(dev, vi, rq, head_skb,
+						     num_buf, stats);
+	if (head_skb)
+		ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
 
-	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
 	return head_skb;
 
 err_xdp:
@@ -1120,19 +1173,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	stats->xdp_drops++;
 err_skb:
 	put_page(page);
-	while (num_buf-- > 1) {
-		buf = virtqueue_get_buf(rq->vq, &len);
-		if (unlikely(!buf)) {
-			pr_debug("%s: rx error: %d buffers missing\n",
-				 dev->name, num_buf);
-			dev->stats.rx_length_errors++;
-			break;
-		}
-		stats->bytes += len;
-		page = virt_to_head_page(buf);
-		put_page(page);
-	}
-err_buf:
+	merge_drop_follow_bufs(dev, rq, num_buf, stats);
 	stats->drops++;
 	dev_kfree_skb(head_skb);
 xdp_xmit:
-- 
2.31.0



