From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Petr Machata" <petrm@nvidia.com>,
	"Menglong Dong" <imagedong@tencent.com>,
	"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Eric Dumazet" <edumazet@google.com>,
	"Kuniyuki Iwashima" <kuniyu@amazon.com>,
	"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
	"Jonathan Lemon" <jonathan.lemon@gmail.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	bpf@vger.kernel.org, "Paolo Abeni" <pabeni@redhat.com>,
	virtualization@lists.linux-foundation.org,
	"David S. Miller" <davem@davemloft.net>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>
Subject: [PATCH 10/33] xsk: support virtio DMA map
Date: Thu,  2 Feb 2023 19:00:35 +0800	[thread overview]
Message-ID: <20230202110058.130695-11-xuanzhuo@linux.alibaba.com> (raw)
In-Reply-To: <20230202110058.130695-1-xuanzhuo@linux.alibaba.com>

When the device is a virtio device, use virtio's DMA interface instead of
the generic DMA API: __xp_dma_unmap(), xp_dma_map() and xp_alloc() check
is_virtio_device() and call the virtio DMA map/unmap/sync helpers, and the
pool's dma_sync_for_cpu/dma_sync_for_device callbacks are pointed at the
virtio variants.
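
For illustration only (a minimal sketch, not part of this patch: the
driver-side wiring comes later in the series, and the names vi and pool
below are made up), a virtio driver is expected to simply hand its virtio
device to the pool and let xsk_buff_pool pick the right helpers:

	/* hypothetical virtio-net style call site; assumes
	 * is_virtio_device() matches vi->vdev->dev
	 */
	struct virtio_device *vdev = vi->vdev;
	int err;

	err = xsk_pool_dma_map(pool, &vdev->dev, 0);
	if (err)
		return err;

	/* xp_dma_map() sees is_virtio_device() and maps the umem pages
	 * with virtio_dma_map_page(); the pool's dma_sync_for_cpu and
	 * dma_sync_for_device callbacks are set to the virtio sync
	 * helpers.  Teardown is unchanged for the caller:
	 */
	xsk_pool_dma_unmap(pool, 0);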

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 net/xdp/xsk_buff_pool.c | 59 +++++++++++++++++++++++++++++++----------
 1 file changed, 45 insertions(+), 14 deletions(-)

diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 78e325e195fa..e2785aca8396 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -3,6 +3,7 @@
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
 #include <net/xdp_sock_drv.h>
+#include <linux/virtio.h>
 
 #include "xsk_queue.h"
 #include "xdp_umem.h"
@@ -334,8 +335,12 @@ static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
 		dma = &dma_map->dma_pages[i];
 		if (*dma) {
 			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
-			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
-					     DMA_BIDIRECTIONAL, attrs);
+			if (is_virtio_device(dma_map->dev))
+				virtio_dma_unmap(dma_map->dev, *dma, PAGE_SIZE,
+						 DMA_BIDIRECTIONAL);
+			else
+				dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
+						     DMA_BIDIRECTIONAL, attrs);
 			*dma = 0;
 		}
 	}
@@ -435,22 +440,40 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 		return 0;
 	}
 
-	pool->dma_sync_for_cpu = dma_sync_for_cpu;
-	pool->dma_sync_for_device = dma_sync_for_device;
+	if (is_virtio_device(dev)) {
+		pool->dma_sync_for_cpu = virtio_dma_sync_signle_range_for_cpu;
+		pool->dma_sync_for_device = virtio_dma_sync_signle_range_for_device;
+
+	} else {
+		pool->dma_sync_for_cpu = dma_sync_for_cpu;
+		pool->dma_sync_for_device = dma_sync_for_device;
+	}
 
 	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
 	if (!dma_map)
 		return -ENOMEM;
 
 	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
-		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
-					 DMA_BIDIRECTIONAL, attrs);
-		if (dma_mapping_error(dev, dma)) {
-			__xp_dma_unmap(dma_map, attrs);
-			return -ENOMEM;
+		if (is_virtio_device(dev)) {
+			dma = virtio_dma_map_page(dev, pages[i], 0, PAGE_SIZE,
+						  DMA_BIDIRECTIONAL);
+
+			if (virtio_dma_mapping_error(dev, dma))
+				goto err;
+
+			if (virtio_dma_need_sync(dev, dma))
+				dma_map->dma_need_sync = true;
+
+		} else {
+			dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
+						 DMA_BIDIRECTIONAL, attrs);
+
+			if (dma_mapping_error(dev, dma))
+				goto err;
+
+			if (dma_need_sync(dev, dma))
+				dma_map->dma_need_sync = true;
 		}
-		if (dma_need_sync(dev, dma))
-			dma_map->dma_need_sync = true;
 		dma_map->dma_pages[i] = dma;
 	}
 
@@ -464,6 +487,9 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 	}
 
 	return 0;
+err:
+	__xp_dma_unmap(dma_map, attrs);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(xp_dma_map);
 
@@ -546,9 +572,14 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 	xskb->xdp.data_meta = xskb->xdp.data;
 
 	if (pool->dma_need_sync) {
-		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
-						 pool->frame_len,
-						 DMA_BIDIRECTIONAL);
+		if (is_virtio_device(pool->dev))
+			virtio_dma_sync_signle_range_for_device(pool->dev, xskb->dma, 0,
+								pool->frame_len,
+								DMA_BIDIRECTIONAL);
+		else
+			dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
+							 pool->frame_len,
+							 DMA_BIDIRECTIONAL);
 	}
 	return &xskb->xdp;
 }
-- 
2.32.0.3.g01195cf9f

Thread overview: 155+ messages in the archive (cross-posted copies omitted below)

2023-02-02 11:00 [PATCH 00/33] virtio-net: support AF_XDP zero copy Xuan Zhuo
2023-02-02 11:00 ` [PATCH 01/33] virtio_ring: virtqueue_add() support premapped Xuan Zhuo
2023-02-02 11:00 ` [PATCH 02/33] virtio_ring: split: virtqueue_add_split() " Xuan Zhuo
2023-02-02 11:00 ` [PATCH 03/33] virtio_ring: packed: virtqueue_add_packed() " Xuan Zhuo
2023-02-03  9:16   ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 04/33] virtio_ring: introduce virtqueue_add_outbuf_premapped() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 05/33] virtio_ring: introduce virtqueue_add_inbuf_premapped() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 06/33] virtio_ring: introduce virtqueue_reset() Xuan Zhuo
2023-02-03  9:05   ` Michael S. Tsirkin
2023-02-03  9:09     ` Xuan Zhuo
2023-02-13 12:15       ` Michael S. Tsirkin
2023-02-14  1:53         ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 07/33] virtio_ring: add api virtio_dma_map() for advance dma Xuan Zhuo
2023-02-03  9:07   ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 08/33] virtio_ring: introduce dma sync api for virtio Xuan Zhuo
2023-02-02 12:44   ` Magnus Karlsson
2023-02-03  9:24   ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 09/33] xsk: xsk_buff_pool add callback for dma_sync Xuan Zhuo
2023-02-02 12:51   ` Magnus Karlsson
2023-02-03  7:01     ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 10/33] xsk: support virtio DMA map Xuan Zhuo [this message]
2023-02-05 22:04   ` kernel test robot
2023-02-02 11:00 ` [PATCH 11/33] virtio_net: rename free_old_xmit_skbs to free_old_xmit Xuan Zhuo
2023-02-02 11:00 ` [PATCH 12/33] virtio_net: unify the code for recycling the xmit ptr Xuan Zhuo
2023-02-02 11:00 ` [PATCH 13/33] virtio_net: virtnet_poll_tx support rescheduled Xuan Zhuo
2023-02-02 11:00 ` [PATCH 14/33] virtio_net: independent directory Xuan Zhuo
2023-02-02 11:00 ` [PATCH 15/33] virtio_net: move to virtio_net.h Xuan Zhuo
2023-02-03  8:53   ` Michael S. Tsirkin
2023-02-03  9:04     ` Xuan Zhuo
2023-02-03  9:26       ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 16/33] virtio_net: introduce virtnet_xdp_handler() to seprate the logic of run xdp Xuan Zhuo
2023-02-03  8:55   ` Michael S. Tsirkin
2023-02-03  9:01     ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 17/33] virtio_net: receive_small() use virtnet_xdp_handler() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 18/33] virtio_net: receive_merageable() " Xuan Zhuo
2023-02-02 17:16   ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 19/33] virtio_net: introduce virtnet_tx_reset() Xuan Zhuo
2023-02-02 17:23   ` Michael S. Tsirkin
2023-02-03  4:35     ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 20/33] virtio_net: xsk: introduce virtnet_rq_bind_xsk_pool() Xuan Zhuo
2023-02-03  8:48   ` Michael S. Tsirkin
2023-02-03  8:52     ` Xuan Zhuo
2023-02-03  9:28       ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 21/33] virtio_net: xsk: introduce virtnet_xsk_pool_enable() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 22/33] virtio_net: xsk: introduce xsk disable Xuan Zhuo
2023-02-02 23:02   ` kernel test robot
2023-02-12  7:56   ` kernel test robot
2023-02-02 11:00 ` [PATCH 23/33] virtio_net: xsk: support xsk setup Xuan Zhuo
2023-02-02 11:00 ` [PATCH 24/33] virtio_net: xsk: stop disable tx napi Xuan Zhuo
2023-02-02 17:25   ` Michael S. Tsirkin
2023-02-03  3:24     ` Xuan Zhuo
2023-02-03  8:33       ` Michael S. Tsirkin
2023-02-03  8:49         ` Xuan Zhuo
2023-02-03  9:29           ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 25/33] virtio_net: xsk: __free_old_xmit distinguishes xsk buffer Xuan Zhuo
2023-02-02 11:00 ` [PATCH 26/33] virtio_net: virtnet_sq_free_unused_buf() check " Xuan Zhuo
2023-02-02 11:00 ` [PATCH 27/33] virtio_net: virtnet_rq_free_unused_buf() " Xuan Zhuo
2023-02-02 11:00 ` [PATCH 28/33] net: introduce napi_tx_raise() Xuan Zhuo
2023-02-02 11:00 ` [PATCH 29/33] virtio_net: xsk: tx: support tx Xuan Zhuo
2023-02-03  8:39   ` Maciej Fijalkowski
2023-02-03  8:55     ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 30/33] virtio_net: xsk: tx: support wakeup Xuan Zhuo
2023-02-02 11:00 ` [PATCH 31/33] virtio_net: xsk: tx: auto wakeup when free old xmit Xuan Zhuo
2023-02-02 11:00 ` [PATCH 32/33] virtio_net: xsk: rx: introduce add_recvbuf_xsk() Xuan Zhuo
2023-02-03  8:43   ` Maciej Fijalkowski
2023-02-03  8:56     ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 33/33] virtio_net: xsk: rx: introduce receive_xsk() to recv xsk buffer Xuan Zhuo
2023-02-02 11:08 ` [PATCH 00/33] virtio-net: support AF_XDP zero copy Xuan Zhuo
2023-02-02 11:08 ` Michael S. Tsirkin
2023-02-02 11:11   ` Xuan Zhuo
2023-02-02 11:44   ` Xuan Zhuo
2023-02-03  9:08     ` Michael S. Tsirkin
2023-02-03  9:09       ` Xuan Zhuo
2023-02-02 14:41 ` Paolo Abeni
2023-02-03  3:33   ` Xuan Zhuo
2023-02-03  8:37     ` Michael S. Tsirkin
2023-02-03  8:46       ` Maciej Fijalkowski
2023-02-03  9:09         ` Michael S. Tsirkin
2023-02-03  9:17     ` Michael S. Tsirkin
2023-02-06  2:41       ` Xuan Zhuo
2023-02-13 12:14         ` Michael S. Tsirkin
