netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Xuan Zhuo <xuanzhuo@linux.alibaba.com>,
	Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Jesper Dangaard Brouer <hawk@kernel.org>,
	John Fastabend <john.fastabend@gmail.com>,
	virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v3 16/27] virtio_net: xsk: tx: support xmit xsk buffer
Date: Fri, 29 Dec 2023 15:30:57 +0800	[thread overview]
Message-ID: <20231229073108.57778-17-xuanzhuo@linux.alibaba.com> (raw)
In-Reply-To: <20231229073108.57778-1-xuanzhuo@linux.alibaba.com>

The driver's tx NAPI is essential for XSK: it is responsible for
fetching descriptors from the XSK tx queue and transmitting them.

As a first step, we need to trigger the tx NAPI to start transmission.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio/main.c       | 22 ++++++---
 drivers/net/virtio/virtio_net.h |  4 ++
 drivers/net/virtio/xsk.c        | 88 +++++++++++++++++++++++++++++++++
 drivers/net/virtio/xsk.h        | 13 +++++
 4 files changed, 121 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
index 6ab1f3418139..cb6c8916f605 100644
--- a/drivers/net/virtio/main.c
+++ b/drivers/net/virtio/main.c
@@ -612,9 +612,9 @@ static void free_old_xmit(struct virtnet_sq *sq, bool in_napi)
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
-static void check_sq_full_and_disable(struct virtnet_info *vi,
-				      struct net_device *dev,
-				      struct virtnet_sq *sq)
+void virtnet_check_sq_full_and_disable(struct virtnet_info *vi,
+				       struct net_device *dev,
+				       struct virtnet_sq *sq)
 {
 	bool use_napi = sq->napi.weight;
 	int qnum;
@@ -772,7 +772,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	ret = nxmit;
 
 	if (!virtnet_is_xdp_raw_buffer_queue(vi, sq - vi->sq))
-		check_sq_full_and_disable(vi, dev, sq);
+		virtnet_check_sq_full_and_disable(vi, dev, sq);
 
 	if (flags & XDP_XMIT_FLUSH) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
@@ -2042,6 +2042,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	struct virtnet_info *vi = sq->vq->vdev->priv;
 	unsigned int index = vq2txq(sq->vq);
 	struct netdev_queue *txq;
+	bool xsk_busy = false;
 	int opaque;
 	bool done;
 
@@ -2054,11 +2055,20 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
 	virtqueue_disable_cb(sq->vq);
-	free_old_xmit(sq, true);
+
+	if (sq->xsk.pool)
+		xsk_busy = virtnet_xsk_xmit(sq, sq->xsk.pool, budget);
+	else
+		free_old_xmit(sq, true);
 
 	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
 		netif_tx_wake_queue(txq);
 
+	if (xsk_busy) {
+		__netif_tx_unlock(txq);
+		return budget;
+	}
+
 	opaque = virtqueue_enable_cb_prepare(sq->vq);
 
 	done = napi_complete_done(napi, 0);
@@ -2173,7 +2183,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		nf_reset_ct(skb);
 	}
 
-	check_sq_full_and_disable(vi, dev, sq);
+	virtnet_check_sq_full_and_disable(vi, dev, sq);
 
 	if (kick || netif_xmit_stopped(txq)) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
index 6888b0b767c6..7dcbd1d40fba 100644
--- a/drivers/net/virtio/virtio_net.h
+++ b/drivers/net/virtio/virtio_net.h
@@ -9,6 +9,7 @@
 #include <net/xdp_sock_drv.h>
 
 #define VIRTIO_XDP_FLAG	BIT(0)
+#define VIRTIO_XSK_FLAG	BIT(1)
 
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
@@ -289,4 +290,7 @@ void virtnet_tx_pause(struct virtnet_info *vi, struct virtnet_sq *sq);
 void virtnet_tx_resume(struct virtnet_info *vi, struct virtnet_sq *sq);
 void virtnet_sq_free_unused_bufs(struct virtqueue *vq);
 void virtnet_rq_free_unused_bufs(struct virtqueue *vq);
+void virtnet_check_sq_full_and_disable(struct virtnet_info *vi,
+				       struct net_device *dev,
+				       struct virtnet_sq *sq);
 #endif
diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c
index 68fa1c422b41..d2a96424ade9 100644
--- a/drivers/net/virtio/xsk.c
+++ b/drivers/net/virtio/xsk.c
@@ -4,9 +4,97 @@
  */
 
 #include "virtio_net.h"
+#include "xsk.h"
 
 static struct virtio_net_hdr_mrg_rxbuf xsk_hdr;
 
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+	sg->dma_address = addr;
+	sg->length = len;
+}
+
+static int virtnet_xsk_xmit_one(struct virtnet_sq *sq,
+				struct xsk_buff_pool *pool,
+				struct xdp_desc *desc)
+{
+	struct virtnet_info *vi;
+	dma_addr_t addr;
+
+	vi = sq->vq->vdev->priv;
+
+	addr = xsk_buff_raw_get_dma(pool, desc->addr);
+	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
+
+	sg_init_table(sq->sg, 2);
+
+	sg_fill_dma(sq->sg, sq->xsk.hdr_dma_address, vi->hdr_len);
+	sg_fill_dma(sq->sg + 1, addr, desc->len);
+
+	return virtqueue_add_outbuf(sq->vq, sq->sg, 2,
+				    virtnet_xsk_to_ptr(desc->len), GFP_ATOMIC);
+}
+
+static int virtnet_xsk_xmit_batch(struct virtnet_sq *sq,
+				  struct xsk_buff_pool *pool,
+				  unsigned int budget,
+				  u64 *kicks)
+{
+	struct xdp_desc *descs = pool->tx_descs;
+	u32 nb_pkts, max_pkts, i;
+	bool kick = false;
+	int err;
+
+	/* Every xsk tx packet needs two desc(virtnet header and packet). So we
+	 * use sq->vq->num_free / 2 as the limitation.
+	 */
+	max_pkts = min_t(u32, budget, sq->vq->num_free / 2);
+
+	nb_pkts = xsk_tx_peek_release_desc_batch(pool, max_pkts);
+	if (!nb_pkts)
+		return 0;
+
+	for (i = 0; i < nb_pkts; i++) {
+		err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
+		if (unlikely(err))
+			break;
+
+		kick = true;
+	}
+
+	if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
+		(*kicks)++;
+
+	return i;
+}
+
+bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct xsk_buff_pool *pool,
+		      int budget)
+{
+	struct virtnet_info *vi = sq->vq->vdev->priv;
+	u64 bytes = 0, packets = 0, kicks = 0;
+	int sent;
+
+	virtnet_free_old_xmit(sq, true, &bytes, &packets);
+
+	sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
+
+	if (!virtnet_is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+		virtnet_check_sq_full_and_disable(vi, vi->dev, sq);
+
+	u64_stats_update_begin(&sq->stats.syncp);
+	u64_stats_add(&sq->stats.packets, packets);
+	u64_stats_add(&sq->stats.bytes,   bytes);
+	u64_stats_add(&sq->stats.kicks,   kicks);
+	u64_stats_add(&sq->stats.xdp_tx,  sent);
+	u64_stats_update_end(&sq->stats.syncp);
+
+	if (xsk_uses_need_wakeup(pool))
+		xsk_set_tx_need_wakeup(pool);
+
+	return sent == budget;
+}
+
 static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct virtnet_rq *rq,
 				    struct xsk_buff_pool *pool)
 {
diff --git a/drivers/net/virtio/xsk.h b/drivers/net/virtio/xsk.h
index 1918285c310c..73ca8cd5308b 100644
--- a/drivers/net/virtio/xsk.h
+++ b/drivers/net/virtio/xsk.h
@@ -3,5 +3,18 @@
 #ifndef __XSK_H__
 #define __XSK_H__
 
+#define VIRTIO_XSK_FLAG_OFFSET	4
+
+static inline void *virtnet_xsk_to_ptr(u32 len)
+{
+	unsigned long p;
+
+	p = len << VIRTIO_XSK_FLAG_OFFSET;
+
+	return (void *)(p | VIRTIO_XSK_FLAG);
+}
+
 int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
+bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct xsk_buff_pool *pool,
+		      int budget);
 #endif
-- 
2.32.0.3.g01195cf9f


  parent reply	other threads:[~2023-12-29  7:31 UTC|newest]

Thread overview: 37+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-12-29  7:30 [PATCH net-next v3 00/27] virtio-net: support AF_XDP zero copy Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 01/27] virtio_net: rename free_old_xmit_skbs to free_old_xmit Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 02/27] virtio_net: unify the code for recycling the xmit ptr Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 03/27] virtio_net: independent directory Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 04/27] virtio_net: move core structures to virtio_net.h Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 05/27] virtio_net: add prefix virtnet to all struct inside virtio_net.h Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 06/27] virtio_ring: introduce virtqueue_get_buf_ctx_dma() Xuan Zhuo
2024-01-11  8:34   ` Jason Wang
2024-01-16  7:32     ` Xuan Zhuo
2024-01-22  4:18       ` Jason Wang
2024-01-22  6:04         ` Xuan Zhuo
2024-01-22  6:54           ` Jason Wang
2023-12-29  7:30 ` [PATCH net-next v3 07/27] virtio_ring: virtqueue_disable_and_recycle let the callback detach bufs Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 08/27] virtio_ring: introduce virtqueue_detach_unused_buf_dma() Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 09/27] virtio_ring: introduce virtqueue_get_dma_premapped() Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 10/27] virtio_net: sq support premapped mode Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 11/27] virtio_net: separate virtnet_rx_resize() Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 12/27] virtio_net: separate virtnet_tx_resize() Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 13/27] virtio_net: xsk: bind/unbind xsk Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 14/27] virtio_net: xsk: prevent disable tx napi Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 15/27] virtio_net: move some api to header Xuan Zhuo
2023-12-29  7:30 ` Xuan Zhuo [this message]
2023-12-30  0:28   ` [PATCH net-next v3 16/27] virtio_net: xsk: tx: support xmit xsk buffer kernel test robot
2023-12-29  7:30 ` [PATCH net-next v3 17/27] virtio_net: xsk: tx: support wakeup Xuan Zhuo
2023-12-29  7:30 ` [PATCH net-next v3 18/27] virtio_net: xsk: tx: handle the transmitted xsk buffer Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 19/27] virtio_net: xsk: tx: free the unused " Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 20/27] virtio_net: separate receive_mergeable Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 21/27] virtio_net: separate receive_buf Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 22/27] virtio_net: xsk: rx: support fill with xsk buffer Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 23/27] virtio_net: xsk: rx: support recv merge mode Xuan Zhuo
2023-12-29 21:03   ` kernel test robot
2023-12-30  1:01   ` kernel test robot
2023-12-29  7:31 ` [PATCH net-next v3 24/27] virtio_net: xsk: rx: support recv small mode Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 25/27] virtio_net: xsk: rx: free the unused xsk buffer Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 26/27] virtio_net: update tx timeout record Xuan Zhuo
2023-12-29  7:31 ` [PATCH net-next v3 27/27] virtio_net: xdp_features add NETDEV_XDP_ACT_XSK_ZEROCOPY Xuan Zhuo
2024-01-11  3:27 ` [PATCH net-next v3 00/27] virtio-net: support AF_XDP zero copy Jason Wang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231229073108.57778-17-xuanzhuo@linux.alibaba.com \
    --to=xuanzhuo@linux.alibaba.com \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=hawk@kernel.org \
    --cc=jasowang@redhat.com \
    --cc=john.fastabend@gmail.com \
    --cc=kuba@kernel.org \
    --cc=mst@redhat.com \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    --cc=virtualization@lists.linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).