All of lore.kernel.org
 help / color / mirror / Atom feed
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: "Michael S. Tsirkin" <mst@redhat.com>
Cc: netdev@vger.kernel.org, "David S. Miller" <davem@davemloft.net>,
	"Eric Dumazet" <edumazet@google.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Paolo Abeni" <pabeni@redhat.com>,
	"Jason Wang" <jasowang@redhat.com>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>,
	"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
	"Jonathan Lemon" <jonathan.lemon@gmail.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
	"Menglong Dong" <imagedong@tencent.com>,
	"Kuniyuki Iwashima" <kuniyu@amazon.com>,
	"Petr Machata" <petrm@nvidia.com>,
	virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: Re: [PATCH 15/33] virtio_net: move to virtio_net.h
Date: Fri, 3 Feb 2023 17:04:42 +0800	[thread overview]
Message-ID: <1675415082.88957-7-xuanzhuo@linux.alibaba.com> (raw)
In-Reply-To: <20230203035028-mutt-send-email-mst@kernel.org>

On Fri, 3 Feb 2023 03:53:12 -0500, "Michael S. Tsirkin" <mst@redhat.com> wrote:
> On Thu, Feb 02, 2023 at 07:00:40PM +0800, Xuan Zhuo wrote:
> > Move some structure definitions and inline functions into the
> > virtio_net.h file.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio/main.c       | 247 +----------------------------
> >  drivers/net/virtio/virtio_net.h | 265 ++++++++++++++++++++++++++++++++
> >  2 files changed, 267 insertions(+), 245 deletions(-)
> >  create mode 100644 drivers/net/virtio/virtio_net.h
> >
> > diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
> > index eb7f00194b5c..5683cb576474 100644
> > --- a/drivers/net/virtio/main.c
> > +++ b/drivers/net/virtio/main.c
> > @@ -4,24 +4,8 @@
> >   * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
> >   */
> >  //#define DEBUG
> > -#include <linux/netdevice.h>
> > -#include <linux/etherdevice.h>
> > -#include <linux/ethtool.h>
> > -#include <linux/module.h>
> > -#include <linux/virtio.h>
> > -#include <linux/virtio_net.h>
> > -#include <linux/bpf.h>
> > -#include <linux/bpf_trace.h>
> > -#include <linux/scatterlist.h>
> > -#include <linux/if_vlan.h>
> > -#include <linux/slab.h>
> > -#include <linux/cpu.h>
> > -#include <linux/average.h>
> > -#include <linux/filter.h>
> > -#include <linux/kernel.h>
> > -#include <net/route.h>
> > -#include <net/xdp.h>
> > -#include <net/net_failover.h>
> > +
> > +#include "virtio_net.h"
> >
> >  static int napi_weight = NAPI_POLL_WEIGHT;
> >  module_param(napi_weight, int, 0444);
>
>
> You should only move the headers that are actually needed, not
> everything.

You mean the "include" directives.

I think it is a simple way to concentrate the includes into a single header
file, which the other .c files can then reference.

Do you agree?

Thanks.

>
>
> > @@ -44,15 +28,6 @@ module_param(napi_tx, bool, 0644);
> >  #define VIRTIO_XDP_TX		BIT(0)
> >  #define VIRTIO_XDP_REDIR	BIT(1)
> >
> > -#define VIRTIO_XDP_FLAG	BIT(0)
> > -
> > -/* RX packet size EWMA. The average packet size is used to determine the packet
> > - * buffer size when refilling RX rings. As the entire RX ring may be refilled
> > - * at once, the weight is chosen so that the EWMA will be insensitive to short-
> > - * term, transient changes in packet size.
> > - */
> > -DECLARE_EWMA(pkt_len, 0, 64)
> > -
> >  #define VIRTNET_DRIVER_VERSION "1.0.0"
> >
> >  static const unsigned long guest_offloads[] = {
> > @@ -72,36 +47,6 @@ static const unsigned long guest_offloads[] = {
> >  				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
> >  				(1ULL << VIRTIO_NET_F_GUEST_USO6))
> >
> > -struct virtnet_stat_desc {
> > -	char desc[ETH_GSTRING_LEN];
> > -	size_t offset;
> > -};
> > -
> > -struct virtnet_sq_stats {
> > -	struct u64_stats_sync syncp;
> > -	u64 packets;
> > -	u64 bytes;
> > -	u64 xdp_tx;
> > -	u64 xdp_tx_drops;
> > -	u64 kicks;
> > -	u64 tx_timeouts;
> > -};
> > -
> > -struct virtnet_rq_stats {
> > -	struct u64_stats_sync syncp;
> > -	u64 packets;
> > -	u64 bytes;
> > -	u64 drops;
> > -	u64 xdp_packets;
> > -	u64 xdp_tx;
> > -	u64 xdp_redirects;
> > -	u64 xdp_drops;
> > -	u64 kicks;
> > -};
> > -
> > -#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
> > -#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
> > -
> >  static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
> >  	{ "packets",		VIRTNET_SQ_STAT(packets) },
> >  	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
> > @@ -125,57 +70,6 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> >  #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
> >  #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
> >
> > -/* Internal representation of a send virtqueue */
> > -struct send_queue {
> > -	/* Virtqueue associated with this send _queue */
> > -	struct virtqueue *vq;
> > -
> > -	/* TX: fragments + linear part + virtio header */
> > -	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > -
> > -	/* Name of the send queue: output.$index */
> > -	char name[16];
> > -
> > -	struct virtnet_sq_stats stats;
> > -
> > -	struct napi_struct napi;
> > -
> > -	/* Record whether sq is in reset state. */
> > -	bool reset;
> > -};
> > -
> > -/* Internal representation of a receive virtqueue */
> > -struct receive_queue {
> > -	/* Virtqueue associated with this receive_queue */
> > -	struct virtqueue *vq;
> > -
> > -	struct napi_struct napi;
> > -
> > -	struct bpf_prog __rcu *xdp_prog;
> > -
> > -	struct virtnet_rq_stats stats;
> > -
> > -	/* Chain pages by the private ptr. */
> > -	struct page *pages;
> > -
> > -	/* Average packet length for mergeable receive buffers. */
> > -	struct ewma_pkt_len mrg_avg_pkt_len;
> > -
> > -	/* Page frag for packet buffer allocation. */
> > -	struct page_frag alloc_frag;
> > -
> > -	/* RX: fragments + linear part + virtio header */
> > -	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > -
> > -	/* Min single buffer size for mergeable buffers case. */
> > -	unsigned int min_buf_len;
> > -
> > -	/* Name of this receive queue: input.$index */
> > -	char name[16];
> > -
> > -	struct xdp_rxq_info xdp_rxq;
> > -};
> > -
> >  /* This structure can contain rss message with maximum settings for indirection table and keysize
> >   * Note, that default structure that describes RSS configuration virtio_net_rss_config
> >   * contains same info but can't handle table values.
> > @@ -206,90 +100,6 @@ struct control_buf {
> >  	struct virtio_net_ctrl_rss rss;
> >  };
> >
> > -struct virtnet_info {
> > -	struct virtio_device *vdev;
> > -	struct virtqueue *cvq;
> > -	struct net_device *dev;
> > -	struct send_queue *sq;
> > -	struct receive_queue *rq;
> > -	unsigned int status;
> > -
> > -	/* Max # of queue pairs supported by the device */
> > -	u16 max_queue_pairs;
> > -
> > -	/* # of queue pairs currently used by the driver */
> > -	u16 curr_queue_pairs;
> > -
> > -	/* # of XDP queue pairs currently used by the driver */
> > -	u16 xdp_queue_pairs;
> > -
> > -	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> > -	bool xdp_enabled;
> > -
> > -	/* I like... big packets and I cannot lie! */
> > -	bool big_packets;
> > -
> > -	/* number of sg entries allocated for big packets */
> > -	unsigned int big_packets_num_skbfrags;
> > -
> > -	/* Host will merge rx buffers for big packets (shake it! shake it!) */
> > -	bool mergeable_rx_bufs;
> > -
> > -	/* Host supports rss and/or hash report */
> > -	bool has_rss;
> > -	bool has_rss_hash_report;
> > -	u8 rss_key_size;
> > -	u16 rss_indir_table_size;
> > -	u32 rss_hash_types_supported;
> > -	u32 rss_hash_types_saved;
> > -
> > -	/* Has control virtqueue */
> > -	bool has_cvq;
> > -
> > -	/* Host can handle any s/g split between our header and packet data */
> > -	bool any_header_sg;
> > -
> > -	/* Packet virtio header size */
> > -	u8 hdr_len;
> > -
> > -	/* Work struct for delayed refilling if we run low on memory. */
> > -	struct delayed_work refill;
> > -
> > -	/* Is delayed refill enabled? */
> > -	bool refill_enabled;
> > -
> > -	/* The lock to synchronize the access to refill_enabled */
> > -	spinlock_t refill_lock;
> > -
> > -	/* Work struct for config space updates */
> > -	struct work_struct config_work;
> > -
> > -	/* Does the affinity hint is set for virtqueues? */
> > -	bool affinity_hint_set;
> > -
> > -	/* CPU hotplug instances for online & dead */
> > -	struct hlist_node node;
> > -	struct hlist_node node_dead;
> > -
> > -	struct control_buf *ctrl;
> > -
> > -	/* Ethtool settings */
> > -	u8 duplex;
> > -	u32 speed;
> > -
> > -	/* Interrupt coalescing settings */
> > -	u32 tx_usecs;
> > -	u32 rx_usecs;
> > -	u32 tx_max_packets;
> > -	u32 rx_max_packets;
> > -
> > -	unsigned long guest_offloads;
> > -	unsigned long guest_offloads_capable;
> > -
> > -	/* failover when STANDBY feature enabled */
> > -	struct failover *failover;
> > -};
> > -
> >  struct padded_vnet_hdr {
> >  	struct virtio_net_hdr_v1_hash hdr;
> >  	/*
> > @@ -303,45 +113,11 @@ struct padded_vnet_hdr {
> >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >
> > -static bool is_xdp_frame(void *ptr)
> > -{
> > -	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> > -}
> > -
> >  static void *xdp_to_ptr(struct xdp_frame *ptr)
> >  {
> >  	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
> >  }
> >
> > -static struct xdp_frame *ptr_to_xdp(void *ptr)
> > -{
> > -	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
> > -}
> > -
> > -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> > -			    struct virtnet_sq_stats *stats)
> > -{
> > -	unsigned int len;
> > -	void *ptr;
> > -
> > -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> > -		if (!is_xdp_frame(ptr)) {
> > -			struct sk_buff *skb = ptr;
> > -
> > -			pr_debug("Sent skb %p\n", skb);
> > -
> > -			stats->bytes += skb->len;
> > -			napi_consume_skb(skb, in_napi);
> > -		} else {
> > -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> > -
> > -			stats->bytes += xdp_get_frame_len(frame);
> > -			xdp_return_frame(frame);
> > -		}
> > -		stats->packets++;
> > -	}
> > -}
> > -
> >  /* Converting between virtqueue no. and kernel tx/rx queue no.
> >   * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
> >   */
> > @@ -411,15 +187,6 @@ static void disable_delayed_refill(struct virtnet_info *vi)
> >  	spin_unlock_bh(&vi->refill_lock);
> >  }
> >
> > -static void virtqueue_napi_schedule(struct napi_struct *napi,
> > -				    struct virtqueue *vq)
> > -{
> > -	if (napi_schedule_prep(napi)) {
> > -		virtqueue_disable_cb(vq);
> > -		__napi_schedule(napi);
> > -	}
> > -}
> > -
> >  static void virtqueue_napi_complete(struct napi_struct *napi,
> >  				    struct virtqueue *vq, int processed)
> >  {
> > @@ -1740,16 +1507,6 @@ static void free_old_xmit(struct send_queue *sq, bool in_napi)
> >  	u64_stats_update_end(&sq->stats.syncp);
> >  }
> >
> > -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> > -{
> > -	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> > -		return false;
> > -	else if (q < vi->curr_queue_pairs)
> > -		return true;
> > -	else
> > -		return false;
> > -}
> > -
> >  static void virtnet_poll_cleantx(struct receive_queue *rq)
> >  {
> >  	struct virtnet_info *vi = rq->vq->vdev->priv;
> > diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
> > new file mode 100644
> > index 000000000000..8bf31429ae28
> > --- /dev/null
> > +++ b/drivers/net/virtio/virtio_net.h
> > @@ -0,0 +1,265 @@
> > +/* SPDX-License-Identifier: GPL-2.0-or-later */
> > +
> > +#ifndef __VIRTIO_NET_H__
> > +#define __VIRTIO_NET_H__
> > +#include <linux/netdevice.h>
> > +#include <linux/etherdevice.h>
> > +#include <linux/ethtool.h>
> > +#include <linux/module.h>
> > +#include <linux/virtio.h>
> > +#include <linux/virtio_net.h>
> > +#include <linux/bpf.h>
> > +#include <linux/bpf_trace.h>
> > +#include <linux/scatterlist.h>
> > +#include <linux/if_vlan.h>
> > +#include <linux/slab.h>
> > +#include <linux/cpu.h>
> > +#include <linux/average.h>
> > +#include <linux/filter.h>
> > +#include <linux/kernel.h>
> > +#include <net/route.h>
> > +#include <net/xdp.h>
> > +#include <net/net_failover.h>
> > +#include <net/xdp_sock_drv.h>
> > +
> > +#define VIRTIO_XDP_FLAG	BIT(0)
> > +
> > +struct virtnet_info {
> > +	struct virtio_device *vdev;
> > +	struct virtqueue *cvq;
> > +	struct net_device *dev;
> > +	struct send_queue *sq;
> > +	struct receive_queue *rq;
> > +	unsigned int status;
> > +
> > +	/* Max # of queue pairs supported by the device */
> > +	u16 max_queue_pairs;
> > +
> > +	/* # of queue pairs currently used by the driver */
> > +	u16 curr_queue_pairs;
> > +
> > +	/* # of XDP queue pairs currently used by the driver */
> > +	u16 xdp_queue_pairs;
> > +
> > +	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> > +	bool xdp_enabled;
> > +
> > +	/* I like... big packets and I cannot lie! */
> > +	bool big_packets;
> > +
> > +	/* number of sg entries allocated for big packets */
> > +	unsigned int big_packets_num_skbfrags;
> > +
> > +	/* Host will merge rx buffers for big packets (shake it! shake it!) */
> > +	bool mergeable_rx_bufs;
> > +
> > +	/* Host supports rss and/or hash report */
> > +	bool has_rss;
> > +	bool has_rss_hash_report;
> > +	u8 rss_key_size;
> > +	u16 rss_indir_table_size;
> > +	u32 rss_hash_types_supported;
> > +	u32 rss_hash_types_saved;
> > +
> > +	/* Has control virtqueue */
> > +	bool has_cvq;
> > +
> > +	/* Host can handle any s/g split between our header and packet data */
> > +	bool any_header_sg;
> > +
> > +	/* Packet virtio header size */
> > +	u8 hdr_len;
> > +
> > +	/* Work struct for delayed refilling if we run low on memory. */
> > +	struct delayed_work refill;
> > +
> > +	/* Is delayed refill enabled? */
> > +	bool refill_enabled;
> > +
> > +	/* The lock to synchronize the access to refill_enabled */
> > +	spinlock_t refill_lock;
> > +
> > +	/* Work struct for config space updates */
> > +	struct work_struct config_work;
> > +
> > +	/* Does the affinity hint is set for virtqueues? */
> > +	bool affinity_hint_set;
> > +
> > +	/* CPU hotplug instances for online & dead */
> > +	struct hlist_node node;
> > +	struct hlist_node node_dead;
> > +
> > +	struct control_buf *ctrl;
> > +
> > +	/* Ethtool settings */
> > +	u8 duplex;
> > +	u32 speed;
> > +
> > +	/* Interrupt coalescing settings */
> > +	u32 tx_usecs;
> > +	u32 rx_usecs;
> > +	u32 tx_max_packets;
> > +	u32 rx_max_packets;
> > +
> > +	unsigned long guest_offloads;
> > +	unsigned long guest_offloads_capable;
> > +
> > +	/* failover when STANDBY feature enabled */
> > +	struct failover *failover;
> > +};
> > +
> > +/* RX packet size EWMA. The average packet size is used to determine the packet
> > + * buffer size when refilling RX rings. As the entire RX ring may be refilled
> > + * at once, the weight is chosen so that the EWMA will be insensitive to short-
> > + * term, transient changes in packet size.
> > + */
> > +DECLARE_EWMA(pkt_len, 0, 64)
> > +
> > +struct virtnet_stat_desc {
> > +	char desc[ETH_GSTRING_LEN];
> > +	size_t offset;
> > +};
> > +
> > +struct virtnet_sq_stats {
> > +	struct u64_stats_sync syncp;
> > +	u64 packets;
> > +	u64 bytes;
> > +	u64 xdp_tx;
> > +	u64 xdp_tx_drops;
> > +	u64 kicks;
> > +	u64 tx_timeouts;
> > +};
> > +
> > +struct virtnet_rq_stats {
> > +	struct u64_stats_sync syncp;
> > +	u64 packets;
> > +	u64 bytes;
> > +	u64 drops;
> > +	u64 xdp_packets;
> > +	u64 xdp_tx;
> > +	u64 xdp_redirects;
> > +	u64 xdp_drops;
> > +	u64 kicks;
> > +};
> > +
> > +#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
> > +#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
> > +
> > +/* Internal representation of a send virtqueue */
> > +struct send_queue {
> > +	/* Virtqueue associated with this send _queue */
> > +	struct virtqueue *vq;
> > +
> > +	/* TX: fragments + linear part + virtio header */
> > +	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > +
> > +	/* Name of the send queue: output.$index */
> > +	char name[16];
> > +
> > +	struct virtnet_sq_stats stats;
> > +
> > +	struct napi_struct napi;
> > +
> > +	/* Record whether sq is in reset state. */
> > +	bool reset;
> > +};
> > +
> > +/* Internal representation of a receive virtqueue */
> > +struct receive_queue {
> > +	/* Virtqueue associated with this receive_queue */
> > +	struct virtqueue *vq;
> > +
> > +	struct napi_struct napi;
> > +
> > +	struct bpf_prog __rcu *xdp_prog;
> > +
> > +	struct virtnet_rq_stats stats;
> > +
> > +	/* Chain pages by the private ptr. */
> > +	struct page *pages;
> > +
> > +	/* Average packet length for mergeable receive buffers. */
> > +	struct ewma_pkt_len mrg_avg_pkt_len;
> > +
> > +	/* Page frag for packet buffer allocation. */
> > +	struct page_frag alloc_frag;
> > +
> > +	/* RX: fragments + linear part + virtio header */
> > +	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > +
> > +	/* Min single buffer size for mergeable buffers case. */
> > +	unsigned int min_buf_len;
> > +
> > +	/* Name of this receive queue: input.$index */
> > +	char name[16];
> > +
> > +	struct xdp_rxq_info xdp_rxq;
> > +};
> > +
> > +static inline bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> > +{
> > +	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> > +		return false;
> > +	else if (q < vi->curr_queue_pairs)
> > +		return true;
> > +	else
> > +		return false;
> > +}
> > +
> > +static inline void virtnet_return_xdp_frame(struct send_queue *sq,
> > +					    struct xdp_frame *frame)
> > +{
> > +	struct virtnet_info *vi = sq->vq->vdev->priv;
> > +	dma_addr_t *p_addr, addr;
> > +
> > +	p_addr = frame->data - sizeof(*p_addr);
> > +	addr = *p_addr;
> > +
> > +	virtio_dma_unmap(&vi->vdev->dev, addr, frame->len, DMA_TO_DEVICE);
> > +
> > +	xdp_return_frame(frame);
> > +}
> > +
> > +static inline void virtqueue_napi_schedule(struct napi_struct *napi,
> > +					   struct virtqueue *vq)
> > +{
> > +	if (napi_schedule_prep(napi)) {
> > +		virtqueue_disable_cb(vq);
> > +		__napi_schedule(napi);
> > +	}
> > +}
> > +
> > +static inline bool is_xdp_frame(void *ptr)
> > +{
> > +	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> > +}
> > +
> > +static struct xdp_frame *ptr_to_xdp(void *ptr)
> > +{
> > +	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
> > +}
> > +
> > +static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> > +			    struct virtnet_sq_stats *stats)
> > +{
> > +	unsigned int len;
> > +	void *ptr;
> > +
> > +	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> > +		if (!is_xdp_frame(ptr)) {
> > +			struct sk_buff *skb = ptr;
> > +
> > +			pr_debug("Sent skb %p\n", skb);
> > +
> > +			stats->bytes += skb->len;
> > +			napi_consume_skb(skb, in_napi);
> > +		} else {
> > +			struct xdp_frame *frame = ptr_to_xdp(ptr);
> > +
> > +			stats->bytes += xdp_get_frame_len(frame);
> > +			xdp_return_frame(frame);
> > +		}
> > +		stats->packets++;
> > +	}
> > +}
> > +#endif
>
> All these APIs not prefixed with virtnet were ok as internal
> static functions. No longer ok in a header.

I agree. Will fix.

Thanks.

>
>
> > --
> > 2.32.0.3.g01195cf9f
>

WARNING: multiple messages have this Message-ID (diff)
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: "Michael S. Tsirkin" <mst@redhat.com>
Cc: "Petr Machata" <petrm@nvidia.com>,
	"Menglong Dong" <imagedong@tencent.com>,
	"Maciej Fijalkowski" <maciej.fijalkowski@intel.com>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	netdev@vger.kernel.org,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Eric Dumazet" <edumazet@google.com>,
	"Kuniyuki Iwashima" <kuniyu@amazon.com>,
	"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
	"Jonathan Lemon" <jonathan.lemon@gmail.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	bpf@vger.kernel.org, "Paolo Abeni" <pabeni@redhat.com>,
	virtualization@lists.linux-foundation.org,
	"David S. Miller" <davem@davemloft.net>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>
Subject: Re: [PATCH 15/33] virtio_net: move to virtio_net.h
Date: Fri, 3 Feb 2023 17:04:42 +0800	[thread overview]
Message-ID: <1675415082.88957-7-xuanzhuo@linux.alibaba.com> (raw)
In-Reply-To: <20230203035028-mutt-send-email-mst@kernel.org>

On Fri, 3 Feb 2023 03:53:12 -0500, "Michael S. Tsirkin" <mst@redhat.com> wrote:
> On Thu, Feb 02, 2023 at 07:00:40PM +0800, Xuan Zhuo wrote:
> > Move some structure definitions and inline functions into the
> > virtio_net.h file.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio/main.c       | 247 +----------------------------
> >  drivers/net/virtio/virtio_net.h | 265 ++++++++++++++++++++++++++++++++
> >  2 files changed, 267 insertions(+), 245 deletions(-)
> >  create mode 100644 drivers/net/virtio/virtio_net.h
> >
> > diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
> > index eb7f00194b5c..5683cb576474 100644
> > --- a/drivers/net/virtio/main.c
> > +++ b/drivers/net/virtio/main.c
> > @@ -4,24 +4,8 @@
> >   * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
> >   */
> >  //#define DEBUG
> > -#include <linux/netdevice.h>
> > -#include <linux/etherdevice.h>
> > -#include <linux/ethtool.h>
> > -#include <linux/module.h>
> > -#include <linux/virtio.h>
> > -#include <linux/virtio_net.h>
> > -#include <linux/bpf.h>
> > -#include <linux/bpf_trace.h>
> > -#include <linux/scatterlist.h>
> > -#include <linux/if_vlan.h>
> > -#include <linux/slab.h>
> > -#include <linux/cpu.h>
> > -#include <linux/average.h>
> > -#include <linux/filter.h>
> > -#include <linux/kernel.h>
> > -#include <net/route.h>
> > -#include <net/xdp.h>
> > -#include <net/net_failover.h>
> > +
> > +#include "virtio_net.h"
> >
> >  static int napi_weight = NAPI_POLL_WEIGHT;
> >  module_param(napi_weight, int, 0444);
>
>
> You should only move the headers that are actually needed, not
> everything.

You mean the "include" directives.

I think it is a simple way to concentrate the includes into a single header
file, which the other .c files can then reference.

Do you agree?

Thanks.

>
>
> > @@ -44,15 +28,6 @@ module_param(napi_tx, bool, 0644);
> >  #define VIRTIO_XDP_TX		BIT(0)
> >  #define VIRTIO_XDP_REDIR	BIT(1)
> >
> > -#define VIRTIO_XDP_FLAG	BIT(0)
> > -
> > -/* RX packet size EWMA. The average packet size is used to determine the packet
> > - * buffer size when refilling RX rings. As the entire RX ring may be refilled
> > - * at once, the weight is chosen so that the EWMA will be insensitive to short-
> > - * term, transient changes in packet size.
> > - */
> > -DECLARE_EWMA(pkt_len, 0, 64)
> > -
> >  #define VIRTNET_DRIVER_VERSION "1.0.0"
> >
> >  static const unsigned long guest_offloads[] = {
> > @@ -72,36 +47,6 @@ static const unsigned long guest_offloads[] = {
> >  				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
> >  				(1ULL << VIRTIO_NET_F_GUEST_USO6))
> >
> > -struct virtnet_stat_desc {
> > -	char desc[ETH_GSTRING_LEN];
> > -	size_t offset;
> > -};
> > -
> > -struct virtnet_sq_stats {
> > -	struct u64_stats_sync syncp;
> > -	u64 packets;
> > -	u64 bytes;
> > -	u64 xdp_tx;
> > -	u64 xdp_tx_drops;
> > -	u64 kicks;
> > -	u64 tx_timeouts;
> > -};
> > -
> > -struct virtnet_rq_stats {
> > -	struct u64_stats_sync syncp;
> > -	u64 packets;
> > -	u64 bytes;
> > -	u64 drops;
> > -	u64 xdp_packets;
> > -	u64 xdp_tx;
> > -	u64 xdp_redirects;
> > -	u64 xdp_drops;
> > -	u64 kicks;
> > -};
> > -
> > -#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
> > -#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
> > -
> >  static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
> >  	{ "packets",		VIRTNET_SQ_STAT(packets) },
> >  	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
> > @@ -125,57 +70,6 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> >  #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
> >  #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
> >
> > -/* Internal representation of a send virtqueue */
> > -struct send_queue {
> > -	/* Virtqueue associated with this send _queue */
> > -	struct virtqueue *vq;
> > -
> > -	/* TX: fragments + linear part + virtio header */
> > -	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > -
> > -	/* Name of the send queue: output.$index */
> > -	char name[16];
> > -
> > -	struct virtnet_sq_stats stats;
> > -
> > -	struct napi_struct napi;
> > -
> > -	/* Record whether sq is in reset state. */
> > -	bool reset;
> > -};
> > -
> > -/* Internal representation of a receive virtqueue */
> > -struct receive_queue {
> > -	/* Virtqueue associated with this receive_queue */
> > -	struct virtqueue *vq;
> > -
> > -	struct napi_struct napi;
> > -
> > -	struct bpf_prog __rcu *xdp_prog;
> > -
> > -	struct virtnet_rq_stats stats;
> > -
> > -	/* Chain pages by the private ptr. */
> > -	struct page *pages;
> > -
> > -	/* Average packet length for mergeable receive buffers. */
> > -	struct ewma_pkt_len mrg_avg_pkt_len;
> > -
> > -	/* Page frag for packet buffer allocation. */
> > -	struct page_frag alloc_frag;
> > -
> > -	/* RX: fragments + linear part + virtio header */
> > -	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > -
> > -	/* Min single buffer size for mergeable buffers case. */
> > -	unsigned int min_buf_len;
> > -
> > -	/* Name of this receive queue: input.$index */
> > -	char name[16];
> > -
> > -	struct xdp_rxq_info xdp_rxq;
> > -};
> > -
> >  /* This structure can contain rss message with maximum settings for indirection table and keysize
> >   * Note, that default structure that describes RSS configuration virtio_net_rss_config
> >   * contains same info but can't handle table values.
> > @@ -206,90 +100,6 @@ struct control_buf {
> >  	struct virtio_net_ctrl_rss rss;
> >  };
> >
> > -struct virtnet_info {
> > -	struct virtio_device *vdev;
> > -	struct virtqueue *cvq;
> > -	struct net_device *dev;
> > -	struct send_queue *sq;
> > -	struct receive_queue *rq;
> > -	unsigned int status;
> > -
> > -	/* Max # of queue pairs supported by the device */
> > -	u16 max_queue_pairs;
> > -
> > -	/* # of queue pairs currently used by the driver */
> > -	u16 curr_queue_pairs;
> > -
> > -	/* # of XDP queue pairs currently used by the driver */
> > -	u16 xdp_queue_pairs;
> > -
> > -	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> > -	bool xdp_enabled;
> > -
> > -	/* I like... big packets and I cannot lie! */
> > -	bool big_packets;
> > -
> > -	/* number of sg entries allocated for big packets */
> > -	unsigned int big_packets_num_skbfrags;
> > -
> > -	/* Host will merge rx buffers for big packets (shake it! shake it!) */
> > -	bool mergeable_rx_bufs;
> > -
> > -	/* Host supports rss and/or hash report */
> > -	bool has_rss;
> > -	bool has_rss_hash_report;
> > -	u8 rss_key_size;
> > -	u16 rss_indir_table_size;
> > -	u32 rss_hash_types_supported;
> > -	u32 rss_hash_types_saved;
> > -
> > -	/* Has control virtqueue */
> > -	bool has_cvq;
> > -
> > -	/* Host can handle any s/g split between our header and packet data */
> > -	bool any_header_sg;
> > -
> > -	/* Packet virtio header size */
> > -	u8 hdr_len;
> > -
> > -	/* Work struct for delayed refilling if we run low on memory. */
> > -	struct delayed_work refill;
> > -
> > -	/* Is delayed refill enabled? */
> > -	bool refill_enabled;
> > -
> > -	/* The lock to synchronize the access to refill_enabled */
> > -	spinlock_t refill_lock;
> > -
> > -	/* Work struct for config space updates */
> > -	struct work_struct config_work;
> > -
> > -	/* Does the affinity hint is set for virtqueues? */
> > -	bool affinity_hint_set;
> > -
> > -	/* CPU hotplug instances for online & dead */
> > -	struct hlist_node node;
> > -	struct hlist_node node_dead;
> > -
> > -	struct control_buf *ctrl;
> > -
> > -	/* Ethtool settings */
> > -	u8 duplex;
> > -	u32 speed;
> > -
> > -	/* Interrupt coalescing settings */
> > -	u32 tx_usecs;
> > -	u32 rx_usecs;
> > -	u32 tx_max_packets;
> > -	u32 rx_max_packets;
> > -
> > -	unsigned long guest_offloads;
> > -	unsigned long guest_offloads_capable;
> > -
> > -	/* failover when STANDBY feature enabled */
> > -	struct failover *failover;
> > -};
> > -
> >  struct padded_vnet_hdr {
> >  	struct virtio_net_hdr_v1_hash hdr;
> >  	/*
> > @@ -303,45 +113,11 @@ struct padded_vnet_hdr {
> >  static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> >  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> >
> > -static bool is_xdp_frame(void *ptr)
> > -{
> > -	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> > -}
> > -
> >  static void *xdp_to_ptr(struct xdp_frame *ptr)
> >  {
> >  	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
> >  }
> >
> > -static struct xdp_frame *ptr_to_xdp(void *ptr)
> > -{
> > -	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
> > -}
> > -
> > -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> > -			    struct virtnet_sq_stats *stats)
> > -{
> > -	unsigned int len;
> > -	void *ptr;
> > -
> > -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> > -		if (!is_xdp_frame(ptr)) {
> > -			struct sk_buff *skb = ptr;
> > -
> > -			pr_debug("Sent skb %p\n", skb);
> > -
> > -			stats->bytes += skb->len;
> > -			napi_consume_skb(skb, in_napi);
> > -		} else {
> > -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> > -
> > -			stats->bytes += xdp_get_frame_len(frame);
> > -			xdp_return_frame(frame);
> > -		}
> > -		stats->packets++;
> > -	}
> > -}
> > -
> >  /* Converting between virtqueue no. and kernel tx/rx queue no.
> >   * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
> >   */
> > @@ -411,15 +187,6 @@ static void disable_delayed_refill(struct virtnet_info *vi)
> >  	spin_unlock_bh(&vi->refill_lock);
> >  }
> >
> > -static void virtqueue_napi_schedule(struct napi_struct *napi,
> > -				    struct virtqueue *vq)
> > -{
> > -	if (napi_schedule_prep(napi)) {
> > -		virtqueue_disable_cb(vq);
> > -		__napi_schedule(napi);
> > -	}
> > -}
> > -
> >  static void virtqueue_napi_complete(struct napi_struct *napi,
> >  				    struct virtqueue *vq, int processed)
> >  {
> > @@ -1740,16 +1507,6 @@ static void free_old_xmit(struct send_queue *sq, bool in_napi)
> >  	u64_stats_update_end(&sq->stats.syncp);
> >  }
> >
> > -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> > -{
> > -	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> > -		return false;
> > -	else if (q < vi->curr_queue_pairs)
> > -		return true;
> > -	else
> > -		return false;
> > -}
> > -
> >  static void virtnet_poll_cleantx(struct receive_queue *rq)
> >  {
> >  	struct virtnet_info *vi = rq->vq->vdev->priv;
> > diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
> > new file mode 100644
> > index 000000000000..8bf31429ae28
> > --- /dev/null
> > +++ b/drivers/net/virtio/virtio_net.h
> > @@ -0,0 +1,265 @@
> > +/* SPDX-License-Identifier: GPL-2.0-or-later */
> > +
> > +#ifndef __VIRTIO_NET_H__
> > +#define __VIRTIO_NET_H__
> > +#include <linux/netdevice.h>
> > +#include <linux/etherdevice.h>
> > +#include <linux/ethtool.h>
> > +#include <linux/module.h>
> > +#include <linux/virtio.h>
> > +#include <linux/virtio_net.h>
> > +#include <linux/bpf.h>
> > +#include <linux/bpf_trace.h>
> > +#include <linux/scatterlist.h>
> > +#include <linux/if_vlan.h>
> > +#include <linux/slab.h>
> > +#include <linux/cpu.h>
> > +#include <linux/average.h>
> > +#include <linux/filter.h>
> > +#include <linux/kernel.h>
> > +#include <net/route.h>
> > +#include <net/xdp.h>
> > +#include <net/net_failover.h>
> > +#include <net/xdp_sock_drv.h>
> > +
> > +#define VIRTIO_XDP_FLAG	BIT(0)
> > +
> > +struct virtnet_info {
> > +	struct virtio_device *vdev;
> > +	struct virtqueue *cvq;
> > +	struct net_device *dev;
> > +	struct send_queue *sq;
> > +	struct receive_queue *rq;
> > +	unsigned int status;
> > +
> > +	/* Max # of queue pairs supported by the device */
> > +	u16 max_queue_pairs;
> > +
> > +	/* # of queue pairs currently used by the driver */
> > +	u16 curr_queue_pairs;
> > +
> > +	/* # of XDP queue pairs currently used by the driver */
> > +	u16 xdp_queue_pairs;
> > +
> > +	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> > +	bool xdp_enabled;
> > +
> > +	/* I like... big packets and I cannot lie! */
> > +	bool big_packets;
> > +
> > +	/* number of sg entries allocated for big packets */
> > +	unsigned int big_packets_num_skbfrags;
> > +
> > +	/* Host will merge rx buffers for big packets (shake it! shake it!) */
> > +	bool mergeable_rx_bufs;
> > +
> > +	/* Host supports rss and/or hash report */
> > +	bool has_rss;
> > +	bool has_rss_hash_report;
> > +	u8 rss_key_size;
> > +	u16 rss_indir_table_size;
> > +	u32 rss_hash_types_supported;
> > +	u32 rss_hash_types_saved;
> > +
> > +	/* Has control virtqueue */
> > +	bool has_cvq;
> > +
> > +	/* Host can handle any s/g split between our header and packet data */
> > +	bool any_header_sg;
> > +
> > +	/* Packet virtio header size */
> > +	u8 hdr_len;
> > +
> > +	/* Work struct for delayed refilling if we run low on memory. */
> > +	struct delayed_work refill;
> > +
> > +	/* Is delayed refill enabled? */
> > +	bool refill_enabled;
> > +
> > +	/* The lock to synchronize the access to refill_enabled */
> > +	spinlock_t refill_lock;
> > +
> > +	/* Work struct for config space updates */
> > +	struct work_struct config_work;
> > +
> > +	/* Is the affinity hint set for virtqueues? */
> > +	bool affinity_hint_set;
> > +
> > +	/* CPU hotplug instances for online & dead */
> > +	struct hlist_node node;
> > +	struct hlist_node node_dead;
> > +
> > +	struct control_buf *ctrl;
> > +
> > +	/* Ethtool settings */
> > +	u8 duplex;
> > +	u32 speed;
> > +
> > +	/* Interrupt coalescing settings */
> > +	u32 tx_usecs;
> > +	u32 rx_usecs;
> > +	u32 tx_max_packets;
> > +	u32 rx_max_packets;
> > +
> > +	unsigned long guest_offloads;
> > +	unsigned long guest_offloads_capable;
> > +
> > +	/* failover when STANDBY feature enabled */
> > +	struct failover *failover;
> > +};
> > +
> > +/* RX packet size EWMA. The average packet size is used to determine the packet
> > + * buffer size when refilling RX rings. As the entire RX ring may be refilled
> > + * at once, the weight is chosen so that the EWMA will be insensitive to short-
> > + * term, transient changes in packet size.
> > + */
> > +DECLARE_EWMA(pkt_len, 0, 64)
> > +
> > +struct virtnet_stat_desc {
> > +	char desc[ETH_GSTRING_LEN];
> > +	size_t offset;
> > +};
> > +
> > +struct virtnet_sq_stats {
> > +	struct u64_stats_sync syncp;
> > +	u64 packets;
> > +	u64 bytes;
> > +	u64 xdp_tx;
> > +	u64 xdp_tx_drops;
> > +	u64 kicks;
> > +	u64 tx_timeouts;
> > +};
> > +
> > +struct virtnet_rq_stats {
> > +	struct u64_stats_sync syncp;
> > +	u64 packets;
> > +	u64 bytes;
> > +	u64 drops;
> > +	u64 xdp_packets;
> > +	u64 xdp_tx;
> > +	u64 xdp_redirects;
> > +	u64 xdp_drops;
> > +	u64 kicks;
> > +};
> > +
> > +#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
> > +#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
> > +
> > +/* Internal representation of a send virtqueue */
> > +struct send_queue {
> > +	/* Virtqueue associated with this send_queue */
> > +	struct virtqueue *vq;
> > +
> > +	/* TX: fragments + linear part + virtio header */
> > +	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > +
> > +	/* Name of the send queue: output.$index */
> > +	char name[16];
> > +
> > +	struct virtnet_sq_stats stats;
> > +
> > +	struct napi_struct napi;
> > +
> > +	/* Record whether sq is in reset state. */
> > +	bool reset;
> > +};
> > +
> > +/* Internal representation of a receive virtqueue */
> > +struct receive_queue {
> > +	/* Virtqueue associated with this receive_queue */
> > +	struct virtqueue *vq;
> > +
> > +	struct napi_struct napi;
> > +
> > +	struct bpf_prog __rcu *xdp_prog;
> > +
> > +	struct virtnet_rq_stats stats;
> > +
> > +	/* Chain pages by the private ptr. */
> > +	struct page *pages;
> > +
> > +	/* Average packet length for mergeable receive buffers. */
> > +	struct ewma_pkt_len mrg_avg_pkt_len;
> > +
> > +	/* Page frag for packet buffer allocation. */
> > +	struct page_frag alloc_frag;
> > +
> > +	/* RX: fragments + linear part + virtio header */
> > +	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> > +
> > +	/* Min single buffer size for mergeable buffers case. */
> > +	unsigned int min_buf_len;
> > +
> > +	/* Name of this receive queue: input.$index */
> > +	char name[16];
> > +
> > +	struct xdp_rxq_info xdp_rxq;
> > +};
> > +
> > +static inline bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> > +{
> > +	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> > +		return false;
> > +	else if (q < vi->curr_queue_pairs)
> > +		return true;
> > +	else
> > +		return false;
> > +}
> > +
> > +static inline void virtnet_return_xdp_frame(struct send_queue *sq,
> > +					    struct xdp_frame *frame)
> > +{
> > +	struct virtnet_info *vi = sq->vq->vdev->priv;
> > +	dma_addr_t *p_addr, addr;
> > +
> > +	p_addr = frame->data - sizeof(*p_addr);
> > +	addr = *p_addr;
> > +
> > +	virtio_dma_unmap(&vi->vdev->dev, addr, frame->len, DMA_TO_DEVICE);
> > +
> > +	xdp_return_frame(frame);
> > +}
> > +
> > +static inline void virtqueue_napi_schedule(struct napi_struct *napi,
> > +					   struct virtqueue *vq)
> > +{
> > +	if (napi_schedule_prep(napi)) {
> > +		virtqueue_disable_cb(vq);
> > +		__napi_schedule(napi);
> > +	}
> > +}
> > +
> > +static inline bool is_xdp_frame(void *ptr)
> > +{
> > +	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> > +}
> > +
> > +static struct xdp_frame *ptr_to_xdp(void *ptr)
> > +{
> > +	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
> > +}
> > +
> > +static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> > +			    struct virtnet_sq_stats *stats)
> > +{
> > +	unsigned int len;
> > +	void *ptr;
> > +
> > +	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> > +		if (!is_xdp_frame(ptr)) {
> > +			struct sk_buff *skb = ptr;
> > +
> > +			pr_debug("Sent skb %p\n", skb);
> > +
> > +			stats->bytes += skb->len;
> > +			napi_consume_skb(skb, in_napi);
> > +		} else {
> > +			struct xdp_frame *frame = ptr_to_xdp(ptr);
> > +
> > +			stats->bytes += xdp_get_frame_len(frame);
> > +			xdp_return_frame(frame);
> > +		}
> > +		stats->packets++;
> > +	}
> > +}
> > +#endif
>
> All these APIs not prefixed with virtnet were ok as internal
> static functions. No longer ok in a header.

I agree. Will fix.

Thanks.

>
>
> > --
> > 2.32.0.3.g01195cf9f
>
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

  reply	other threads:[~2023-02-03  9:06 UTC|newest]

Thread overview: 155+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-02-02 11:00 [PATCH 00/33] virtio-net: support AF_XDP zero copy Xuan Zhuo
2023-02-02 11:00 ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 01/33] virtio_ring: virtqueue_add() support premapped Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 02/33] virtio_ring: split: virtqueue_add_split() " Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 03/33] virtio_ring: packed: virtqueue_add_packed() " Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  9:16   ` Michael S. Tsirkin
2023-02-03  9:16     ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 04/33] virtio_ring: introduce virtqueue_add_outbuf_premapped() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 05/33] virtio_ring: introduce virtqueue_add_inbuf_premapped() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 06/33] virtio_ring: introduce virtqueue_reset() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  9:05   ` Michael S. Tsirkin
2023-02-03  9:05     ` Michael S. Tsirkin
2023-02-03  9:09     ` Xuan Zhuo
2023-02-03  9:09       ` Xuan Zhuo
2023-02-13 12:15       ` Michael S. Tsirkin
2023-02-13 12:15         ` Michael S. Tsirkin
2023-02-14  1:53         ` Xuan Zhuo
2023-02-14  1:53           ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 07/33] virtio_ring: add api virtio_dma_map() for advance dma Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  9:07   ` Michael S. Tsirkin
2023-02-03  9:07     ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 08/33] virtio_ring: introduce dma sync api for virtio Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 12:44   ` Magnus Karlsson
2023-02-03  9:24   ` Michael S. Tsirkin
2023-02-03  9:24     ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 09/33] xsk: xsk_buff_pool add callback for dma_sync Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 12:51   ` Magnus Karlsson
2023-02-03  7:01     ` Xuan Zhuo
2023-02-03  7:01       ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 10/33] xsk: support virtio DMA map Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-05 22:04   ` kernel test robot
2023-02-05 22:04     ` kernel test robot
2023-02-02 11:00 ` [PATCH 11/33] virtio_net: rename free_old_xmit_skbs to free_old_xmit Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 12/33] virtio_net: unify the code for recycling the xmit ptr Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 13/33] virtio_net: virtnet_poll_tx support rescheduled Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 14/33] virtio_net: independent directory Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 15/33] virtio_net: move to virtio_net.h Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  8:53   ` Michael S. Tsirkin
2023-02-03  8:53     ` Michael S. Tsirkin
2023-02-03  9:04     ` Xuan Zhuo [this message]
2023-02-03  9:04       ` Xuan Zhuo
2023-02-03  9:26       ` Michael S. Tsirkin
2023-02-03  9:26         ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 16/33] virtio_net: introduce virtnet_xdp_handler() to seprate the logic of run xdp Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  8:55   ` Michael S. Tsirkin
2023-02-03  8:55     ` Michael S. Tsirkin
2023-02-03  9:01     ` Xuan Zhuo
2023-02-03  9:01       ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 17/33] virtio_net: receive_small() use virtnet_xdp_handler() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 18/33] virtio_net: receive_merageable() " Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 17:16   ` Michael S. Tsirkin
2023-02-02 17:16     ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 19/33] virtio_net: introduce virtnet_tx_reset() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 17:23   ` Michael S. Tsirkin
2023-02-02 17:23     ` Michael S. Tsirkin
2023-02-03  4:35     ` Xuan Zhuo
2023-02-03  4:35       ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 20/33] virtio_net: xsk: introduce virtnet_rq_bind_xsk_pool() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  8:48   ` Michael S. Tsirkin
2023-02-03  8:48     ` Michael S. Tsirkin
2023-02-03  8:52     ` Xuan Zhuo
2023-02-03  8:52       ` Xuan Zhuo
2023-02-03  9:28       ` Michael S. Tsirkin
2023-02-03  9:28         ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 21/33] virtio_net: xsk: introduce virtnet_xsk_pool_enable() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 22/33] virtio_net: xsk: introduce xsk disable Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 23:02   ` kernel test robot
2023-02-02 23:02     ` kernel test robot
2023-02-12  7:56   ` kernel test robot
2023-02-12  7:56     ` kernel test robot
2023-02-02 11:00 ` [PATCH 23/33] virtio_net: xsk: support xsk setup Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 24/33] virtio_net: xsk: stop disable tx napi Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 17:25   ` Michael S. Tsirkin
2023-02-02 17:25     ` Michael S. Tsirkin
2023-02-03  3:24     ` Xuan Zhuo
2023-02-03  3:24       ` Xuan Zhuo
2023-02-03  8:33       ` Michael S. Tsirkin
2023-02-03  8:33         ` Michael S. Tsirkin
2023-02-03  8:49         ` Xuan Zhuo
2023-02-03  8:49           ` Xuan Zhuo
2023-02-03  9:29           ` Michael S. Tsirkin
2023-02-03  9:29             ` Michael S. Tsirkin
2023-02-02 11:00 ` [PATCH 25/33] virtio_net: xsk: __free_old_xmit distinguishes xsk buffer Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 26/33] virtio_net: virtnet_sq_free_unused_buf() check " Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 27/33] virtio_net: virtnet_rq_free_unused_buf() " Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 28/33] net: introduce napi_tx_raise() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 29/33] virtio_net: xsk: tx: support tx Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  8:39   ` Maciej Fijalkowski
2023-02-03  8:55     ` Xuan Zhuo
2023-02-03  8:55       ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 30/33] virtio_net: xsk: tx: support wakeup Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 31/33] virtio_net: xsk: tx: auto wakeup when free old xmit Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 32/33] virtio_net: xsk: rx: introduce add_recvbuf_xsk() Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-03  8:43   ` Maciej Fijalkowski
2023-02-03  8:56     ` Xuan Zhuo
2023-02-03  8:56       ` Xuan Zhuo
2023-02-02 11:00 ` [PATCH 33/33] virtio_net: xsk: rx: introduce receive_xsk() to recv xsk buffer Xuan Zhuo
2023-02-02 11:00   ` Xuan Zhuo
2023-02-02 11:08 ` [PATCH 00/33] virtio-net: support AF_XDP zero copy Xuan Zhuo
2023-02-02 11:08 ` Michael S. Tsirkin
2023-02-02 11:08   ` Michael S. Tsirkin
2023-02-02 11:11   ` Xuan Zhuo
2023-02-02 11:44   ` Xuan Zhuo
2023-02-02 11:44     ` Xuan Zhuo
2023-02-03  9:08     ` Michael S. Tsirkin
2023-02-03  9:08       ` Michael S. Tsirkin
2023-02-03  9:09       ` Xuan Zhuo
2023-02-03  9:09         ` Xuan Zhuo
2023-02-02 14:41 ` Paolo Abeni
2023-02-02 14:41   ` Paolo Abeni
2023-02-03  3:33   ` Xuan Zhuo
2023-02-03  3:33     ` Xuan Zhuo
2023-02-03  8:37     ` Michael S. Tsirkin
2023-02-03  8:37       ` Michael S. Tsirkin
2023-02-03  8:46       ` Maciej Fijalkowski
2023-02-03  9:09         ` Michael S. Tsirkin
2023-02-03  9:09           ` Michael S. Tsirkin
2023-02-03  9:17     ` Michael S. Tsirkin
2023-02-03  9:17       ` Michael S. Tsirkin
2023-02-06  2:41       ` Xuan Zhuo
2023-02-06  2:41         ` Xuan Zhuo
2023-02-13 12:14         ` Michael S. Tsirkin
2023-02-13 12:14           ` Michael S. Tsirkin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1675415082.88957-7-xuanzhuo@linux.alibaba.com \
    --to=xuanzhuo@linux.alibaba.com \
    --cc=ast@kernel.org \
    --cc=bigeasy@linutronix.de \
    --cc=bjorn@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=hawk@kernel.org \
    --cc=imagedong@tencent.com \
    --cc=jasowang@redhat.com \
    --cc=john.fastabend@gmail.com \
    --cc=jonathan.lemon@gmail.com \
    --cc=kuba@kernel.org \
    --cc=kuniyu@amazon.com \
    --cc=maciej.fijalkowski@intel.com \
    --cc=magnus.karlsson@intel.com \
    --cc=mst@redhat.com \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    --cc=petrm@nvidia.com \
    --cc=virtualization@lists.linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.