From: Jason Wang <jasowang@redhat.com>
To: Xuan Zhuo <xuanzhuo@linux.alibaba.com>, netdev@vger.kernel.org
Cc: "David S. Miller" <davem@davemloft.net>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Björn Töpel" <bjorn@kernel.org>,
	"Magnus Karlsson" <magnus.karlsson@intel.com>,
	"Jonathan Lemon" <jonathan.lemon@gmail.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Andrii Nakryiko" <andrii@kernel.org>,
	"Martin KaFai Lau" <kafai@fb.com>,
	"Song Liu" <songliubraving@fb.com>, "Yonghong Song" <yhs@fb.com>,
	"KP Singh" <kpsingh@kernel.org>,
	virtualization@lists.linux-foundation.org, bpf@vger.kernel.org,
	"dust . li" <dust.li@linux.alibaba.com>
Subject: Re: [PATCH net-next v5 11/15] virtio-net: move to virtio_net.h
Date: Wed, 16 Jun 2021 15:35:06 +0800	[thread overview]
Message-ID: <82588c26-465e-2caf-8f35-10b529faab36@redhat.com> (raw)
In-Reply-To: <20210610082209.91487-12-xuanzhuo@linux.alibaba.com>


On 2021/6/10 4:22 PM, Xuan Zhuo wrote:
> Move some structure definitions and inline functions into the
> virtio_net.h file.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>


Acked-by: Jason Wang <jasowang@redhat.com>


> ---
>   drivers/net/virtio/virtio_net.c | 225 +------------------------------
>   drivers/net/virtio/virtio_net.h | 230 ++++++++++++++++++++++++++++++++
>   2 files changed, 232 insertions(+), 223 deletions(-)
>   create mode 100644 drivers/net/virtio/virtio_net.h
>
> diff --git a/drivers/net/virtio/virtio_net.c b/drivers/net/virtio/virtio_net.c
> index 953739860563..395ec1f18331 100644
> --- a/drivers/net/virtio/virtio_net.c
> +++ b/drivers/net/virtio/virtio_net.c
> @@ -4,24 +4,8 @@
>    * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
>    */
>   //#define DEBUG
> -#include <linux/netdevice.h>
> -#include <linux/etherdevice.h>
> -#include <linux/ethtool.h>
> -#include <linux/module.h>
> -#include <linux/virtio.h>
> -#include <linux/virtio_net.h>
> -#include <linux/bpf.h>
> -#include <linux/bpf_trace.h>
> -#include <linux/scatterlist.h>
> -#include <linux/if_vlan.h>
> -#include <linux/slab.h>
> -#include <linux/cpu.h>
> -#include <linux/average.h>
> -#include <linux/filter.h>
> -#include <linux/kernel.h>
> -#include <net/route.h>
> -#include <net/xdp.h>
> -#include <net/net_failover.h>
> +
> +#include "virtio_net.h"
>   
>   static int napi_weight = NAPI_POLL_WEIGHT;
>   module_param(napi_weight, int, 0444);
> @@ -44,15 +28,6 @@ module_param(napi_tx, bool, 0644);
>   #define VIRTIO_XDP_TX		BIT(0)
>   #define VIRTIO_XDP_REDIR	BIT(1)
>   
> -#define VIRTIO_XDP_FLAG	BIT(0)
> -
> -/* RX packet size EWMA. The average packet size is used to determine the packet
> - * buffer size when refilling RX rings. As the entire RX ring may be refilled
> - * at once, the weight is chosen so that the EWMA will be insensitive to short-
> - * term, transient changes in packet size.
> - */
> -DECLARE_EWMA(pkt_len, 0, 64)
> -
>   #define VIRTNET_DRIVER_VERSION "1.0.0"
>   
>   static const unsigned long guest_offloads[] = {
> @@ -68,35 +43,6 @@ static const unsigned long guest_offloads[] = {
>   				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
>   				(1ULL << VIRTIO_NET_F_GUEST_UFO))
>   
> -struct virtnet_stat_desc {
> -	char desc[ETH_GSTRING_LEN];
> -	size_t offset;
> -};
> -
> -struct virtnet_sq_stats {
> -	struct u64_stats_sync syncp;
> -	u64 packets;
> -	u64 bytes;
> -	u64 xdp_tx;
> -	u64 xdp_tx_drops;
> -	u64 kicks;
> -};
> -
> -struct virtnet_rq_stats {
> -	struct u64_stats_sync syncp;
> -	u64 packets;
> -	u64 bytes;
> -	u64 drops;
> -	u64 xdp_packets;
> -	u64 xdp_tx;
> -	u64 xdp_redirects;
> -	u64 xdp_drops;
> -	u64 kicks;
> -};
> -
> -#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
> -#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
> -
>   static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
>   	{ "packets",		VIRTNET_SQ_STAT(packets) },
>   	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
> @@ -119,54 +65,6 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
>   #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
>   #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
>   
> -/* Internal representation of a send virtqueue */
> -struct send_queue {
> -	/* Virtqueue associated with this send _queue */
> -	struct virtqueue *vq;
> -
> -	/* TX: fragments + linear part + virtio header */
> -	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> -
> -	/* Name of the send queue: output.$index */
> -	char name[40];
> -
> -	struct virtnet_sq_stats stats;
> -
> -	struct napi_struct napi;
> -};
> -
> -/* Internal representation of a receive virtqueue */
> -struct receive_queue {
> -	/* Virtqueue associated with this receive_queue */
> -	struct virtqueue *vq;
> -
> -	struct napi_struct napi;
> -
> -	struct bpf_prog __rcu *xdp_prog;
> -
> -	struct virtnet_rq_stats stats;
> -
> -	/* Chain pages by the private ptr. */
> -	struct page *pages;
> -
> -	/* Average packet length for mergeable receive buffers. */
> -	struct ewma_pkt_len mrg_avg_pkt_len;
> -
> -	/* Page frag for packet buffer allocation. */
> -	struct page_frag alloc_frag;
> -
> -	/* RX: fragments + linear part + virtio header */
> -	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> -
> -	/* Min single buffer size for mergeable buffers case. */
> -	unsigned int min_buf_len;
> -
> -	/* Name of this receive queue: input.$index */
> -	char name[40];
> -
> -	struct xdp_rxq_info xdp_rxq;
> -};
> -
>   /* Control VQ buffers: protected by the rtnl lock */
>   struct control_buf {
>   	struct virtio_net_ctrl_hdr hdr;
> @@ -178,67 +76,6 @@ struct control_buf {
>   	__virtio64 offloads;
>   };
>   
> -struct virtnet_info {
> -	struct virtio_device *vdev;
> -	struct virtqueue *cvq;
> -	struct net_device *dev;
> -	struct send_queue *sq;
> -	struct receive_queue *rq;
> -	unsigned int status;
> -
> -	/* Max # of queue pairs supported by the device */
> -	u16 max_queue_pairs;
> -
> -	/* # of queue pairs currently used by the driver */
> -	u16 curr_queue_pairs;
> -
> -	/* # of XDP queue pairs currently used by the driver */
> -	u16 xdp_queue_pairs;
> -
> -	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> -	bool xdp_enabled;
> -
> -	/* I like... big packets and I cannot lie! */
> -	bool big_packets;
> -
> -	/* Host will merge rx buffers for big packets (shake it! shake it!) */
> -	bool mergeable_rx_bufs;
> -
> -	/* Has control virtqueue */
> -	bool has_cvq;
> -
> -	/* Host can handle any s/g split between our header and packet data */
> -	bool any_header_sg;
> -
> -	/* Packet virtio header size */
> -	u8 hdr_len;
> -
> -	/* Work struct for refilling if we run low on memory. */
> -	struct delayed_work refill;
> -
> -	/* Work struct for config space updates */
> -	struct work_struct config_work;
> -
> -	/* Does the affinity hint is set for virtqueues? */
> -	bool affinity_hint_set;
> -
> -	/* CPU hotplug instances for online & dead */
> -	struct hlist_node node;
> -	struct hlist_node node_dead;
> -
> -	struct control_buf *ctrl;
> -
> -	/* Ethtool settings */
> -	u8 duplex;
> -	u32 speed;
> -
> -	unsigned long guest_offloads;
> -	unsigned long guest_offloads_capable;
> -
> -	/* failover when STANDBY feature enabled */
> -	struct failover *failover;
> -};
> -
>   struct padded_vnet_hdr {
>   	struct virtio_net_hdr_mrg_rxbuf hdr;
>   	/*
> @@ -249,21 +86,6 @@ struct padded_vnet_hdr {
>   	char padding[4];
>   };
>   
> -static bool is_xdp_frame(void *ptr)
> -{
> -	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> -}
> -
> -static void *xdp_to_ptr(struct xdp_frame *ptr)
> -{
> -	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
> -}
> -
> -static struct xdp_frame *ptr_to_xdp(void *ptr)
> -{
> -	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
> -}
> -
>   static char *virtnet_alloc_frag(struct receive_queue *rq, unsigned int len,
>   				int gfp)
>   {
> @@ -280,30 +102,6 @@ static char *virtnet_alloc_frag(struct receive_queue *rq, unsigned int len,
>   	return buf;
>   }
>   
> -static void __free_old_xmit(struct send_queue *sq, bool in_napi,
> -			    struct virtnet_sq_stats *stats)
> -{
> -	unsigned int len;
> -	void *ptr;
> -
> -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -		if (!is_xdp_frame(ptr)) {
> -			struct sk_buff *skb = ptr;
> -
> -			pr_debug("Sent skb %p\n", skb);
> -
> -			stats->bytes += skb->len;
> -			napi_consume_skb(skb, in_napi);
> -		} else {
> -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> -
> -			stats->bytes += frame->len;
> -			xdp_return_frame(frame);
> -		}
> -		stats->packets++;
> -	}
> -}
> -
>   /* Converting between virtqueue no. and kernel tx/rx queue no.
>    * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
>    */
> @@ -359,15 +157,6 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
>   	return p;
>   }
>   
> -static void virtqueue_napi_schedule(struct napi_struct *napi,
> -				    struct virtqueue *vq)
> -{
> -	if (napi_schedule_prep(napi)) {
> -		virtqueue_disable_cb(vq);
> -		__napi_schedule(napi);
> -	}
> -}
> -
>   static void virtqueue_napi_complete(struct napi_struct *napi,
>   				    struct virtqueue *vq, int processed)
>   {
> @@ -1537,16 +1326,6 @@ static void free_old_xmit(struct send_queue *sq, bool in_napi)
>   	u64_stats_update_end(&sq->stats.syncp);
>   }
>   
> -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> -{
> -	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> -		return false;
> -	else if (q < vi->curr_queue_pairs)
> -		return true;
> -	else
> -		return false;
> -}
> -
>   static void virtnet_poll_cleantx(struct receive_queue *rq)
>   {
>   	struct virtnet_info *vi = rq->vq->vdev->priv;
> diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
> new file mode 100644
> index 000000000000..931cc81f92fb
> --- /dev/null
> +++ b/drivers/net/virtio/virtio_net.h
> @@ -0,0 +1,230 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +
> +#ifndef __VIRTIO_NET_H__
> +#define __VIRTIO_NET_H__
> +#include <linux/netdevice.h>
> +#include <linux/etherdevice.h>
> +#include <linux/ethtool.h>
> +#include <linux/module.h>
> +#include <linux/virtio.h>
> +#include <linux/virtio_net.h>
> +#include <linux/bpf.h>
> +#include <linux/bpf_trace.h>
> +#include <linux/scatterlist.h>
> +#include <linux/if_vlan.h>
> +#include <linux/slab.h>
> +#include <linux/cpu.h>
> +#include <linux/average.h>
> +#include <linux/filter.h>
> +#include <linux/kernel.h>
> +#include <net/route.h>
> +#include <net/xdp.h>
> +#include <net/net_failover.h>
> +#include <net/xdp_sock_drv.h>
> +
> +#define VIRTIO_XDP_FLAG	BIT(0)
> +
> +struct virtnet_info {
> +	struct virtio_device *vdev;
> +	struct virtqueue *cvq;
> +	struct net_device *dev;
> +	struct send_queue *sq;
> +	struct receive_queue *rq;
> +	unsigned int status;
> +
> +	/* Max # of queue pairs supported by the device */
> +	u16 max_queue_pairs;
> +
> +	/* # of queue pairs currently used by the driver */
> +	u16 curr_queue_pairs;
> +
> +	/* # of XDP queue pairs currently used by the driver */
> +	u16 xdp_queue_pairs;
> +
> +	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
> +	bool xdp_enabled;
> +
> +	/* I like... big packets and I cannot lie! */
> +	bool big_packets;
> +
> +	/* Host will merge rx buffers for big packets (shake it! shake it!) */
> +	bool mergeable_rx_bufs;
> +
> +	/* Has control virtqueue */
> +	bool has_cvq;
> +
> +	/* Host can handle any s/g split between our header and packet data */
> +	bool any_header_sg;
> +
> +	/* Packet virtio header size */
> +	u8 hdr_len;
> +
> +	/* Work struct for refilling if we run low on memory. */
> +	struct delayed_work refill;
> +
> +	/* Work struct for config space updates */
> +	struct work_struct config_work;
> +
> +	/* Does the affinity hint is set for virtqueues? */
> +	bool affinity_hint_set;
> +
> +	/* CPU hotplug instances for online & dead */
> +	struct hlist_node node;
> +	struct hlist_node node_dead;
> +
> +	struct control_buf *ctrl;
> +
> +	/* Ethtool settings */
> +	u8 duplex;
> +	u32 speed;
> +
> +	unsigned long guest_offloads;
> +	unsigned long guest_offloads_capable;
> +
> +	/* failover when STANDBY feature enabled */
> +	struct failover *failover;
> +};
> +
> +/* RX packet size EWMA. The average packet size is used to determine the packet
> + * buffer size when refilling RX rings. As the entire RX ring may be refilled
> + * at once, the weight is chosen so that the EWMA will be insensitive to short-
> + * term, transient changes in packet size.
> + */
> +DECLARE_EWMA(pkt_len, 0, 64)
> +
> +struct virtnet_stat_desc {
> +	char desc[ETH_GSTRING_LEN];
> +	size_t offset;
> +};
> +
> +struct virtnet_sq_stats {
> +	struct u64_stats_sync syncp;
> +	u64 packets;
> +	u64 bytes;
> +	u64 xdp_tx;
> +	u64 xdp_tx_drops;
> +	u64 kicks;
> +};
> +
> +struct virtnet_rq_stats {
> +	struct u64_stats_sync syncp;
> +	u64 packets;
> +	u64 bytes;
> +	u64 drops;
> +	u64 xdp_packets;
> +	u64 xdp_tx;
> +	u64 xdp_redirects;
> +	u64 xdp_drops;
> +	u64 kicks;
> +};
> +
> +#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
> +#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
> +
> +/* Internal representation of a send virtqueue */
> +struct send_queue {
> +	/* Virtqueue associated with this send _queue */
> +	struct virtqueue *vq;
> +
> +	/* TX: fragments + linear part + virtio header */
> +	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> +
> +	/* Name of the send queue: output.$index */
> +	char name[40];
> +
> +	struct virtnet_sq_stats stats;
> +
> +	struct napi_struct napi;
> +};
> +
> +/* Internal representation of a receive virtqueue */
> +struct receive_queue {
> +	/* Virtqueue associated with this receive_queue */
> +	struct virtqueue *vq;
> +
> +	struct napi_struct napi;
> +
> +	struct bpf_prog __rcu *xdp_prog;
> +
> +	struct virtnet_rq_stats stats;
> +
> +	/* Chain pages by the private ptr. */
> +	struct page *pages;
> +
> +	/* Average packet length for mergeable receive buffers. */
> +	struct ewma_pkt_len mrg_avg_pkt_len;
> +
> +	/* Page frag for packet buffer allocation. */
> +	struct page_frag alloc_frag;
> +
> +	/* RX: fragments + linear part + virtio header */
> +	struct scatterlist sg[MAX_SKB_FRAGS + 2];
> +
> +	/* Min single buffer size for mergeable buffers case. */
> +	unsigned int min_buf_len;
> +
> +	/* Name of this receive queue: input.$index */
> +	char name[40];
> +
> +	struct xdp_rxq_info xdp_rxq;
> +};
> +
> +static inline bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> +{
> +	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> +		return false;
> +	else if (q < vi->curr_queue_pairs)
> +		return true;
> +	else
> +		return false;
> +}
> +
> +static inline void virtqueue_napi_schedule(struct napi_struct *napi,
> +					   struct virtqueue *vq)
> +{
> +	if (napi_schedule_prep(napi)) {
> +		virtqueue_disable_cb(vq);
> +		__napi_schedule(napi);
> +	}
> +}
> +
> +static inline bool is_xdp_frame(void *ptr)
> +{
> +	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> +}
> +
> +static inline void *xdp_to_ptr(struct xdp_frame *ptr)
> +{
> +	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
> +}
> +
> +static inline struct xdp_frame *ptr_to_xdp(void *ptr)
> +{
> +	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
> +}
> +
> +static inline void __free_old_xmit(struct send_queue *sq, bool in_napi,
> +				   struct virtnet_sq_stats *stats)
> +{
> +	unsigned int len;
> +	void *ptr;
> +
> +	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> +		if (!is_xdp_frame(ptr)) {
> +			struct sk_buff *skb = ptr;
> +
> +			pr_debug("Sent skb %p\n", skb);
> +
> +			stats->bytes += skb->len;
> +			napi_consume_skb(skb, in_napi);
> +		} else {
> +			struct xdp_frame *frame = ptr_to_xdp(ptr);
> +
> +			stats->bytes += frame->len;
> +			xdp_return_frame(frame);
> +		}
> +		stats->packets++;
> +	}
> +}
> +
> +#endif
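
A note on the helpers that become inline here: is_xdp_frame(), xdp_to_ptr()
and ptr_to_xdp() work by pointer tagging. sk_buff and xdp_frame allocations
are at least word aligned, so bit 0 of the pointer is always zero and
VIRTIO_XDP_FLAG can be OR-ed in to record which kind of buffer was queued on
the send virtqueue; __free_old_xmit() above uses that bit to pick the right
free path. A minimal stand-alone sketch of the same trick (the struct names
below are placeholders, not the driver's types):

/* Stand-alone illustration of the bit-0 pointer tagging used by
 * xdp_to_ptr()/ptr_to_xdp()/is_xdp_frame(); fake_skb/fake_xdpf are
 * placeholders, not the kernel's definitions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define XDP_FLAG 0x1UL		/* same idea as VIRTIO_XDP_FLAG: BIT(0) */

struct fake_skb  { unsigned int len; };
struct fake_xdpf { unsigned int len; };

/* Tag an XDP frame pointer before storing it as a generic token. */
static void *xdpf_to_token(struct fake_xdpf *f)
{
	return (void *)((uintptr_t)f | XDP_FLAG);
}

/* Recover the typed pointer from a token popped off the queue. */
static struct fake_xdpf *token_to_xdpf(void *token)
{
	return (struct fake_xdpf *)((uintptr_t)token & ~XDP_FLAG);
}

static int token_is_xdpf(void *token)
{
	return (uintptr_t)token & XDP_FLAG;
}

int main(void)
{
	struct fake_skb *skb = malloc(sizeof(*skb));
	struct fake_xdpf *xdpf = malloc(sizeof(*xdpf));
	void *ring[2];
	int i;

	/* malloc() results are suitably aligned, so bit 0 is always clear. */
	assert(((uintptr_t)skb & XDP_FLAG) == 0);
	assert(((uintptr_t)xdpf & XDP_FLAG) == 0);

	ring[0] = skb;			/* skbs are stored untagged     */
	ring[1] = xdpf_to_token(xdpf);	/* xdp frames carry the flag    */

	for (i = 0; i < 2; i++) {
		if (token_is_xdpf(ring[i]))
			printf("slot %d: xdp frame %p\n", i,
			       (void *)token_to_xdpf(ring[i]));
		else
			printf("slot %d: skb %p\n", i, ring[i]);
	}

	free(skb);
	free(xdpf);
	return 0;
}

Any C99 compiler will build this; the assertions only document the alignment
assumption the real helpers depend on.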



Thread overview: 80+ messages
2021-06-10  8:21 [PATCH net-next v5 00/15] virtio-net: support xdp socket zero copy Xuan Zhuo
2021-06-10  8:21 ` Xuan Zhuo
2021-06-10  8:21 ` [PATCH net-next v5 01/15] netdevice: priv_flags extend to 64bit Xuan Zhuo
2021-06-10  8:21   ` Xuan Zhuo
2021-06-10  8:21 ` [PATCH net-next v5 02/15] netdevice: add priv_flags IFF_NOT_USE_DMA_ADDR Xuan Zhuo
2021-06-10  8:21   ` Xuan Zhuo
2021-06-10  8:21 ` [PATCH net-next v5 03/15] virtio-net: " Xuan Zhuo
2021-06-10  8:21   ` Xuan Zhuo
2021-06-16  9:27   ` Jason Wang
2021-06-16  9:27     ` Jason Wang
2021-06-16 10:27     ` Xuan Zhuo
2021-06-10  8:21 ` [PATCH net-next v5 04/15] xsk: XDP_SETUP_XSK_POOL support option IFF_NOT_USE_DMA_ADDR Xuan Zhuo
2021-06-10  8:21   ` Xuan Zhuo
2021-06-10  8:21 ` [PATCH net-next v5 05/15] virtio: support virtqueue_detach_unused_buf_ctx Xuan Zhuo
2021-06-10  8:21   ` Xuan Zhuo
2021-06-17  2:48   ` kernel test robot
2021-06-17  2:48     ` kernel test robot
2021-06-10  8:22 ` [PATCH net-next v5 06/15] virtio-net: unify the code for recycling the xmit ptr Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-16  2:42   ` Jason Wang
2021-06-16  2:42     ` Jason Wang
2021-06-10  8:22 ` [PATCH net-next v5 07/15] virtio-net: standalone virtnet_aloc_frag function Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-16  2:45   ` Jason Wang
2021-06-16  2:45     ` Jason Wang
2021-06-10  8:22 ` [PATCH net-next v5 08/15] virtio-net: split the receive_mergeable function Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-16  7:33   ` Jason Wang
2021-06-16  7:33     ` Jason Wang
2021-06-16  7:52     ` Xuan Zhuo
2021-06-10  8:22 ` [PATCH net-next v5 09/15] virtio-net: virtnet_poll_tx support budget check Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-10  8:22 ` [PATCH net-next v5 10/15] virtio-net: independent directory Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-16  7:34   ` Jason Wang
2021-06-16  7:34     ` Jason Wang
2021-06-10  8:22 ` [PATCH net-next v5 11/15] virtio-net: move to virtio_net.h Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-16  7:35   ` Jason Wang [this message]
2021-06-16  7:35     ` Jason Wang
2021-06-10  8:22 ` [PATCH net-next v5 12/15] virtio-net: support AF_XDP zc tx Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-16  9:26   ` Jason Wang
2021-06-16  9:26     ` Jason Wang
2021-06-16 10:10     ` Magnus Karlsson
2021-06-16 10:19     ` Xuan Zhuo
2021-06-16 12:51       ` Jason Wang
2021-06-16 12:51         ` Jason Wang
2021-06-16 12:57         ` Xuan Zhuo
2021-06-17  2:36           ` Jason Wang
2021-06-17  2:36             ` Jason Wang
2021-06-10  8:22 ` [PATCH net-next v5 13/15] virtio-net: support AF_XDP zc rx Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-17  2:48   ` kernel test robot
2021-06-17  2:48     ` kernel test robot
2021-06-17  3:23   ` Jason Wang
2021-06-17  3:23     ` Jason Wang
2021-06-17  5:53     ` Xuan Zhuo
2021-06-17  6:03       ` Jason Wang
2021-06-17  6:03         ` Jason Wang
2021-06-17  6:37         ` Xuan Zhuo
2021-06-17  6:58           ` Jason Wang
2021-06-17  6:58             ` Jason Wang
2021-06-21  3:26   ` kernel test robot
2021-06-21  3:26     ` kernel test robot
2021-06-10  8:22 ` [PATCH net-next v5 14/15] virtio-net: xsk direct xmit inside xsk wakeup Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-17  3:07   ` Jason Wang
2021-06-17  3:07     ` Jason Wang
2021-06-17  5:55     ` Xuan Zhuo
2021-06-17  6:01       ` Jason Wang
2021-06-17  6:01         ` Jason Wang
2021-06-10  8:22 ` [PATCH net-next v5 15/15] virtio-net: xsk zero copy xmit kick by threshold Xuan Zhuo
2021-06-10  8:22   ` Xuan Zhuo
2021-06-17  3:08   ` Jason Wang
2021-06-17  3:08     ` Jason Wang
2021-06-17  5:56     ` Xuan Zhuo
2021-06-17  6:00       ` Jason Wang
2021-06-17  6:00         ` Jason Wang
2021-06-17  6:43         ` Xuan Zhuo
