virtualization.lists.linux-foundation.org archive mirror
* [PATCH net-next v5 0/9] virtio-net: support device stats
@ 2024-03-18 11:05 Xuan Zhuo
  2024-03-18 11:05 ` [PATCH net-next v5 1/9] virtio_net: introduce device stats feature and structures Xuan Zhuo
                   ` (11 more replies)
  0 siblings, 12 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:05 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

As described in the spec:

https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

this series makes virtio-net support getting device stats from the device.

Please review.

Thanks.

v5:
    1. Fix some small problems in the last version
    2. Do not report stats that are already reported via netlink
    3. Remove "_queue" from the ethtool -S key names

v4:
    1. Support the per-queue statistics API
    2. Fix some small problems in the last version

v3:
    1. Rebase on net-next

v2:
    1. Fix the usage of leXX_to_cpu()
    2. Add a comment to the structure virtnet_stats_map

v1:
    1. Fix some definitions of the macros and the structs

Xuan Zhuo (9):
  virtio_net: introduce device stats feature and structures
  virtio_net: virtnet_send_command supports command-specific-result
  virtio_net: remove "_queue" from ethtool -S
  virtio_net: support device stats
  virtio_net: stats map include driver stats
  virtio_net: add the total stats field
  virtio_net: rename stat tx_timeout to timeout
  netdev: add queue stats
  virtio-net: support queue stat

 Documentation/netlink/specs/netdev.yaml | 104 ++++
 drivers/net/virtio_net.c                | 755 +++++++++++++++++++++---
 include/net/netdev_queues.h             |  27 +
 include/uapi/linux/netdev.h             |  19 +
 include/uapi/linux/virtio_net.h         | 143 +++++
 net/core/netdev-genl.c                  |  23 +-
 tools/include/uapi/linux/netdev.h       |  19 +
 7 files changed, 1013 insertions(+), 77 deletions(-)

--
2.32.0.3.g01195cf9f


* [PATCH net-next v5 1/9] virtio_net: introduce device stats feature and structures
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
@ 2024-03-18 11:05 ` Xuan Zhuo
  2024-04-10  6:09   ` Jason Wang
  2024-03-18 11:05 ` [PATCH net-next v5 2/9] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:05 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

The virtio-net device stats spec:

https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

Introduce the corresponding feature bit and structures.
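
For readers of the new header, a minimal sketch (not part of this patch)
of how a reply buffer built from these structures can be walked. The loop
shape mirrors the parsing added later in this series; hdr->size is used
as the stride to the next entry:

static void walk_stats_reply(const void *reply, int res_size)
{
	const struct virtio_net_stats_reply_hdr *hdr;
	const void *p;

	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
		hdr = p;
		/* hdr->type selects the concrete layout, e.g.
		 * VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC means this entry
		 * is a struct virtio_net_stats_rx_basic.
		 */
	}
}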

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 include/uapi/linux/virtio_net.h | 143 ++++++++++++++++++++++++++++++++
 1 file changed, 143 insertions(+)

diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index cc65ef0f3c3e..ac9174717ef1 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -56,6 +56,7 @@
 #define VIRTIO_NET_F_MQ	22	/* Device supports Receive Flow
 					 * Steering */
 #define VIRTIO_NET_F_CTRL_MAC_ADDR 23	/* Set MAC address */
+#define VIRTIO_NET_F_DEVICE_STATS 50	/* Device can provide device-level statistics. */
 #define VIRTIO_NET_F_VQ_NOTF_COAL 52	/* Device supports virtqueue notification coalescing */
 #define VIRTIO_NET_F_NOTF_COAL	53	/* Device supports notifications coalescing */
 #define VIRTIO_NET_F_GUEST_USO4	54	/* Guest can handle USOv4 in. */
@@ -406,4 +407,146 @@ struct  virtio_net_ctrl_coal_vq {
 	struct virtio_net_ctrl_coal coal;
 };
 
+/*
+ * Device Statistics
+ */
+#define VIRTIO_NET_CTRL_STATS         8
+#define VIRTIO_NET_CTRL_STATS_QUERY   0
+#define VIRTIO_NET_CTRL_STATS_GET     1
+
+struct virtio_net_stats_capabilities {
+
+#define VIRTIO_NET_STATS_TYPE_CVQ       (1ULL << 32)
+
+#define VIRTIO_NET_STATS_TYPE_RX_BASIC  (1ULL << 0)
+#define VIRTIO_NET_STATS_TYPE_RX_CSUM   (1ULL << 1)
+#define VIRTIO_NET_STATS_TYPE_RX_GSO    (1ULL << 2)
+#define VIRTIO_NET_STATS_TYPE_RX_SPEED  (1ULL << 3)
+
+#define VIRTIO_NET_STATS_TYPE_TX_BASIC  (1ULL << 16)
+#define VIRTIO_NET_STATS_TYPE_TX_CSUM   (1ULL << 17)
+#define VIRTIO_NET_STATS_TYPE_TX_GSO    (1ULL << 18)
+#define VIRTIO_NET_STATS_TYPE_TX_SPEED  (1ULL << 19)
+
+	__le64 supported_stats_types[1];
+};
+
+struct virtio_net_ctrl_queue_stats {
+	struct {
+		__le16 vq_index;
+		__le16 reserved[3];
+		__le64 types_bitmap[1];
+	} stats[1];
+};
+
+struct virtio_net_stats_reply_hdr {
+#define VIRTIO_NET_STATS_TYPE_REPLY_CVQ       32
+
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC  0
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM   1
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO    2
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED  3
+
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC  16
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM   17
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO    18
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED  19
+	__u8 type;
+	__u8 reserved;
+	__le16 vq_index;
+	__le16 reserved1;
+	__le16 size;
+};
+
+struct virtio_net_stats_cvq {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 command_num;
+	__le64 ok_num;
+};
+
+struct virtio_net_stats_rx_basic {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 rx_notifications;
+
+	__le64 rx_packets;
+	__le64 rx_bytes;
+
+	__le64 rx_interrupts;
+
+	__le64 rx_drops;
+	__le64 rx_drop_overruns;
+};
+
+struct virtio_net_stats_tx_basic {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 tx_notifications;
+
+	__le64 tx_packets;
+	__le64 tx_bytes;
+
+	__le64 tx_interrupts;
+
+	__le64 tx_drops;
+	__le64 tx_drop_malformed;
+};
+
+struct virtio_net_stats_rx_csum {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 rx_csum_valid;
+	__le64 rx_needs_csum;
+	__le64 rx_csum_none;
+	__le64 rx_csum_bad;
+};
+
+struct virtio_net_stats_tx_csum {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 tx_csum_none;
+	__le64 tx_needs_csum;
+};
+
+struct virtio_net_stats_rx_gso {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 rx_gso_packets;
+	__le64 rx_gso_bytes;
+	__le64 rx_gso_packets_coalesced;
+	__le64 rx_gso_bytes_coalesced;
+};
+
+struct virtio_net_stats_tx_gso {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 tx_gso_packets;
+	__le64 tx_gso_bytes;
+	__le64 tx_gso_segments;
+	__le64 tx_gso_segments_bytes;
+	__le64 tx_gso_packets_noseg;
+	__le64 tx_gso_bytes_noseg;
+};
+
+struct virtio_net_stats_rx_speed {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	/* rx_{packets,bytes}_allowance_exceeded are too long, so shorter
+	 * names are used here.
+	 */
+	__le64 rx_ratelimit_packets;
+	__le64 rx_ratelimit_bytes;
+};
+
+struct virtio_net_stats_tx_speed {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	/* tx_{packets,bytes}_allowance_exceeded are too long, so shorter
+	 * names are used here.
+	 */
+	__le64 tx_ratelimit_packets;
+	__le64 tx_ratelimit_bytes;
+};
+
 #endif /* _UAPI_LINUX_VIRTIO_NET_H */
-- 
2.32.0.3.g01195cf9f


* [PATCH net-next v5 2/9] virtio_net: virtnet_send_command supports command-specific-result
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
  2024-03-18 11:05 ` [PATCH net-next v5 1/9] virtio_net: introduce device stats feature and structures Xuan Zhuo
@ 2024-03-18 11:05 ` Xuan Zhuo
  2024-04-10  6:09   ` Jason Wang
  2024-03-18 11:05 ` [PATCH net-next v5 3/9] virtio_net: remove "_queue" from ethtool -S Xuan Zhuo
                   ` (9 subsequent siblings)
  11 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:05 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

As described in the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

the control virtqueue can return a command-specific result, so extend
virtnet_send_command() to accept an optional "in" scatterlist for that
result.
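
Not part of this patch: a short usage sketch of the new "in" parameter.
It mirrors how a later patch in this series queries the stats capability
(vi->ctrl->stats_cap is added there):

	struct scatterlist in;

	/* The device writes the command-specific result into "in". */
	sg_init_one(&in, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
				  VIRTIO_NET_CTRL_STATS_QUERY,
				  NULL, &in))
		dev_warn(&vi->dev->dev, "Fail to get stats capability\n");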

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 47 +++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 20 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7ce4a1011ea..af512d85cd5b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2512,10 +2512,11 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
  * never fail unless improperly formatted.
  */
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-				 struct scatterlist *out)
+				 struct scatterlist *out,
+				 struct scatterlist *in)
 {
-	struct scatterlist *sgs[4], hdr, stat;
-	unsigned out_num = 0, tmp;
+	struct scatterlist *sgs[5], hdr, stat;
+	u32 out_num = 0, tmp, in_num = 0;
 	int ret;
 
 	/* Caller should know better */
@@ -2533,10 +2534,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 
 	/* Add return status. */
 	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
-	sgs[out_num] = &stat;
+	sgs[out_num + in_num++] = &stat;
 
-	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
-	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+	if (in)
+		sgs[out_num + in_num++] = in;
+
+	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
 	if (ret < 0) {
 		dev_warn(&vi->vdev->dev,
 			 "Failed to add sgs for command vq: %d\n.", ret);
@@ -2578,7 +2582,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
+					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
+					  &sg, NULL)) {
 			dev_warn(&vdev->dev,
 				 "Failed to set mac address by vq command.\n");
 			ret = -EINVAL;
@@ -2647,7 +2652,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
 	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
 	rtnl_unlock();
 }
@@ -2664,7 +2669,7 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
+				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
@@ -2727,14 +2732,14 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
+				  VIRTIO_NET_CTRL_RX_PROMISC, sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
 			 vi->ctrl->promisc ? "en" : "dis");
 
 	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
+				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
 			 vi->ctrl->allmulti ? "en" : "dis");
 
@@ -2770,7 +2775,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
+				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL))
 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
 	kfree(buf);
@@ -2786,7 +2791,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
+				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -2801,7 +2806,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
+				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -2920,7 +2925,7 @@ static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
-				  &sgs))
+				  &sgs, NULL))
 		return -EINVAL;
 
 	return 0;
@@ -3062,7 +3067,7 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
-				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
+				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs, NULL)) {
 		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
 		return false;
 	}
@@ -3380,7 +3385,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
-				  &sgs_tx))
+				  &sgs_tx, NULL))
 		return -EINVAL;
 
 	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
@@ -3430,7 +3435,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-				  &sgs_rx))
+				  &sgs_rx, NULL))
 		return -EINVAL;
 
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
@@ -3899,7 +3904,8 @@ static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
-				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
+				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+				  &sg, NULL)) {
 		dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
 		return -EINVAL;
 	}
@@ -4822,7 +4828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
+					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
+					  &sg, NULL)) {
 			pr_debug("virtio_net: setting MAC address failed\n");
 			rtnl_unlock();
 			err = -EINVAL;
-- 
2.32.0.3.g01195cf9f


* [PATCH net-next v5 3/9] virtio_net: remove "_queue" from ethtool -S
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
  2024-03-18 11:05 ` [PATCH net-next v5 1/9] virtio_net: introduce device stats feature and structures Xuan Zhuo
  2024-03-18 11:05 ` [PATCH net-next v5 2/9] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
@ 2024-03-18 11:05 ` Xuan Zhuo
  2024-04-10  6:09   ` Jason Wang
  2024-03-18 11:05 ` [PATCH net-next v5 4/9] virtio_net: support device stats Xuan Zhuo
                   ` (8 subsequent siblings)
  11 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:05 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

The length of an ethtool -S key is limited by this macro:

ETH_GSTRING_LEN 32

That includes the \0 at the end, so the maximum key name length is 31.
The prefix "rx_queue_0_" is already 11 characters, and once the queue
number reaches two digits the prefix grows to 12 characters, leaving
only 19 characters for the stat name itself. That is too short: we will
introduce keys such as "gso_packets_coalesced". So change the prefix to
"rx0_".
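
A small self-contained illustration of the arithmetic (not part of the
patch; ETH_GSTRING_LEN is simply restated here with its kernel value):

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32	/* same value as the kernel macro */

/* Room left for the stat name once the prefix and the trailing '\0'
 * are accounted for.
 */
static int room_for_stat_name(const char *prefix)
{
	return ETH_GSTRING_LEN - 1 - (int)strlen(prefix);
}

int main(void)
{
	printf("%d\n", room_for_stat_name("rx_queue_10_"));	/* 19 */
	printf("%d\n", room_for_stat_name("rx10_"));		/* 26 */
	printf("%zu\n", strlen("gso_packets_coalesced"));	/* 21 */
	return 0;
}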

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index af512d85cd5b..8cb5bdd7ad91 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3273,13 +3273,13 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 	case ETH_SS_STATS:
 		for (i = 0; i < vi->curr_queue_pairs; i++) {
 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
-				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
+				ethtool_sprintf(&p, "rx%u_%s", i,
 						virtnet_rq_stats_desc[j].desc);
 		}
 
 		for (i = 0; i < vi->curr_queue_pairs; i++) {
 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
-				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
+				ethtool_sprintf(&p, "tx%u_%s", i,
 						virtnet_sq_stats_desc[j].desc);
 		}
 		break;
-- 
2.32.0.3.g01195cf9f


* [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (2 preceding siblings ...)
  2024-03-18 11:05 ` [PATCH net-next v5 3/9] virtio_net: remove "_queue" from ethtool -S Xuan Zhuo
@ 2024-03-18 11:05 ` Xuan Zhuo
  2024-04-10  6:09   ` Jason Wang
  2024-03-18 11:05 ` [PATCH net-next v5 5/9] virtio_net: stats map include driver stats Xuan Zhuo
                   ` (7 subsequent siblings)
  11 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:05 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

As described in the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

make virtio-net support getting the stats from the device via
ethtool -S <eth0>.

Because there are many stat descriptors, some way of organizing them is
needed. For this purpose, introduce the array "virtnet_stats_map".
Iterating over this array simplifies generating the field names,
calculating the buffer sizes for requests and responses, and parsing
the replies from the device.
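
As a condensed sketch of that idea (the full versions are
virtnet_stats_ctx_init() and virtnet_get_hw_stats() in the diff below),
one loop over the map yields both the number of ethtool fields and the
size of the buffer needed for the device reply:

	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
		struct virtnet_stats_map *m = &virtio_net_stats_map[i];

		if (!(vi->device_stats_cap & m->stat_type))
			continue;	/* stat group not supported by the device */

		desc_num[m->queue_type]   += m->num;	/* ethtool fields */
		reply_size[m->queue_type] += m->len;	/* bytes in the reply */
	}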

NIC statistics:
     rx0_packets: 582951
     rx0_bytes: 155307077
     rx0_drops: 0
     rx0_xdp_packets: 0
     rx0_xdp_tx: 0
     rx0_xdp_redirects: 0
     rx0_xdp_drops: 0
     rx0_kicks: 17007
     rx0_hw_packets: 2179409
     rx0_hw_bytes: 510015040
     rx0_hw_notifications: 0
     rx0_hw_interrupts: 0
     rx0_hw_drops: 12964
     rx0_hw_drop_overruns: 0
     rx0_hw_csum_valid: 2179409
     rx0_hw_csum_none: 0
     rx0_hw_csum_bad: 0
     rx0_hw_needs_csum: 2179409
     rx0_hw_ratelimit_packets: 0
     rx0_hw_ratelimit_bytes: 0
     tx0_packets: 15361
     tx0_bytes: 1918970
     tx0_xdp_tx: 0
     tx0_xdp_tx_drops: 0
     tx0_kicks: 15361
     tx0_timeouts: 0
     tx0_hw_packets: 32272
     tx0_hw_bytes: 4311698
     tx0_hw_notifications: 0
     tx0_hw_interrupts: 0
     tx0_hw_drops: 0
     tx0_hw_drop_malformed: 0
     tx0_hw_csum_none: 0
     tx0_hw_needs_csum: 32272
     tx0_hw_ratelimit_packets: 0
     tx0_hw_ratelimit_bytes: 0

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 397 insertions(+), 4 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8cb5bdd7ad91..70c1d4e850e0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
 #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
 
+#define VIRTNET_STATS_DESC_CQ(name) \
+	{#name, offsetof(struct virtio_net_stats_cvq, name)}
+
+#define VIRTNET_STATS_DESC_RX(class, name) \
+	{#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
+
+#define VIRTNET_STATS_DESC_TX(class, name) \
+	{#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
+
+static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
+	VIRTNET_STATS_DESC_CQ(command_num),
+	VIRTNET_STATS_DESC_CQ(ok_num),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
+	VIRTNET_STATS_DESC_RX(basic, packets),
+	VIRTNET_STATS_DESC_RX(basic, bytes),
+
+	VIRTNET_STATS_DESC_RX(basic, notifications),
+	VIRTNET_STATS_DESC_RX(basic, interrupts),
+
+	VIRTNET_STATS_DESC_RX(basic, drops),
+	VIRTNET_STATS_DESC_RX(basic, drop_overruns),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
+	VIRTNET_STATS_DESC_TX(basic, packets),
+	VIRTNET_STATS_DESC_TX(basic, bytes),
+
+	VIRTNET_STATS_DESC_TX(basic, notifications),
+	VIRTNET_STATS_DESC_TX(basic, interrupts),
+
+	VIRTNET_STATS_DESC_TX(basic, drops),
+	VIRTNET_STATS_DESC_TX(basic, drop_malformed),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
+	VIRTNET_STATS_DESC_RX(csum, csum_valid),
+	VIRTNET_STATS_DESC_RX(csum, needs_csum),
+
+	VIRTNET_STATS_DESC_RX(csum, csum_none),
+	VIRTNET_STATS_DESC_RX(csum, csum_bad),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
+	VIRTNET_STATS_DESC_TX(csum, needs_csum),
+	VIRTNET_STATS_DESC_TX(csum, csum_none),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
+	VIRTNET_STATS_DESC_RX(gso, gso_packets),
+	VIRTNET_STATS_DESC_RX(gso, gso_bytes),
+	VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
+	VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
+	VIRTNET_STATS_DESC_TX(gso, gso_packets),
+	VIRTNET_STATS_DESC_TX(gso, gso_bytes),
+	VIRTNET_STATS_DESC_TX(gso, gso_segments),
+	VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
+	VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
+	VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
+	VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
+	VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
+	VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
+	VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
+};
+
+#define VIRTNET_Q_TYPE_RX 0
+#define VIRTNET_Q_TYPE_TX 1
+#define VIRTNET_Q_TYPE_CQ 2
+
+struct virtnet_stats_map {
+	/* The stat type in bitmap. */
+	u64 stat_type;
+
+	/* The bytes of the response for the stat. */
+	u32 len;
+
+	/* The num of the response fields for the stat. */
+	u32 num;
+
+	/* The type of queue corresponding to the statistics. (cq, rq, sq) */
+	u32 queue_type;
+
+	/* The reply type of the stat. */
+	u8 reply_type;
+
+	/* Describe the name and the offset in the response. */
+	const struct virtnet_stat_desc *desc;
+};
+
+#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)	\
+	{							\
+		VIRTIO_NET_STATS_TYPE_##TYPE,			\
+		sizeof(struct virtio_net_stats_ ## type),	\
+		ARRAY_SIZE(virtnet_stats_ ## type ##_desc),	\
+		VIRTNET_Q_TYPE_##queue_type,			\
+		VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,		\
+		&virtnet_stats_##type##_desc[0]			\
+	}
+
+static struct virtnet_stats_map virtio_net_stats_map[] = {
+	VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
+
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
+
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
+};
+
 struct virtnet_interrupt_coalesce {
 	u32 max_packets;
 	u32 max_usecs;
@@ -244,6 +367,7 @@ struct control_buf {
 	struct virtio_net_ctrl_coal_tx coal_tx;
 	struct virtio_net_ctrl_coal_rx coal_rx;
 	struct virtio_net_ctrl_coal_vq coal_vq;
+	struct virtio_net_stats_capabilities stats_cap;
 };
 
 struct virtnet_info {
@@ -329,6 +453,8 @@ struct virtnet_info {
 
 	/* failover when STANDBY feature enabled */
 	struct failover *failover;
+
+	u64 device_stats_cap;
 };
 
 struct padded_vnet_hdr {
@@ -389,6 +515,17 @@ static int rxq2vq(int rxq)
 	return rxq * 2;
 }
 
+static int vq_type(struct virtnet_info *vi, int qid)
+{
+	if (qid == vi->max_queue_pairs * 2)
+		return VIRTNET_Q_TYPE_CQ;
+
+	if (qid % 2)
+		return VIRTNET_Q_TYPE_TX;
+
+	return VIRTNET_Q_TYPE_RX;
+}
+
 static inline struct virtio_net_common_hdr *
 skb_vnet_common_hdr(struct sk_buff *skb)
 {
@@ -3263,6 +3400,223 @@ static int virtnet_set_channels(struct net_device *dev,
 	return err;
 }
 
+static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
+{
+	struct virtnet_stats_map *m;
+	int i, j;
+	u8 *p = *data;
+
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
+		m = &virtio_net_stats_map[i];
+
+		if (m->queue_type != type)
+			continue;
+
+		if (!(vi->device_stats_cap & m->stat_type))
+			continue;
+
+		for (j = 0; j < m->num; ++j) {
+			switch (type) {
+			case VIRTNET_Q_TYPE_RX:
+				ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
+				break;
+
+			case VIRTNET_Q_TYPE_TX:
+				ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
+				break;
+
+			case VIRTNET_Q_TYPE_CQ:
+				ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
+				break;
+			}
+		}
+	}
+
+	*data = p;
+}
+
+struct virtnet_stats_ctx {
+	u32 desc_num[3];
+
+	u32 bitmap[3];
+
+	u32 size[3];
+
+	u64 *data;
+};
+
+static void virtnet_stats_ctx_init(struct virtnet_info *vi,
+				   struct virtnet_stats_ctx *ctx,
+				   u64 *data)
+{
+	struct virtnet_stats_map *m;
+	int i;
+
+	ctx->data = data;
+
+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
+		m = &virtio_net_stats_map[i];
+
+		if (!(vi->device_stats_cap & m->stat_type))
+			continue;
+
+		ctx->bitmap[m->queue_type]   |= m->stat_type;
+		ctx->desc_num[m->queue_type] += m->num;
+		ctx->size[m->queue_type]     += m->len;
+	}
+}
+
+/* virtnet_fill_stats - copy the stats to ethtool -S
+ * The stats source is the device.
+ *
+ * @vi: virtio net info
+ * @qid: the vq id
+ * @ctx: stats ctx (initiated by virtnet_stats_ctx_init())
+ * @base: pointer to the device reply.
+ * @type: the type of the device reply
+ */
+static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
+			       struct virtnet_stats_ctx *ctx,
+			       const u8 *base, u8 type)
+{
+	u32 queue_type, num_rx, num_tx, num_cq;
+	struct virtnet_stats_map *m;
+	u64 offset, bitmap;
+	const __le64 *v;
+	int i, j;
+
+	num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
+	num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
+	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
+
+	queue_type = vq_type(vi, qid);
+	bitmap = ctx->bitmap[queue_type];
+	offset = 0;
+
+	if (queue_type == VIRTNET_Q_TYPE_TX) {
+		offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
+		offset += VIRTNET_SQ_STATS_LEN;
+	} else if (queue_type == VIRTNET_Q_TYPE_RX) {
+		offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
+		m = &virtio_net_stats_map[i];
+
+		if (m->stat_type & bitmap)
+			offset += m->num;
+
+		if (type != m->reply_type)
+			continue;
+
+		for (j = 0; j < m->num; ++j) {
+			v = (const __le64 *)(base + m->desc[j].offset);
+			ctx->data[offset + j] = le64_to_cpu(*v);
+		}
+
+		break;
+	}
+}
+
+static int __virtnet_get_hw_stats(struct virtnet_info *vi,
+				  struct virtnet_stats_ctx *ctx,
+				  struct virtio_net_ctrl_queue_stats *req,
+				  int req_size, void *reply, int res_size)
+{
+	struct virtio_net_stats_reply_hdr *hdr;
+	struct scatterlist sgs_in, sgs_out;
+	void *p;
+	u32 qid;
+	int ok;
+
+	sg_init_one(&sgs_out, req, req_size);
+	sg_init_one(&sgs_in, reply, res_size);
+
+	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
+				  VIRTIO_NET_CTRL_STATS_GET,
+				  &sgs_out, &sgs_in);
+	kfree(req);
+
+	if (!ok) {
+		kfree(reply);
+		return ok;
+	}
+
+	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
+		hdr = p;
+		qid = le16_to_cpu(hdr->vq_index);
+		virtnet_fill_stats(vi, qid, ctx, p, hdr->type);
+	}
+
+	kfree(reply);
+	return 0;
+}
+
+static void virtnet_make_stat_req(struct virtnet_info *vi,
+				  struct virtnet_stats_ctx *ctx,
+				  struct virtio_net_ctrl_queue_stats *req,
+				  int qid, int *idx)
+{
+	int qtype = vq_type(vi, qid);
+	u64 bitmap = ctx->bitmap[qtype];
+
+	if (!bitmap)
+		return;
+
+	req->stats[*idx].vq_index = cpu_to_le16(qid);
+	req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
+	*idx += 1;
+}
+
+static int virtnet_get_hw_stats(struct virtnet_info *vi,
+				struct virtnet_stats_ctx *ctx)
+{
+	struct virtio_net_ctrl_queue_stats *req;
+	int qnum, i, j, res_size, qtype, last_vq;
+	void *reply;
+
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
+		return 0;
+
+	last_vq = vi->curr_queue_pairs * 2 - 1;
+
+	qnum = 0;
+	res_size = 0;
+	for (i = 0; i <= last_vq ; ++i) {
+		qtype = vq_type(vi, i);
+		if (ctx->bitmap[qtype]) {
+			++qnum;
+			res_size += ctx->size[qtype];
+		}
+	}
+
+	if (ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
+		res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
+		qnum += 1;
+	}
+
+	req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	reply = kmalloc(res_size, GFP_KERNEL);
+	if (!reply) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	j = 0;
+	for (i = 0; i <= last_vq ; ++i)
+		virtnet_make_stat_req(vi, ctx, req, i, &j);
+
+	virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
+
+	return __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
+}
+
 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -3271,16 +3625,22 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 
 	switch (stringset) {
 	case ETH_SS_STATS:
+		virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
+
 		for (i = 0; i < vi->curr_queue_pairs; i++) {
 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
 				ethtool_sprintf(&p, "rx%u_%s", i,
 						virtnet_rq_stats_desc[j].desc);
+
+			virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
 		}
 
 		for (i = 0; i < vi->curr_queue_pairs; i++) {
 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
 				ethtool_sprintf(&p, "tx%u_%s", i,
 						virtnet_sq_stats_desc[j].desc);
+
+			virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
 		}
 		break;
 	}
@@ -3289,11 +3649,35 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 static int virtnet_get_sset_count(struct net_device *dev, int sset)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_stats_ctx ctx = {0};
+	u32 pair_count;
 
 	switch (sset) {
 	case ETH_SS_STATS:
-		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
-					       VIRTNET_SQ_STATS_LEN);
+		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS) &&
+		    !vi->device_stats_cap) {
+			struct scatterlist sg;
+
+			sg_init_one(&sg, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));
+
+			if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
+						  VIRTIO_NET_CTRL_STATS_QUERY,
+						  NULL, &sg)) {
+				dev_warn(&dev->dev, "Fail to get stats capability\n");
+			} else {
+				__le64 v;
+
+				v = vi->ctrl->stats_cap.supported_stats_types[0];
+				vi->device_stats_cap = le64_to_cpu(v);
+			}
+		}
+
+		virtnet_stats_ctx_init(vi, &ctx, NULL);
+
+		pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
+		pair_count += ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
+
+		return ctx.desc_num[VIRTNET_Q_TYPE_CQ] + vi->curr_queue_pairs * pair_count;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3303,11 +3687,18 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	unsigned int idx = 0, start, i, j;
+	struct virtnet_stats_ctx ctx = {0};
+	unsigned int idx, start, i, j;
 	const u8 *stats_base;
 	const u64_stats_t *p;
 	size_t offset;
 
+	virtnet_stats_ctx_init(vi, &ctx, data);
+	if (virtnet_get_hw_stats(vi, &ctx))
+		dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
+
+	idx = ctx.desc_num[VIRTNET_Q_TYPE_CQ];
+
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
@@ -3321,6 +3712,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 			}
 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
 		idx += VIRTNET_RQ_STATS_LEN;
+		idx += ctx.desc_num[VIRTNET_Q_TYPE_RX];
 	}
 
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
@@ -3336,6 +3728,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 			}
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 		idx += VIRTNET_SQ_STATS_LEN;
+		idx += ctx.desc_num[VIRTNET_Q_TYPE_TX];
 	}
 }
 
@@ -4963,7 +5356,7 @@ static struct virtio_device_id id_table[] = {
 	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
 	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
 	VIRTIO_NET_F_VQ_NOTF_COAL, \
-	VIRTIO_NET_F_GUEST_HDRLEN
+	VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
 
 static unsigned int features[] = {
 	VIRTNET_FEATURES,
-- 
2.32.0.3.g01195cf9f


* [PATCH net-next v5 5/9] virtio_net: stats map include driver stats
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (3 preceding siblings ...)
  2024-03-18 11:05 ` [PATCH net-next v5 4/9] virtio_net: support device stats Xuan Zhuo
@ 2024-03-18 11:05 ` Xuan Zhuo
  2024-03-18 11:05 ` [PATCH net-next v5 6/9] virtio_net: add the total stats field Xuan Zhuo
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:05 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

The last commit used the stats map to manage the device stats.

Managing the driver statistics separately is inconvenient, so integrate
them into the existing stats map. All statistics can then be handled
uniformly through a single path, which simplifies the code.
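
Condensed view of the unified fill path (the full version is
virtnet_fill_stats() in the diff below); the same loop serves both
sources, only the way a field is read differs:

	if (!m->from_driver)	/* device reply, little-endian */
		ctx->data[offset + j] =
			le64_to_cpu(*(const __le64 *)(base + desc->offset));
	else			/* driver counters, u64_stats_t */
		ctx->data[offset + j] =
			u64_stats_read((const u64_stats_t *)(base + desc->offset));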

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 200 +++++++++++++++++++--------------------
 1 file changed, 99 insertions(+), 101 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 70c1d4e850e0..27ed25e70177 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -102,32 +102,29 @@ struct virtnet_rq_stats {
 	u64_stats_t kicks;
 };
 
-#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
-#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
+#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m)}
+#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m)}
 
 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
-	{ "packets",		VIRTNET_SQ_STAT(packets) },
-	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
-	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
-	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
-	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
-	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
+	VIRTNET_SQ_STAT("packets",      packets),
+	VIRTNET_SQ_STAT("bytes",        bytes),
+	VIRTNET_SQ_STAT("xdp_tx",       xdp_tx),
+	VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
+	VIRTNET_SQ_STAT("kicks",        kicks),
+	VIRTNET_SQ_STAT("tx_timeouts",  tx_timeouts),
 };
 
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
-	{ "packets",		VIRTNET_RQ_STAT(packets) },
-	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
-	{ "drops",		VIRTNET_RQ_STAT(drops) },
-	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
-	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
-	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
-	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
-	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
+	VIRTNET_RQ_STAT("packets",       packets),
+	VIRTNET_RQ_STAT("bytes",         bytes),
+	VIRTNET_RQ_STAT("drops",         drops),
+	VIRTNET_RQ_STAT("xdp_packets",   xdp_packets),
+	VIRTNET_RQ_STAT("xdp_tx",        xdp_tx),
+	VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
+	VIRTNET_RQ_STAT("xdp_drops",     xdp_drops),
+	VIRTNET_RQ_STAT("kicks",         kicks),
 };
 
-#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
-#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
-
 #define VIRTNET_STATS_DESC_CQ(name) \
 	{#name, offsetof(struct virtio_net_stats_cvq, name)}
 
@@ -208,10 +205,10 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
 #define VIRTNET_Q_TYPE_CQ 2
 
 struct virtnet_stats_map {
-	/* The stat type in bitmap. */
+	/* The stat type in bitmap. Just for device stats. */
 	u64 stat_type;
 
-	/* The bytes of the response for the stat. */
+	/* The bytes of the response for the stat. Just for device stats. */
 	u32 len;
 
 	/* The num of the response fields for the stat. */
@@ -220,9 +217,12 @@ struct virtnet_stats_map {
 	/* The type of queue corresponding to the statistics. (cq, rq, sq) */
 	u32 queue_type;
 
-	/* The reply type of the stat. */
+	/* The reply type of the stat. Just for device stats. */
 	u8 reply_type;
 
+	/* The stats are counted by the driver. */
+	bool from_driver;
+
 	/* Describe the name and the offset in the response. */
 	const struct virtnet_stat_desc *desc;
 };
@@ -234,10 +234,24 @@ struct virtnet_stats_map {
 		ARRAY_SIZE(virtnet_stats_ ## type ##_desc),	\
 		VIRTNET_Q_TYPE_##queue_type,			\
 		VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,		\
+		false,						\
 		&virtnet_stats_##type##_desc[0]			\
 	}
 
+#define VIRTNET_DRIVER_STATS_MAP_ITEM(type, queue_type)		\
+	{							\
+		0, 0,						\
+		ARRAY_SIZE(virtnet_ ## type ## _stats_desc),	\
+		VIRTNET_Q_TYPE_##queue_type,			\
+		0, true,					\
+		&virtnet_##type##_stats_desc[0]			\
+	}
+
 static struct virtnet_stats_map virtio_net_stats_map[] = {
+	/* Driver stats should be at the start. */
+	VIRTNET_DRIVER_STATS_MAP_ITEM(rq, RX),
+	VIRTNET_DRIVER_STATS_MAP_ITEM(sq, TX),
+
 	VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
 
 	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
@@ -251,6 +265,11 @@ static struct virtnet_stats_map virtio_net_stats_map[] = {
 	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
 };
 
+#define virtnet_stats_supported(vi, m) ({				\
+	typeof(m) _m = (m);						\
+	(((vi)->device_stats_cap & _m->stat_type) || _m->from_driver);	\
+})
+
 struct virtnet_interrupt_coalesce {
 	u32 max_packets;
 	u32 max_usecs;
@@ -2266,7 +2285,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 
 	u64_stats_set(&stats.packets, packets);
 	u64_stats_update_begin(&rq->stats.syncp);
-	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+	for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
 		size_t offset = virtnet_rq_stats_desc[i].offset;
 		u64_stats_t *item, *src;
 
@@ -3400,38 +3419,34 @@ static int virtnet_set_channels(struct net_device *dev,
 	return err;
 }
 
-static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
+static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
 {
 	struct virtnet_stats_map *m;
+	const char *tp, *hw, *desc;
 	int i, j;
 	u8 *p = *data;
 
-	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
 		m = &virtio_net_stats_map[i];
 
 		if (m->queue_type != type)
 			continue;
 
-		if (!(vi->device_stats_cap & m->stat_type))
+		if (!virtnet_stats_supported(vi, m))
 			continue;
 
+		hw = m->from_driver ? "" : "_hw";
+		tp = type == VIRTNET_Q_TYPE_RX ? "rx" : "tx";
+
 		for (j = 0; j < m->num; ++j) {
-			switch (type) {
-			case VIRTNET_Q_TYPE_RX:
-				ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
-				break;
-
-			case VIRTNET_Q_TYPE_TX:
-				ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
-				break;
-
-			case VIRTNET_Q_TYPE_CQ:
-				ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
-				break;
+			desc = m->desc[j].desc;
+
+			if (type == VIRTNET_Q_TYPE_CQ) {
+				ethtool_sprintf(&p, "cq%s_%s", hw, desc);
+				continue;
 			}
+
+			ethtool_sprintf(&p, "%s%u%s_%s", tp, qid, hw, desc);
 		}
 	}
 
@@ -3460,7 +3475,7 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
 		m = &virtio_net_stats_map[i];
 
-		if (!(vi->device_stats_cap & m->stat_type))
+		if (!virtnet_stats_supported(vi, m))
 			continue;
 
 		ctx->bitmap[m->queue_type]   |= m->stat_type;
@@ -3470,51 +3485,64 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 }
 
 /* virtnet_fill_stats - copy the stats to ethtool -S
- * The stats source is the device.
+ * The stats source is the device or the driver.
  *
  * @vi: virtio net info
  * @qid: the vq id
  * @ctx: stats ctx (initiated by virtnet_stats_ctx_init())
- * @base: pointer to the device reply.
- * @type: the type of the device reply
+ * @base: pointer to the device reply or the driver stats structure.
+ * @from_driver: designate the base type (device reply, driver stats)
+ * @type: the type of the device reply (if from_driver is true, this must be
+ *     zero)
  */
 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
 			       struct virtnet_stats_ctx *ctx,
-			       const u8 *base, u8 type)
+			       const u8 *base, bool from_driver, u8 type)
 {
 	u32 queue_type, num_rx, num_tx, num_cq;
+	const struct virtnet_stat_desc *desc;
 	struct virtnet_stats_map *m;
-	u64 offset, bitmap;
+	const u64_stats_t *v_stat;
 	const __le64 *v;
+	u64 offset;
 	int i, j;
 
-	num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
-	num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
 	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
+	num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
+	num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
 
 	queue_type = vq_type(vi, qid);
-	bitmap = ctx->bitmap[queue_type];
 	offset = 0;
 
-	if (queue_type == VIRTNET_Q_TYPE_TX) {
+	if (queue_type == VIRTNET_Q_TYPE_TX)
 		offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
-		offset += VIRTNET_SQ_STATS_LEN;
-	} else if (queue_type == VIRTNET_Q_TYPE_RX) {
-		offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
-	}
+	else if (queue_type == VIRTNET_Q_TYPE_RX)
+		offset = num_cq + num_rx * (qid / 2);
 
 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
 		m = &virtio_net_stats_map[i];
 
-		if (m->stat_type & bitmap)
-			offset += m->num;
+		if (m->queue_type != queue_type)
+			continue;
 
-		if (type != m->reply_type)
+		if (!virtnet_stats_supported(vi, m))
 			continue;
 
+		/* Checking whether this "m" matches "base" or not. */
+		if (from_driver != m->from_driver || type != m->reply_type) {
+			offset += m->num;
+			continue;
+		}
+
 		for (j = 0; j < m->num; ++j) {
-			v = (const __le64 *)(base + m->desc[j].offset);
-			ctx->data[offset + j] = le64_to_cpu(*v);
+			desc = &m->desc[j];
+			if (!from_driver) {
+				v = (const __le64 *)(base + desc->offset);
+				ctx->data[offset + j] = le64_to_cpu(*v);
+			} else {
+				v_stat = (const u64_stats_t *)(base + desc->offset);
+				ctx->data[offset + j] = u64_stats_read(v_stat);
+			}
 		}
 
 		break;
@@ -3548,7 +3576,7 @@ static int __virtnet_get_hw_stats(struct virtnet_info *vi,
 	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
 		hdr = p;
 		qid = le16_to_cpu(hdr->vq_index);
-		virtnet_fill_stats(vi, qid, ctx, p, hdr->type);
+		virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
 	}
 
 	kfree(reply);
@@ -3620,28 +3648,18 @@ static int virtnet_get_hw_stats(struct virtnet_info *vi,
 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	unsigned int i, j;
+	unsigned int i;
 	u8 *p = data;
 
 	switch (stringset) {
 	case ETH_SS_STATS:
-		virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
-
-		for (i = 0; i < vi->curr_queue_pairs; i++) {
-			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
-				ethtool_sprintf(&p, "rx%u_%s", i,
-						virtnet_rq_stats_desc[j].desc);
+		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
 
-			virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
-		}
+		for (i = 0; i < vi->curr_queue_pairs; ++i)
+			virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
 
-		for (i = 0; i < vi->curr_queue_pairs; i++) {
-			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
-				ethtool_sprintf(&p, "tx%u_%s", i,
-						virtnet_sq_stats_desc[j].desc);
-
-			virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
-		}
+		for (i = 0; i < vi->curr_queue_pairs; ++i)
+			virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
 		break;
 	}
 }
@@ -3674,8 +3692,7 @@ static int virtnet_get_sset_count(struct net_device *dev, int sset)
 
 		virtnet_stats_ctx_init(vi, &ctx, NULL);
 
-		pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
-		pair_count += ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
+		pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
 
 		return ctx.desc_num[VIRTNET_Q_TYPE_CQ] + vi->curr_queue_pairs * pair_count;
 	default:
@@ -3688,47 +3705,28 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct virtnet_stats_ctx ctx = {0};
-	unsigned int idx, start, i, j;
+	unsigned int start, i;
 	const u8 *stats_base;
-	const u64_stats_t *p;
-	size_t offset;
 
 	virtnet_stats_ctx_init(vi, &ctx, data);
 	if (virtnet_get_hw_stats(vi, &ctx))
 		dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
 
-	idx = ctx.desc_num[VIRTNET_Q_TYPE_CQ];
-
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
+		struct send_queue *sq = &vi->sq[i];
 
 		stats_base = (const u8 *)&rq->stats;
 		do {
 			start = u64_stats_fetch_begin(&rq->stats.syncp);
-			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
-				offset = virtnet_rq_stats_desc[j].offset;
-				p = (const u64_stats_t *)(stats_base + offset);
-				data[idx + j] = u64_stats_read(p);
-			}
+			virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
-		idx += VIRTNET_RQ_STATS_LEN;
-		idx += ctx.desc_num[VIRTNET_Q_TYPE_RX];
-	}
-
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		struct send_queue *sq = &vi->sq[i];
 
 		stats_base = (const u8 *)&sq->stats;
 		do {
 			start = u64_stats_fetch_begin(&sq->stats.syncp);
-			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
-				offset = virtnet_sq_stats_desc[j].offset;
-				p = (const u64_stats_t *)(stats_base + offset);
-				data[idx + j] = u64_stats_read(p);
-			}
+			virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
-		idx += VIRTNET_SQ_STATS_LEN;
-		idx += ctx.desc_num[VIRTNET_Q_TYPE_TX];
 	}
 }
 
-- 
2.32.0.3.g01195cf9f


* [PATCH net-next v5 6/9] virtio_net: add the total stats field
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (4 preceding siblings ...)
  2024-03-18 11:05 ` [PATCH net-next v5 5/9] virtio_net: stats map include driver stats Xuan Zhuo
@ 2024-03-18 11:05 ` Xuan Zhuo
  2024-03-18 11:06 ` [PATCH net-next v5 7/9] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:05 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

Now we only show per-queue stats.

But for the user, the total value of each stat across all queues is
also valuable, so add total fields; the resulting layout is sketched
below.
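
Sketch of the resulting ethtool data[] layout, derived from the offsets
in the diff below:

/*
 *   [rx totals][tx totals][cq][rx0][rx1]...[rxN-1][tx0][tx1]...[txN-1]
 *
 * so the rx total of field f is the sum over the queues q of
 *
 *   data[num_rx + num_tx + num_cq + q * num_rx + f]
 *
 * and similarly for tx, starting after the rx per-queue blocks.
 */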

NIC statistics:
     rx_packets: 373522
     rx_bytes: 85919736
     rx_drops: 0
     rx_xdp_packets: 0
     rx_xdp_tx: 0
     rx_xdp_redirects: 0
     rx_xdp_drops: 0
     rx_kicks: 11125
     rx_hw_notifications: 0
     rx_hw_packets: 1325870
     rx_hw_bytes: 263348963
     rx_hw_interrupts: 0
     rx_hw_drops: 1451
     rx_hw_drop_overruns: 0
     rx_hw_csum_valid: 1325870
     rx_hw_needs_csum: 1325870
     rx_hw_csum_none: 0
     rx_hw_csum_bad: 0
     rx_hw_ratelimit_packets: 0
     rx_hw_ratelimit_bytes: 0
     tx_packets: 10050
     tx_bytes: 1230176
     tx_xdp_tx: 0
     tx_xdp_tx_drops: 0
     tx_kicks: 10050
     tx_timeouts: 0
     tx_hw_notifications: 0
     tx_hw_packets: 32281
     tx_hw_bytes: 4315590
     tx_hw_interrupts: 0
     tx_hw_drops: 0
     tx_hw_drop_malformed: 0
     tx_hw_csum_none: 0
     tx_hw_needs_csum: 32281
     tx_hw_ratelimit_packets: 0
     tx_hw_ratelimit_bytes: 0
     rx0_packets: 373522
     rx0_bytes: 85919736
     rx0_drops: 0
     rx0_xdp_packets: 0
     rx0_xdp_tx: 0
     rx0_xdp_redirects: 0
     rx0_xdp_drops: 0
     rx0_kicks: 11125
     rx0_hw_notifications: 0
     rx0_hw_packets: 1325870
     rx0_hw_bytes: 263348963
     rx0_hw_interrupts: 0
     rx0_hw_drops: 1451
     rx0_hw_drop_overruns: 0
     rx0_hw_csum_valid: 1325870
     rx0_hw_needs_csum: 1325870
     rx0_hw_csum_none: 0
     rx0_hw_csum_bad: 0
     rx0_hw_ratelimit_packets: 0
     rx0_hw_ratelimit_bytes: 0
     tx0_packets: 10050
     tx0_bytes: 1230176
     tx0_xdp_tx: 0
     tx0_xdp_tx_drops: 0
     tx0_kicks: 10050
     tx0_timeouts: 0
     tx0_hw_notifications: 0
     tx0_hw_packets: 32281
     tx0_hw_bytes: 4315590
     tx0_hw_interrupts: 0
     tx0_hw_drops: 0
     tx0_hw_drop_malformed: 0
     tx0_hw_csum_none: 0
     tx0_hw_needs_csum: 32281
     tx0_hw_ratelimit_packets: 0
     tx0_hw_ratelimit_bytes: 0

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 65 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 60 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 27ed25e70177..12dc1d0d8d2b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3419,6 +3419,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	return err;
 }
 
+/* qid == -1: for rx/tx queue total field */
 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
 {
 	struct virtnet_stats_map *m;
@@ -3446,7 +3447,10 @@ static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid,
 				continue;
 			}
 
-			ethtool_sprintf(&p, "%s%u%s_%s", tp, qid, hw, desc);
+			if (qid < 0)
+				ethtool_sprintf(&p, "%s%s_%s", tp, hw, desc);
+			else
+				ethtool_sprintf(&p, "%s%u%s_%s", tp, qid, hw, desc);
 		}
 	}
 
@@ -3484,6 +3488,49 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 	}
 }
 
+/* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
+ * @sum: the position to store the sum values
+ * @num: field num
+ * @q_value: the first queue fields
+ * @q_num: number of the queues
+ */
+static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
+{
+	u32 step = num;
+	int i, j;
+	u64 *p;
+
+	for (i = 0; i < num; ++i) {
+		p = sum + i;
+		*p = 0;
+
+		for (j = 0; j < q_num; ++j)
+			*p += *(q_value + i + j * step);
+	}
+}
+
+static void virtnet_fill_total_fields(struct virtnet_info *vi,
+				      struct virtnet_stats_ctx *ctx)
+{
+	u64 *data, *first_rx_q, *first_tx_q;
+	u32 num_cq, num_rx, num_tx;
+
+	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
+	num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
+	num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
+
+	first_rx_q = ctx->data + num_rx + num_tx + num_cq;
+	first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
+
+	data = ctx->data;
+
+	stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
+
+	data = ctx->data + num_rx;
+
+	stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
+}
+
 /* virtnet_fill_stats - copy the stats to ethtool -S
  * The stats source is the device or the driver.
  *
@@ -3512,12 +3559,14 @@ static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
 	num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
 
 	queue_type = vq_type(vi, qid);
-	offset = 0;
+
+	/* skip the total fields of pairs */
+	offset = num_rx + num_tx;
 
 	if (queue_type == VIRTNET_Q_TYPE_TX)
-		offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
+		offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
 	else if (queue_type == VIRTNET_Q_TYPE_RX)
-		offset = num_cq + num_rx * (qid / 2);
+		offset += num_cq + num_rx * (qid / 2);
 
 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
 		m = &virtio_net_stats_map[i];
@@ -3653,6 +3702,9 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 
 	switch (stringset) {
 	case ETH_SS_STATS:
+		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
+		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
+
 		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
 
 		for (i = 0; i < vi->curr_queue_pairs; ++i)
@@ -3694,7 +3746,8 @@ static int virtnet_get_sset_count(struct net_device *dev, int sset)
 
 		pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
 
-		return ctx.desc_num[VIRTNET_Q_TYPE_CQ] + vi->curr_queue_pairs * pair_count;
+		return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
+			vi->curr_queue_pairs * pair_count;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3728,6 +3781,8 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 			virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 	}
+
+	virtnet_fill_total_fields(vi, &ctx);
 }
 
 static void virtnet_get_channels(struct net_device *dev,
-- 
2.32.0.3.g01195cf9f


* [PATCH net-next v5 7/9] virtio_net: rename stat tx_timeout to timeout
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (5 preceding siblings ...)
  2024-03-18 11:05 ` [PATCH net-next v5 6/9] virtio_net: add the total stats field Xuan Zhuo
@ 2024-03-18 11:06 ` Xuan Zhuo
  2024-04-10  6:09   ` Jason Wang
  2024-03-18 11:06 ` [PATCH net-next v5 8/9] netdev: add queue stats Xuan Zhuo
                   ` (4 subsequent siblings)
  11 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:06 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf,
	Jiri Pirko

Now, we have this:

    tx_queue_0_tx_timeouts

This records the tx schedule timeouts, but the name contains "tx"
twice. The following is enough:

    tx_queue_0_timeouts

So rename this field.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
---
 drivers/net/virtio_net.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 12dc1d0d8d2b..a24cfde30d08 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -87,7 +87,7 @@ struct virtnet_sq_stats {
 	u64_stats_t xdp_tx;
 	u64_stats_t xdp_tx_drops;
 	u64_stats_t kicks;
-	u64_stats_t tx_timeouts;
+	u64_stats_t timeouts;
 };
 
 struct virtnet_rq_stats {
@@ -111,7 +111,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 	VIRTNET_SQ_STAT("xdp_tx",       xdp_tx),
 	VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
 	VIRTNET_SQ_STAT("kicks",        kicks),
-	VIRTNET_SQ_STAT("tx_timeouts",  tx_timeouts),
+	VIRTNET_SQ_STAT("timeouts",     timeouts),
 };
 
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
@@ -2780,7 +2780,7 @@ static void virtnet_stats(struct net_device *dev,
 			start = u64_stats_fetch_begin(&sq->stats.syncp);
 			tpackets = u64_stats_read(&sq->stats.packets);
 			tbytes   = u64_stats_read(&sq->stats.bytes);
-			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
+			terrors  = u64_stats_read(&sq->stats.timeouts);
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 
 		do {
@@ -4568,7 +4568,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
 
 	u64_stats_update_begin(&sq->stats.syncp);
-	u64_stats_inc(&sq->stats.tx_timeouts);
+	u64_stats_inc(&sq->stats.timeouts);
 	u64_stats_update_end(&sq->stats.syncp);
 
 	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH net-next v5 8/9] netdev: add queue stats
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (6 preceding siblings ...)
  2024-03-18 11:06 ` [PATCH net-next v5 7/9] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
@ 2024-03-18 11:06 ` Xuan Zhuo
  2024-03-18 11:06 ` [PATCH net-next v5 9/9] virtio-net: support queue stat Xuan Zhuo
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:06 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

These stats are commonly supported by devices. Support reporting them via the
netdev-genl queue stats interface. A minimal driver-side sketch follows the
list of names below.

name: rx-hw-drops
name: rx-hw-drop-overruns
name: rx-csum-unnecessary
name: rx-csum-none
name: rx-csum-bad
name: rx-hw-gro-packets
name: rx-hw-gro-bytes
name: rx-hw-gro-wire-packets
name: rx-hw-gro-wire-bytes
name: rx-hw-drop-ratelimits
name: tx-hw-drops
name: tx-hw-drop-errors
name: tx-csum-none
name: tx-needs-csum
name: tx-hw-gso-packets
name: tx-hw-gso-bytes
name: tx-hw-gso-wire-packets
name: tx-hw-gso-wire-bytes
name: tx-hw-drop-ratelimits
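
The new fields extend struct netdev_queue_stats_rx and struct
netdev_queue_stats_tx in <net/netdev_queues.h>. A minimal sketch of how a
driver could fill a few of them from its own per-queue counters ("foo", its
private structs and counter names are made up for illustration; virtio-net
itself is converted in patch 9/9):

static void foo_get_queue_stats_rx(struct net_device *dev, int idx,
                                   struct netdev_queue_stats_rx *rx)
{
        struct foo_priv *priv = netdev_priv(dev);
        struct foo_rx_ring *ring = &priv->rx_ring[idx];

        /* Copy the driver's per-queue counters into the qstats struct.
         * The callback is hooked up through struct netdev_stat_ops
         * (dev->stat_ops); see patch 9/9 for the virtio-net version.
         */
        rx->packets = ring->packets;
        rx->bytes = ring->bytes;
        rx->hw_drops = ring->hw_drops;
        rx->csum_unnecessary = ring->csum_unnecessary;
        rx->csum_bad = ring->csum_bad;
}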

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 Documentation/netlink/specs/netdev.yaml | 104 ++++++++++++++++++++++++
 include/net/netdev_queues.h             |  27 ++++++
 include/uapi/linux/netdev.h             |  19 +++++
 net/core/netdev-genl.c                  |  23 +++++-
 tools/include/uapi/linux/netdev.h       |  19 +++++
 5 files changed, 190 insertions(+), 2 deletions(-)

diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 76352dbd2be4..31b51b60ef13 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -335,6 +335,110 @@ attribute-sets:
           Allocation failure may, or may not result in a packet drop, depending
           on driver implementation and whether system recovers quickly.
         type: uint
+      -
+        name: rx-hw-drops
+        doc: |
+          Number of all packets which entered the device, but never left it,
+          including but not limited to: packets dropped due to lack of buffer
+          space, processing errors, explicit or implicit policies and packet
+          filters.
+        type: uint
+      -
+        name: rx-hw-drop-overruns
+        doc: |
+          Number of packets dropped due to transient lack of resources, such as
+          buffer space, host descriptors etc.
+        type: uint
+      -
+        name: rx-csum-unnecessary
+        doc: Number of packets that were marked as CHECKSUM_UNNECESSARY.
+        type: uint
+      -
+        name: rx-csum-none
+        doc: Number of packets that were not checksummed by device.
+        type: uint
+      -
+        name: rx-csum-bad
+        doc: |
+          Number of packets with bad checksum. The packets are not discarded,
+          but still delivered to the stack.
+        type: uint
+      -
+        name: rx-hw-gro-packets
+        doc: |
+          Number of packets that were coalesced from smaller packets by the device.
+          Counts only packets coalesced with the HW-GRO netdevice feature,
+          LRO-coalesced packets are not counted.
+        type: uint
+      -
+        name: rx-hw-gro-bytes
+        doc: See `rx-hw-gro-packets`.
+        type: uint
+      -
+        name: rx-hw-gro-wire-packets
+        doc: |
+          Number of packets that were coalesced into bigger packets with the HW-GRO
+          netdevice feature. LRO-coalesced packets are not counted.
+        type: uint
+      -
+        name: rx-hw-gro-wire-bytes
+        doc: See `rx-hw-gro-wire-packets`.
+        type: uint
+      -
+        name: rx-hw-drop-ratelimits
+        doc: |
+          Number of the packets dropped by the device due to the received
+          packets bitrate exceeding the device rate limit.
+        type: uint
+      -
+        name: tx-hw-drops
+        doc: |
+          Number of packets that arrived at the device but never left it,
+          encompassing packets dropped for reasons such as processing errors, as
+          well as those affected by explicitly defined policies and packet
+          filtering criteria.
+        type: uint
+      -
+        name: tx-hw-drop-errors
+        doc: Number of packets dropped because they were invalid or malformed.
+        type: uint
+      -
+        name: tx-csum-none
+        doc: |
+          Number of packets that did not require the device to calculate the
+          checksum.
+        type: uint
+      -
+        name: tx-needs-csum
+        doc: |
+          Number of packets that required the device to calculate the checksum.
+        type: uint
+      -
+        name: tx-hw-gso-packets
+        doc: |
+          Number of packets that necessitated segmentation into smaller packets
+          by the device.
+        type: uint
+      -
+        name: tx-hw-gso-bytes
+        doc: See `tx-hw-gso-packets`.
+        type: uint
+      -
+        name: tx-hw-gso-wire-packets
+        doc: |
+          Number of wire-sized packets generated by processing
+          `tx-hw-gso-packets`.
+        type: uint
+      -
+        name: tx-hw-gso-wire-bytes
+        doc: See `tx-hw-gso-wire-packets`.
+        type: uint
+      -
+        name: tx-hw-drop-ratelimits
+        doc: |
+          Number of the packets dropped by the device due to the transmit
+          packets bitrate exceeding the device rate limit.
+        type: uint
 
 operations:
   list:
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 1ec408585373..c7ac4539eafc 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -9,11 +9,38 @@ struct netdev_queue_stats_rx {
 	u64 bytes;
 	u64 packets;
 	u64 alloc_fail;
+
+	u64 hw_drops;
+	u64 hw_drop_overruns;
+
+	u64 csum_unnecessary;
+	u64 csum_none;
+	u64 csum_bad;
+
+	u64 hw_gro_packets;
+	u64 hw_gro_bytes;
+	u64 hw_gro_wire_packets;
+	u64 hw_gro_wire_bytes;
+
+	u64 hw_drop_ratelimits;
 };
 
 struct netdev_queue_stats_tx {
 	u64 bytes;
 	u64 packets;
+
+	u64 hw_drops;
+	u64 hw_drop_errors;
+
+	u64 csum_none;
+	u64 needs_csum;
+
+	u64 hw_gso_packets;
+	u64 hw_gso_bytes;
+	u64 hw_gso_wire_packets;
+	u64 hw_gso_wire_bytes;
+
+	u64 hw_drop_ratelimits;
 };
 
 /**
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index bb65ee840cda..cf24f1d9adf8 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -146,6 +146,25 @@ enum {
 	NETDEV_A_QSTATS_TX_PACKETS,
 	NETDEV_A_QSTATS_TX_BYTES,
 	NETDEV_A_QSTATS_RX_ALLOC_FAIL,
+	NETDEV_A_QSTATS_RX_HW_DROPS,
+	NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+	NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
+	NETDEV_A_QSTATS_RX_CSUM_NONE,
+	NETDEV_A_QSTATS_RX_CSUM_BAD,
+	NETDEV_A_QSTATS_RX_HW_GRO_PACKETS,
+	NETDEV_A_QSTATS_RX_HW_GRO_BYTES,
+	NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS,
+	NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES,
+	NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS,
+	NETDEV_A_QSTATS_TX_HW_DROPS,
+	NETDEV_A_QSTATS_TX_HW_DROP_ERRORS,
+	NETDEV_A_QSTATS_TX_CSUM_NONE,
+	NETDEV_A_QSTATS_TX_NEEDS_CSUM,
+	NETDEV_A_QSTATS_TX_HW_GSO_PACKETS,
+	NETDEV_A_QSTATS_TX_HW_GSO_BYTES,
+	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS,
+	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES,
+	NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS,
 
 	__NETDEV_A_QSTATS_MAX,
 	NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 7004b3399c2b..a2bf9af2dcf6 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -489,7 +489,17 @@ netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
 {
 	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
 	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
-	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail))
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
 		return -EMSGSIZE;
 	return 0;
 }
@@ -498,7 +508,16 @@ static int
 netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
 {
 	if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
-	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes))
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
+	    netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits))
 		return -EMSGSIZE;
 	return 0;
 }
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index bb65ee840cda..cf24f1d9adf8 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -146,6 +146,25 @@ enum {
 	NETDEV_A_QSTATS_TX_PACKETS,
 	NETDEV_A_QSTATS_TX_BYTES,
 	NETDEV_A_QSTATS_RX_ALLOC_FAIL,
+	NETDEV_A_QSTATS_RX_HW_DROPS,
+	NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS,
+	NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY,
+	NETDEV_A_QSTATS_RX_CSUM_NONE,
+	NETDEV_A_QSTATS_RX_CSUM_BAD,
+	NETDEV_A_QSTATS_RX_HW_GRO_PACKETS,
+	NETDEV_A_QSTATS_RX_HW_GRO_BYTES,
+	NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS,
+	NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES,
+	NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS,
+	NETDEV_A_QSTATS_TX_HW_DROPS,
+	NETDEV_A_QSTATS_TX_HW_DROP_ERRORS,
+	NETDEV_A_QSTATS_TX_CSUM_NONE,
+	NETDEV_A_QSTATS_TX_NEEDS_CSUM,
+	NETDEV_A_QSTATS_TX_HW_GSO_PACKETS,
+	NETDEV_A_QSTATS_TX_HW_GSO_BYTES,
+	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS,
+	NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES,
+	NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS,
 
 	__NETDEV_A_QSTATS_MAX,
 	NETDEV_A_QSTATS_MAX = (__NETDEV_A_QSTATS_MAX - 1)
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* [PATCH net-next v5 9/9] virtio-net: support queue stat
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (7 preceding siblings ...)
  2024-03-18 11:06 ` [PATCH net-next v5 8/9] netdev: add queue stats Xuan Zhuo
@ 2024-03-18 11:06 ` Xuan Zhuo
  2024-03-18 11:52 ` [PATCH net-next v5 0/9] virtio-net: support device stats Jiri Pirko
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:06 UTC (permalink / raw)
  To: netdev
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Jason Wang, Xuan Zhuo, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

To enhance functionality, we now support reporting statistics through
the netdev-generic netlink (netdev-genl) queue stats interface. However,
this does not extend to all statistics, so a new field, qstat_offset,
has been introduced. This field determines which statistics should be
reported via netdev-genl queue stats.

Given that queue stats are retrieved individually per queue, it's
necessary for the virtnet_get_hw_stats() function to be capable of
fetching statistics for a specific queue.

As the document https://docs.kernel.org/next/networking/statistics.html#notes-for-driver-authors
says, we should not duplicate in ethtool the stats that are already reported
via the netlink API. So a stat that is exposed as a queue stat is no longer
reported by ethtool -S.
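
In code form, the rule is simply the sign of qstat_offset (a hypothetical
helper, not part of this patch, just to illustrate):

static bool virtnet_desc_is_qstat(const struct virtnet_stat_desc *desc)
{
        /* qstat_offset >= 0: the counter maps onto a field of struct
         * netdev_queue_stats_rx/tx and is reported via netdev-genl queue
         * stats, so "ethtool -S" skips it.  qstat_offset < 0: the counter
         * is only visible in "ethtool -S".
         */
        return desc->qstat_offset >= 0;
}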

python3 ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml
    --dump qstats-get --json '{"scope": "queue"}'
[{'ifindex': 2,
  'queue-id': 0,
  'queue-type': 'rx',
  'rx-bytes': 157844011,
  'rx-csum-bad': 0,
  'rx-csum-none': 0,
  'rx-csum-unnecessary': 2195386,
  'rx-hw-drop-overruns': 0,
  'rx-hw-drop-ratelimits': 0,
  'rx-hw-drops': 12964,
  'rx-packets': 598929},
 {'ifindex': 2,
  'queue-id': 0,
  'queue-type': 'tx',
  'tx-bytes': 1938511,
  'tx-csum-none': 0,
  'tx-hw-drop-errors': 0,
  'tx-hw-drop-ratelimits': 0,
  'tx-hw-drops': 0,
  'tx-needs-csum': 61263,
  'tx-packets': 15515}]

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 254 +++++++++++++++++++++++++++++++--------
 1 file changed, 203 insertions(+), 51 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a24cfde30d08..42c38c121a7a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -24,6 +24,7 @@
 #include <net/xdp.h>
 #include <net/net_failover.h>
 #include <net/netdev_rx_queue.h>
+#include <net/netdev_queues.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -78,6 +79,7 @@ static const unsigned long guest_offloads[] = {
 struct virtnet_stat_desc {
 	char desc[ETH_GSTRING_LEN];
 	size_t offset;
+	int qstat_offset;
 };
 
 struct virtnet_sq_stats {
@@ -102,12 +104,27 @@ struct virtnet_rq_stats {
 	u64_stats_t kicks;
 };
 
-#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m)}
-#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m)}
+#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
+#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
+
+#define VIRTNET_SQ_STAT_QSTAT(name, m)				\
+	{							\
+		name,						\
+		offsetof(struct virtnet_sq_stats, m),		\
+		offsetof(struct netdev_queue_stats_tx, m),	\
+	}
+
+#define VIRTNET_RQ_STAT_QSTAT(name, m)				\
+	{							\
+		name,						\
+		offsetof(struct virtnet_rq_stats, m),		\
+		offsetof(struct netdev_queue_stats_rx, m),	\
+	}
 
 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
-	VIRTNET_SQ_STAT("packets",      packets),
-	VIRTNET_SQ_STAT("bytes",        bytes),
+	VIRTNET_SQ_STAT_QSTAT("packets", packets),
+	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
+
 	VIRTNET_SQ_STAT("xdp_tx",       xdp_tx),
 	VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
 	VIRTNET_SQ_STAT("kicks",        kicks),
@@ -115,8 +132,9 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 };
 
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
-	VIRTNET_RQ_STAT("packets",       packets),
-	VIRTNET_RQ_STAT("bytes",         bytes),
+	VIRTNET_RQ_STAT_QSTAT("packets", packets),
+	VIRTNET_RQ_STAT_QSTAT("bytes",   bytes),
+
 	VIRTNET_RQ_STAT("drops",         drops),
 	VIRTNET_RQ_STAT("xdp_packets",   xdp_packets),
 	VIRTNET_RQ_STAT("xdp_tx",        xdp_tx),
@@ -126,13 +144,27 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 };
 
 #define VIRTNET_STATS_DESC_CQ(name) \
-	{#name, offsetof(struct virtio_net_stats_cvq, name)}
+	{#name, offsetof(struct virtio_net_stats_cvq, name), -1}
 
 #define VIRTNET_STATS_DESC_RX(class, name) \
-	{#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
+	{#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
 
 #define VIRTNET_STATS_DESC_TX(class, name) \
-	{#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
+	{#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
+
+#define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field)			\
+	{									\
+		#name,								\
+		offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name),	\
+		offsetof(struct netdev_queue_stats_rx, qstat_field),		\
+	}
+
+#define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field)			\
+	{									\
+		#name,								\
+		offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name),	\
+		offsetof(struct netdev_queue_stats_tx, qstat_field),		\
+	}
 
 static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
 	VIRTNET_STATS_DESC_CQ(command_num),
@@ -146,8 +178,8 @@ static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
 	VIRTNET_STATS_DESC_RX(basic, notifications),
 	VIRTNET_STATS_DESC_RX(basic, interrupts),
 
-	VIRTNET_STATS_DESC_RX(basic, drops),
-	VIRTNET_STATS_DESC_RX(basic, drop_overruns),
+	VIRTNET_STATS_DESC_RX_QSTAT(basic, drops,         hw_drops),
+	VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
 };
 
 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
@@ -157,46 +189,47 @@ static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
 	VIRTNET_STATS_DESC_TX(basic, notifications),
 	VIRTNET_STATS_DESC_TX(basic, interrupts),
 
-	VIRTNET_STATS_DESC_TX(basic, drops),
-	VIRTNET_STATS_DESC_TX(basic, drop_malformed),
+	VIRTNET_STATS_DESC_TX_QSTAT(basic, drops,          hw_drops),
+	VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
 };
 
 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
-	VIRTNET_STATS_DESC_RX(csum, csum_valid),
-	VIRTNET_STATS_DESC_RX(csum, needs_csum),
+	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
+	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none,  csum_none),
+	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad,   csum_bad),
 
-	VIRTNET_STATS_DESC_RX(csum, csum_none),
-	VIRTNET_STATS_DESC_RX(csum, csum_bad),
+	VIRTNET_STATS_DESC_RX(csum, needs_csum),
 };
 
 static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
-	VIRTNET_STATS_DESC_TX(csum, needs_csum),
-	VIRTNET_STATS_DESC_TX(csum, csum_none),
+	VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none,  csum_none),
+	VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
 };
 
 static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
-	VIRTNET_STATS_DESC_RX(gso, gso_packets),
-	VIRTNET_STATS_DESC_RX(gso, gso_bytes),
-	VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
-	VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
+	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets,           hw_gro_packets),
+	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes,             hw_gro_bytes),
+	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
+	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced,   hw_gro_wire_bytes),
 };
 
 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
-	VIRTNET_STATS_DESC_TX(gso, gso_packets),
-	VIRTNET_STATS_DESC_TX(gso, gso_bytes),
-	VIRTNET_STATS_DESC_TX(gso, gso_segments),
-	VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
+	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets,        hw_gso_packets),
+	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes,          hw_gso_bytes),
+	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments,       hw_gso_wire_packets),
+	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
+
 	VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
 	VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
 };
 
 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
-	VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
+	VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
 	VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
 };
 
 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
-	VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
+	VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
 	VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
 };
 
@@ -3442,6 +3475,9 @@ static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid,
 		for (j = 0; j < m->num; ++j) {
 			desc = m->desc[j].desc;
 
+			if (m->desc[j].qstat_offset >= 0)
+				continue;
+
 			if (type == VIRTNET_Q_TYPE_CQ) {
 				ethtool_sprintf(&p, "cq%s_%s", hw, desc);
 				continue;
@@ -3458,6 +3494,9 @@ static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid,
 }
 
 struct virtnet_stats_ctx {
+	/* whether the stats are written to qstats or to ethtool -S */
+	bool to_qstat;
+
 	u32 desc_num[3];
 
 	u32 bitmap[3];
@@ -3469,12 +3508,13 @@ struct virtnet_stats_ctx {
 
 static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 				   struct virtnet_stats_ctx *ctx,
-				   u64 *data)
+				   u64 *data, bool to_qstat)
 {
 	struct virtnet_stats_map *m;
-	int i;
+	int i, num, j;
 
 	ctx->data = data;
+	ctx->to_qstat = to_qstat;
 
 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
 		m = &virtio_net_stats_map[i];
@@ -3482,8 +3522,17 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 		if (!virtnet_stats_supported(vi, m))
 			continue;
 
+		num = 0;
+		for (j = 0; j < m->num; ++j) {
+			if ((m->desc[j].qstat_offset >= 0) == ctx->to_qstat)
+				++num;
+		}
+
+		if (!num)
+			continue;
+
 		ctx->bitmap[m->queue_type]   |= m->stat_type;
-		ctx->desc_num[m->queue_type] += m->num;
+		ctx->desc_num[m->queue_type] += num;
 		ctx->size[m->queue_type]     += m->len;
 	}
 }
@@ -3531,7 +3580,7 @@ static void virtnet_fill_total_fields(struct virtnet_info *vi,
 	stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
 }
 
-/* virtnet_fill_stats - copy the stats to ethtool -S
+/* virtnet_fill_stats - copy the stats to qstats or ethtool -S
  * The stats source is the device or the driver.
  *
  * @vi: virtio net info
@@ -3550,8 +3599,9 @@ static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
 	const struct virtnet_stat_desc *desc;
 	struct virtnet_stats_map *m;
 	const u64_stats_t *v_stat;
+	u64 offset, value;
 	const __le64 *v;
-	u64 offset;
+	bool skip;
 	int i, j;
 
 	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
@@ -3577,24 +3627,46 @@ static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
 		if (!virtnet_stats_supported(vi, m))
 			continue;
 
+		skip = false;
+
 		/* Checking whether this "m" matches "base" or not. */
-		if (from_driver != m->from_driver || type != m->reply_type) {
-			offset += m->num;
-			continue;
-		}
+		if (from_driver != m->from_driver || type != m->reply_type)
+			skip = true;
 
 		for (j = 0; j < m->num; ++j) {
 			desc = &m->desc[j];
+
+			if ((desc->qstat_offset >= 0) != ctx->to_qstat)
+				continue;
+
+			if (skip) {
+				/* update write offset of the ctx->data. */
+				++offset;
+				continue;
+			}
+
 			if (!from_driver) {
 				v = (const __le64 *)(base + desc->offset);
-				ctx->data[offset + j] = le64_to_cpu(*v);
+				value = le64_to_cpu(*v);
 			} else {
 				v_stat = (const u64_stats_t *)(base + desc->offset);
-				ctx->data[offset + j] = u64_stats_read(v_stat);
+				value = u64_stats_read(v_stat);
+			}
+
+			if (ctx->to_qstat) {
+				/* store to the queue stats structure */
+				if (desc->qstat_offset >= 0) {
+					offset = desc->qstat_offset / sizeof(*ctx->data);
+					ctx->data[offset] = value;
+				}
+			} else {
+				/* store to the ethtool -S data area */
+				ctx->data[offset++] = value;
 			}
 		}
 
-		break;
+		if (!skip)
+			break;
 	}
 }
 
@@ -3648,21 +3720,33 @@ static void virtnet_make_stat_req(struct virtnet_info *vi,
 	*idx += 1;
 }
 
+/* qid: -1: get stats of all vq.
+ *     >= 0: get the stats for a specific vq. This must not be the cvq.
+ */
 static int virtnet_get_hw_stats(struct virtnet_info *vi,
-				struct virtnet_stats_ctx *ctx)
+				struct virtnet_stats_ctx *ctx, int qid)
 {
+	int qnum, i, j, res_size, qtype, last_vq, first_vq;
 	struct virtio_net_ctrl_queue_stats *req;
-	int qnum, i, j, res_size, qtype, last_vq;
+	bool enable_cvq;
 	void *reply;
 
 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
 		return 0;
 
-	last_vq = vi->curr_queue_pairs * 2 - 1;
+	if (qid == -1) {
+		last_vq = vi->curr_queue_pairs * 2 - 1;
+		first_vq = 0;
+		enable_cvq = true;
+	} else {
+		last_vq = qid;
+		first_vq = qid;
+		enable_cvq = false;
+	}
 
 	qnum = 0;
 	res_size = 0;
-	for (i = 0; i <= last_vq ; ++i) {
+	for (i = first_vq; i <= last_vq ; ++i) {
 		qtype = vq_type(vi, i);
 		if (ctx->bitmap[qtype]) {
 			++qnum;
@@ -3670,7 +3754,7 @@ static int virtnet_get_hw_stats(struct virtnet_info *vi,
 		}
 	}
 
-	if (ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
+	if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
 		res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
 		qnum += 1;
 	}
@@ -3686,10 +3770,11 @@ static int virtnet_get_hw_stats(struct virtnet_info *vi,
 	}
 
 	j = 0;
-	for (i = 0; i <= last_vq ; ++i)
+	for (i = first_vq; i <= last_vq ; ++i)
 		virtnet_make_stat_req(vi, ctx, req, i, &j);
 
-	virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
+	if (enable_cvq)
+		virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
 
 	return __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
 }
@@ -3742,7 +3827,7 @@ static int virtnet_get_sset_count(struct net_device *dev, int sset)
 			}
 		}
 
-		virtnet_stats_ctx_init(vi, &ctx, NULL);
+		virtnet_stats_ctx_init(vi, &ctx, NULL, false);
 
 		pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
 
@@ -3761,8 +3846,8 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 	unsigned int start, i;
 	const u8 *stats_base;
 
-	virtnet_stats_ctx_init(vi, &ctx, data);
-	if (virtnet_get_hw_stats(vi, &ctx))
+	virtnet_stats_ctx_init(vi, &ctx, data, false);
+	if (virtnet_get_hw_stats(vi, &ctx, -1))
 		dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
 
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
@@ -4301,6 +4386,72 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
 	.set_rxnfc = virtnet_set_rxnfc,
 };
 
+static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
+				       struct netdev_queue_stats_rx *stats)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct receive_queue *rq = &vi->rq[i];
+	struct virtnet_stats_ctx ctx = {0};
+
+	virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
+
+	virtnet_get_hw_stats(vi, &ctx, i * 2);
+	virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
+}
+
+static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
+				       struct netdev_queue_stats_tx *stats)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct send_queue *sq = &vi->sq[i];
+	struct virtnet_stats_ctx ctx = {0};
+
+	virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
+
+	virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
+	virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
+}
+
+static void virtnet_get_base_stats(struct net_device *dev,
+				   struct netdev_queue_stats_rx *rx,
+				   struct netdev_queue_stats_tx *tx)
+{
+	/* The queue stats of the virtio-net will not be reset. So here we
+	 * return 0.
+	 */
+	rx->bytes = 0;
+	rx->packets = 0;
+	rx->alloc_fail = 0;
+	rx->hw_drops = 0;
+	rx->hw_drop_overruns = 0;
+	rx->csum_unnecessary = 0;
+	rx->csum_none = 0;
+	rx->csum_bad = 0;
+	rx->hw_gro_packets = 0;
+	rx->hw_gro_bytes = 0;
+	rx->hw_gro_wire_packets = 0;
+	rx->hw_gro_wire_bytes = 0;
+	rx->hw_drop_ratelimits = 0;
+
+	tx->bytes = 0;
+	tx->packets = 0;
+	tx->hw_drops = 0;
+	tx->hw_drop_errors = 0;
+	tx->csum_none = 0;
+	tx->needs_csum = 0;
+	tx->hw_gso_packets = 0;
+	tx->hw_gso_bytes = 0;
+	tx->hw_gso_wire_packets = 0;
+	tx->hw_gso_wire_bytes = 0;
+	tx->hw_drop_ratelimits = 0;
+}
+
+static const struct netdev_stat_ops virtnet_stat_ops = {
+	.get_queue_stats_rx	= virtnet_get_queue_stats_rx,
+	.get_queue_stats_tx	= virtnet_get_queue_stats_tx,
+	.get_base_stats		= virtnet_get_base_stats,
+};
+
 static void virtnet_freeze_down(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
@@ -5060,6 +5211,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
 			   IFF_TX_SKB_NO_LINEAR;
 	dev->netdev_ops = &virtnet_netdev;
+	dev->stat_ops = &virtnet_stat_ops;
 	dev->features = NETIF_F_HIGHDMA;
 
 	dev->ethtool_ops = &virtnet_ethtool_ops;
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (8 preceding siblings ...)
  2024-03-18 11:06 ` [PATCH net-next v5 9/9] virtio-net: support queue stat Xuan Zhuo
@ 2024-03-18 11:52 ` Jiri Pirko
  2024-03-18 11:53   ` Xuan Zhuo
  2024-03-20  9:45 ` Xuan Zhuo
  2024-04-22 20:33 ` Michael S. Tsirkin
  11 siblings, 1 reply; 34+ messages in thread
From: Jiri Pirko @ 2024-03-18 11:52 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Jason Wang, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

Mon, Mar 18, 2024 at 12:05:53PM CET, xuanzhuo@linux.alibaba.com wrote:
>As the spec:
>
>https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
>The virtio net supports to get device stats.
>
>Please review.

net-next is closed. Please resubmit next week.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-18 11:52 ` [PATCH net-next v5 0/9] virtio-net: support device stats Jiri Pirko
@ 2024-03-18 11:53   ` Xuan Zhuo
  2024-03-18 12:19     ` Jiri Pirko
  0 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-18 11:53 UTC (permalink / raw)
  To: Jiri Pirko
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Jason Wang, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, 18 Mar 2024 12:52:18 +0100, Jiri Pirko <jiri@resnulli.us> wrote:
> Mon, Mar 18, 2024 at 12:05:53PM CET, xuanzhuo@linux.alibaba.com wrote:
> >As the spec:
> >
> >https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> >
> >The virtio net supports to get device stats.
> >
> >Please review.
>
> net-next is closed. Please resubmit next week.


For review.

Thanks.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-18 11:53   ` Xuan Zhuo
@ 2024-03-18 12:19     ` Jiri Pirko
  2024-03-19 10:12       ` Paolo Abeni
  0 siblings, 1 reply; 34+ messages in thread
From: Jiri Pirko @ 2024-03-18 12:19 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Jason Wang, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

Mon, Mar 18, 2024 at 12:53:38PM CET, xuanzhuo@linux.alibaba.com wrote:
>On Mon, 18 Mar 2024 12:52:18 +0100, Jiri Pirko <jiri@resnulli.us> wrote:
>> Mon, Mar 18, 2024 at 12:05:53PM CET, xuanzhuo@linux.alibaba.com wrote:
>> >As the spec:
>> >
>> >https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>> >
>> >The virtio net supports to get device stats.
>> >
>> >Please review.
>>
>> net-next is closed. Please resubmit next week.
>
>
>For review.

RFC, or wait.

>
>Thanks.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-18 12:19     ` Jiri Pirko
@ 2024-03-19 10:12       ` Paolo Abeni
  2024-03-20  8:04         ` Xuan Zhuo
  0 siblings, 1 reply; 34+ messages in thread
From: Paolo Abeni @ 2024-03-19 10:12 UTC (permalink / raw)
  To: Jiri Pirko, Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Michael S. Tsirkin, Jason Wang, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, 2024-03-18 at 13:19 +0100, Jiri Pirko wrote:
> Mon, Mar 18, 2024 at 12:53:38PM CET, xuanzhuo@linux.alibaba.com wrote:
> > On Mon, 18 Mar 2024 12:52:18 +0100, Jiri Pirko <jiri@resnulli.us> wrote:
> > > Mon, Mar 18, 2024 at 12:05:53PM CET, xuanzhuo@linux.alibaba.com wrote:
> > > > As the spec:
> > > > 
> > > > https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> > > > 
> > > > The virtio net supports to get device stats.
> > > > 
> > > > Please review.
> > > 
> > > net-next is closed. Please resubmit next week.
> > 
> > 
> > For review.
> 
> RFC, or wait.

@Xuan, please note that you received exactly the same feedback on your
previous submission, a few days ago. While I do understand the legit
interest in reviews, ignoring explicit feedback tend to bring no
feedback at all.

Paolo


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-19 10:12       ` Paolo Abeni
@ 2024-03-20  8:04         ` Xuan Zhuo
  2024-03-20 12:23           ` Jiri Pirko
  2024-03-21  3:38           ` Jakub Kicinski
  0 siblings, 2 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-20  8:04 UTC (permalink / raw)
  To: Paolo Abeni
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Michael  S. Tsirkin, Jason Wang, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf,
	Jiri Pirko

On Tue, 19 Mar 2024 11:12:23 +0100, Paolo Abeni <pabeni@redhat.com> wrote:
> On Mon, 2024-03-18 at 13:19 +0100, Jiri Pirko wrote:
> > Mon, Mar 18, 2024 at 12:53:38PM CET, xuanzhuo@linux.alibaba.com wrote:
> > > On Mon, 18 Mar 2024 12:52:18 +0100, Jiri Pirko <jiri@resnulli.us> wrote:
> > > > Mon, Mar 18, 2024 at 12:05:53PM CET, xuanzhuo@linux.alibaba.com wrote:
> > > > > As the spec:
> > > > >
> > > > > https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> > > > >
> > > > > The virtio net supports to get device stats.
> > > > >
> > > > > Please review.
> > > >
> > > > net-next is closed. Please resubmit next week.
> > >
> > >
> > > For review.
> >
> > RFC, or wait.
>
> @Xuan, please note that you received exactly the same feedback on your
> previous submission, a few days ago. While I do understand the legit
> interest in reviews, ignoring explicit feedback tend to bring no
> feedback at all.

Sorry.

I have a question regarding the workflow for feature discussions. If we
consistently engage in discussions about a particular feature, this may result
in the submission of multiple patch sets. In light of this, should we modify the
usage of "PATCH" or "RFC" in our submissions depending on whether the merge
window is open or closed? This causes the title of our patch sets to keep
changing.

Or I miss something.


Thanks.


>
> Paolo
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (9 preceding siblings ...)
  2024-03-18 11:52 ` [PATCH net-next v5 0/9] virtio-net: support device stats Jiri Pirko
@ 2024-03-20  9:45 ` Xuan Zhuo
  2024-04-22 20:33 ` Michael S. Tsirkin
  11 siblings, 0 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-20  9:45 UTC (permalink / raw)
  To: Jason Wang
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Michael S. Tsirkin, Alexei Starovoitov, Daniel Borkmann,
	Jesper Dangaard Brouer, John Fastabend, Stanislav Fomichev,
	Amritha Nambiar, Larysa Zaremba, Sridhar Samudrala,
	Maciej Fijalkowski, virtualization, bpf, netdev

Hope this is on your list.

Thanks.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-20  8:04         ` Xuan Zhuo
@ 2024-03-20 12:23           ` Jiri Pirko
  2024-03-21  3:38           ` Jakub Kicinski
  1 sibling, 0 replies; 34+ messages in thread
From: Jiri Pirko @ 2024-03-20 12:23 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Paolo Abeni, netdev, David S. Miller, Eric Dumazet,
	Jakub Kicinski, Michael  S. Tsirkin, Jason Wang,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Stanislav Fomichev, Amritha Nambiar,
	Larysa Zaremba, Sridhar Samudrala, Maciej Fijalkowski,
	virtualization, bpf

Wed, Mar 20, 2024 at 09:04:21AM CET, xuanzhuo@linux.alibaba.com wrote:
>On Tue, 19 Mar 2024 11:12:23 +0100, Paolo Abeni <pabeni@redhat.com> wrote:
>> On Mon, 2024-03-18 at 13:19 +0100, Jiri Pirko wrote:
>> > Mon, Mar 18, 2024 at 12:53:38PM CET, xuanzhuo@linux.alibaba.com wrote:
>> > > On Mon, 18 Mar 2024 12:52:18 +0100, Jiri Pirko <jiri@resnulli.us> wrote:
>> > > > Mon, Mar 18, 2024 at 12:05:53PM CET, xuanzhuo@linux.alibaba.com wrote:
>> > > > > As the spec:
>> > > > >
>> > > > > https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>> > > > >
>> > > > > The virtio net supports to get device stats.
>> > > > >
>> > > > > Please review.
>> > > >
>> > > > net-next is closed. Please resubmit next week.
>> > >
>> > >
>> > > For review.
>> >
>> > RFC, or wait.
>>
>> @Xuan, please note that you received exactly the same feedback on your
>> previous submission, a few days ago. While I do understand the legit
>> interest in reviews, ignoring explicit feedback tend to bring no
>> feedback at all.
>
>Sorry.
>
>I have a question regarding the workflow for feature discussions. If we
>consistently engage in discussions about a particular feature, this may result
>in the submission of multiple patch sets. In light of this, should we modify the
>usage of "PATCH" or "RFC" in our submissions depending on whether the merge

No, just wait 2 weeks, simple.


>window is open or closed? This causes the title of our patch sets to keep
>changing.
>
>Or I miss something.
>
>
>Thanks.
>
>
>>
>> Paolo
>>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-20  8:04         ` Xuan Zhuo
  2024-03-20 12:23           ` Jiri Pirko
@ 2024-03-21  3:38           ` Jakub Kicinski
  2024-03-21  3:54             ` Xuan Zhuo
  1 sibling, 1 reply; 34+ messages in thread
From: Jakub Kicinski @ 2024-03-21  3:38 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Paolo Abeni, netdev, David S. Miller, Eric Dumazet,
	Michael  S. Tsirkin, Jason Wang, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf,
	Jiri Pirko

On Wed, 20 Mar 2024 16:04:21 +0800 Xuan Zhuo wrote:
> I have a question regarding the workflow for feature discussions. If we
> consistently engage in discussions about a particular feature, this may result
> in the submission of multiple patch sets. In light of this, should we modify the
> usage of "PATCH" or "RFC" in our submissions depending on whether the merge
> window is open or closed? This causes the title of our patch sets to keep
> changing.

Is switching between RFC and PATCH causing issues?
Should be a simple modification to the git format-patch argument.
But perhaps your workload is different than mine.

The merge window is only 2 weeks every 10 weeks, it's not changing
often, I don't think.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-21  3:38           ` Jakub Kicinski
@ 2024-03-21  3:54             ` Xuan Zhuo
  2024-03-21 12:42               ` Simon Horman
  0 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-03-21  3:54 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Paolo Abeni, netdev, David S.  Miller, Eric Dumazet,
	Michael  S. Tsirkin, Jason Wang, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf,
	Jiri Pirko

On Wed, 20 Mar 2024 20:38:01 -0700, Jakub Kicinski <kuba@kernel.org> wrote:
> On Wed, 20 Mar 2024 16:04:21 +0800 Xuan Zhuo wrote:
> > I have a question regarding the workflow for feature discussions. If we
> > consistently engage in discussions about a particular feature, this may result
> > in the submission of multiple patch sets. In light of this, should we modify the
> > usage of "PATCH" or "RFC" in our submissions depending on whether the merge
> > window is open or closed? This causes the title of our patch sets to keep
> > changing.
>
> Is switching between RFC and PATCH causing issues?

You know someone may ignore the RFC patches.
And for me, it is odd that the patch sets for a particular feature have
different prefixes, "PATCH" or "RFC".

> Should be a simple modification to the git format-patch argument.

That is ok.


> But perhaps your workload is different than mine.
>
> The merge window is only 2 weeks every 10 weeks, it's not changing
> often, I don't think.

YES. I'm ok, if that is a rule.

Thanks.

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-21  3:54             ` Xuan Zhuo
@ 2024-03-21 12:42               ` Simon Horman
  0 siblings, 0 replies; 34+ messages in thread
From: Simon Horman @ 2024-03-21 12:42 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Jakub Kicinski, Paolo Abeni, netdev, David S.  Miller,
	Eric Dumazet, Michael  S. Tsirkin, Jason Wang,
	Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
	John Fastabend, Stanislav Fomichev, Amritha Nambiar,
	Larysa Zaremba, Sridhar Samudrala, Maciej Fijalkowski,
	virtualization, bpf, Jiri Pirko

On Thu, Mar 21, 2024 at 11:54:34AM +0800, Xuan Zhuo wrote:
> On Wed, 20 Mar 2024 20:38:01 -0700, Jakub Kicinski <kuba@kernel.org> wrote:
> > On Wed, 20 Mar 2024 16:04:21 +0800 Xuan Zhuo wrote:
> > > I have a question regarding the workflow for feature discussions. If we
> > > consistently engage in discussions about a particular feature, this may result
> > > in the submission of multiple patch sets. In light of this, should we modify the
> > > usage of "PATCH" or "RFC" in our submissions depending on whether the merge
> > > window is open or closed? This causes the title of our patch sets to keep
> > > changing.
> >
> > Is switching between RFC and PATCH causing issues?
> 
> You know someone may ignore the RFC patches.
> And for me, it is odd that the patch sets for a particular feature have
> different prefixes, "PATCH" or "RFC".
> 
> > Should be a simple modification to the git format-patch argument.
> 
> That is ok.
> 
> 
> > But perhaps your workload is different than mine.
> >
> > The merge window is only 2 weeks every 10 weeks, it's not changing
> > often, I don't think.
> 
> YES. I'm ok, if that is a rule.

Hi,

Maybe this helps:

It is a long standing rule that for netdev, during the merge window,
net-next is closed. During this time bugfixes may be posted (for net),
and RFCs may be posted.

https://docs.kernel.org/process/maintainer-netdev.html#git-trees-and-patch-flow

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 1/9] virtio_net: introduce device stats feature and structures
  2024-03-18 11:05 ` [PATCH net-next v5 1/9] virtio_net: introduce device stats feature and structures Xuan Zhuo
@ 2024-04-10  6:09   ` Jason Wang
  0 siblings, 0 replies; 34+ messages in thread
From: Jason Wang @ 2024-04-10  6:09 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> The virtio-net device stats spec:
>
> https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
> We introduce the relative feature and structures.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 2/9] virtio_net: virtnet_send_command supports command-specific-result
  2024-03-18 11:05 ` [PATCH net-next v5 2/9] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
@ 2024-04-10  6:09   ` Jason Wang
  2024-04-10 10:50     ` Xuan Zhuo
  0 siblings, 1 reply; 34+ messages in thread
From: Jason Wang @ 2024-04-10  6:09 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
> The virtnet cvq supports to get result from the device.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 47 +++++++++++++++++++++++-----------------
>  1 file changed, 27 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index d7ce4a1011ea..af512d85cd5b 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -2512,10 +2512,11 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
>   * never fail unless improperly formatted.
>   */
>  static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> -                                struct scatterlist *out)
> +                                struct scatterlist *out,
> +                                struct scatterlist *in)
>  {
> -       struct scatterlist *sgs[4], hdr, stat;
> -       unsigned out_num = 0, tmp;
> +       struct scatterlist *sgs[5], hdr, stat;
> +       u32 out_num = 0, tmp, in_num = 0;
>         int ret;
>
>         /* Caller should know better */
> @@ -2533,10 +2534,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
>
>         /* Add return status. */
>         sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
> -       sgs[out_num] = &stat;
> +       sgs[out_num + in_num++] = &stat;
>
> -       BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
> -       ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
> +       if (in)
> +               sgs[out_num + in_num++] = in;
> +
> +       BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
> +       ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
>         if (ret < 0) {
>                 dev_warn(&vi->vdev->dev,
>                          "Failed to add sgs for command vq: %d\n.", ret);
> @@ -2578,7 +2582,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
>         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
>                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
>                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
> -                                         VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
> +                                         VIRTIO_NET_CTRL_MAC_ADDR_SET,
> +                                         &sg, NULL)) {
>                         dev_warn(&vdev->dev,
>                                  "Failed to set mac address by vq command.\n");
>                         ret = -EINVAL;
> @@ -2647,7 +2652,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
>  {
>         rtnl_lock();
>         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
> -                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
> +                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))

Nit: It might be better to introduce a virtnet_send_command_reply()
and let virtnet_send_command() call it as in=NULL to simplify the
changes.
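
Something like this, as a rough sketch of the idea (untested, just to show the
shape): keep the 5-argument function as virtnet_send_command_reply() and add a
thin wrapper so the existing callers stay untouched:

static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                 struct scatterlist *out)
{
        /* existing callers do not need a command-specific result */
        return virtnet_send_command_reply(vi, class, cmd, out, NULL);
}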

Others look good.

Thanks


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 3/9] virtio_net: remove "_queue" from ethtool -S
  2024-03-18 11:05 ` [PATCH net-next v5 3/9] virtio_net: remove "_queue" from ethtool -S Xuan Zhuo
@ 2024-04-10  6:09   ` Jason Wang
  0 siblings, 0 replies; 34+ messages in thread
From: Jason Wang @ 2024-04-10  6:09 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> The key size of ethtool -S is controlled by this macro.
>
> ETH_GSTRING_LEN 32
>
> That includes the \0 at the end. So the max length of the key name must
> is 31. But the length of the prefix "rx_queue_0_" is 11. If the queue
> num is larger than 10, the length of the prefix is 12. So the
> key name max is 19. That is too short. We will introduce some keys
> such as "gso_packets_coalesced". So we should change the prefix
> to "rx0_".
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 7/9] virtio_net: rename stat tx_timeout to timeout
  2024-03-18 11:06 ` [PATCH net-next v5 7/9] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
@ 2024-04-10  6:09   ` Jason Wang
  0 siblings, 0 replies; 34+ messages in thread
From: Jason Wang @ 2024-04-10  6:09 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf,
	Jiri Pirko

On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> Now, we have this:
>
>     tx_queue_0_tx_timeouts
>
> This is used to record the tx schedule timeout.
> But this has two "tx". I think the below is enough.
>
>     tx_queue_0_timeouts
>
> So I rename this field.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Jiri Pirko <jiri@nvidia.com>
> ---
>  drivers/net/virtio_net.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 12dc1d0d8d2b..a24cfde30d08 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -87,7 +87,7 @@ struct virtnet_sq_stats {
>         u64_stats_t xdp_tx;
>         u64_stats_t xdp_tx_drops;
>         u64_stats_t kicks;
> -       u64_stats_t tx_timeouts;
> +       u64_stats_t timeouts;
>  };
>
>  struct virtnet_rq_stats {
> @@ -111,7 +111,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
>         VIRTNET_SQ_STAT("xdp_tx",       xdp_tx),
>         VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
>         VIRTNET_SQ_STAT("kicks",        kicks),
> -       VIRTNET_SQ_STAT("tx_timeouts",  tx_timeouts),

This is noticeable by userspace, not sure if it's too late.

Thanks


> +       VIRTNET_SQ_STAT("timeouts",     timeouts),
>  };
>
>  static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> @@ -2780,7 +2780,7 @@ static void virtnet_stats(struct net_device *dev,
>                         start = u64_stats_fetch_begin(&sq->stats.syncp);
>                         tpackets = u64_stats_read(&sq->stats.packets);
>                         tbytes   = u64_stats_read(&sq->stats.bytes);
> -                       terrors  = u64_stats_read(&sq->stats.tx_timeouts);
> +                       terrors  = u64_stats_read(&sq->stats.timeouts);
>                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
>
>                 do {
> @@ -4568,7 +4568,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
>         struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
>
>         u64_stats_update_begin(&sq->stats.syncp);
> -       u64_stats_inc(&sq->stats.tx_timeouts);
> +       u64_stats_inc(&sq->stats.timeouts);
>         u64_stats_update_end(&sq->stats.syncp);
>
>         netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
> --
> 2.32.0.3.g01195cf9f
>


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-03-18 11:05 ` [PATCH net-next v5 4/9] virtio_net: support device stats Xuan Zhuo
@ 2024-04-10  6:09   ` Jason Wang
  2024-04-10 10:52     ` Xuan Zhuo
  0 siblings, 1 reply; 34+ messages in thread
From: Jason Wang @ 2024-04-10  6:09 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
> make virtio-net support getting the stats from the device by ethtool -S
> <eth0>.
>
> Due to the numerous descriptors stats, an organization method is
> required. For this purpose, I have introduced the "virtnet_stats_map".
> Utilizing this array simplifies coding tasks such as generating field
> names, calculating buffer sizes for requests and responses, and parsing
> replies from the device. By iterating over the "virtnet_stats_map,"
> these operations become more streamlined and efficient.
>
> NIC statistics:
>      rx0_packets: 582951
>      rx0_bytes: 155307077
>      rx0_drops: 0
>      rx0_xdp_packets: 0
>      rx0_xdp_tx: 0
>      rx0_xdp_redirects: 0
>      rx0_xdp_drops: 0
>      rx0_kicks: 17007
>      rx0_hw_packets: 2179409
>      rx0_hw_bytes: 510015040
>      rx0_hw_notifications: 0
>      rx0_hw_interrupts: 0
>      rx0_hw_drops: 12964
>      rx0_hw_drop_overruns: 0
>      rx0_hw_csum_valid: 2179409
>      rx0_hw_csum_none: 0
>      rx0_hw_csum_bad: 0
>      rx0_hw_needs_csum: 2179409
>      rx0_hw_ratelimit_packets: 0
>      rx0_hw_ratelimit_bytes: 0
>      tx0_packets: 15361
>      tx0_bytes: 1918970
>      tx0_xdp_tx: 0
>      tx0_xdp_tx_drops: 0
>      tx0_kicks: 15361
>      tx0_timeouts: 0
>      tx0_hw_packets: 32272
>      tx0_hw_bytes: 4311698
>      tx0_hw_notifications: 0
>      tx0_hw_interrupts: 0
>      tx0_hw_drops: 0
>      tx0_hw_drop_malformed: 0
>      tx0_hw_csum_none: 0
>      tx0_hw_needs_csum: 32272
>      tx0_hw_ratelimit_packets: 0
>      tx0_hw_ratelimit_bytes: 0
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 397 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8cb5bdd7ad91..70c1d4e850e0 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
>  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
>  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
>
> +#define VIRTNET_STATS_DESC_CQ(name) \
> +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> +
> +#define VIRTNET_STATS_DESC_RX(class, name) \
> +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> +
> +#define VIRTNET_STATS_DESC_TX(class, name) \
> +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> +
> +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> +       VIRTNET_STATS_DESC_CQ(command_num),
> +       VIRTNET_STATS_DESC_CQ(ok_num),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> +       VIRTNET_STATS_DESC_RX(basic, packets),
> +       VIRTNET_STATS_DESC_RX(basic, bytes),
> +
> +       VIRTNET_STATS_DESC_RX(basic, notifications),
> +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> +
> +       VIRTNET_STATS_DESC_RX(basic, drops),
> +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> +       VIRTNET_STATS_DESC_TX(basic, packets),
> +       VIRTNET_STATS_DESC_TX(basic, bytes),
> +
> +       VIRTNET_STATS_DESC_TX(basic, notifications),
> +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> +
> +       VIRTNET_STATS_DESC_TX(basic, drops),
> +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> +
> +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> +};
> +
> +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> +};
> +
> +#define VIRTNET_Q_TYPE_RX 0
> +#define VIRTNET_Q_TYPE_TX 1
> +#define VIRTNET_Q_TYPE_CQ 2
> +
> +struct virtnet_stats_map {
> +       /* The stat type in bitmap. */
> +       u64 stat_type;
> +
> +       /* The bytes of the response for the stat. */
> +       u32 len;
> +
> +       /* The num of the response fields for the stat. */
> +       u32 num;
> +
> +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> +       u32 queue_type;
> +
> +       /* The reply type of the stat. */
> +       u8 reply_type;
> +
> +       /* Describe the name and the offset in the response. */
> +       const struct virtnet_stat_desc *desc;
> +};
> +
> +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> +       {                                                       \
> +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> +               sizeof(struct virtio_net_stats_ ## type),       \
> +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> +               VIRTNET_Q_TYPE_##queue_type,                    \
> +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> +               &virtnet_stats_##type##_desc[0]                 \
> +       }
> +
> +static struct virtnet_stats_map virtio_net_stats_map[] = {
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> +
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> +
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> +};

I think the reason you did this is to ease future extensions, but
multiple levels of nested macros make the code hard to review. Any
way to eliminate this?
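
For instance (just a sketch to show what I mean, reusing the names from
this patch), each entry could be open-coded with designated initializers:

	static struct virtnet_stats_map virtio_net_stats_map[] = {
		{
			.stat_type  = VIRTIO_NET_STATS_TYPE_CVQ,
			.len        = sizeof(struct virtio_net_stats_cvq),
			.num        = ARRAY_SIZE(virtnet_stats_cvq_desc),
			.queue_type = VIRTNET_Q_TYPE_CQ,
			.reply_type = VIRTIO_NET_STATS_TYPE_REPLY_CVQ,
			.desc       = virtnet_stats_cvq_desc,
		},
		{
			.stat_type  = VIRTIO_NET_STATS_TYPE_RX_BASIC,
			.len        = sizeof(struct virtio_net_stats_rx_basic),
			.num        = ARRAY_SIZE(virtnet_stats_rx_basic_desc),
			.queue_type = VIRTNET_Q_TYPE_RX,
			.reply_type = VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC,
			.desc       = virtnet_stats_rx_basic_desc,
		},
		/* ... and so on for the other stat types ... */
	};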

> +
>  struct virtnet_interrupt_coalesce {
>         u32 max_packets;
>         u32 max_usecs;
> @@ -244,6 +367,7 @@ struct control_buf {
>         struct virtio_net_ctrl_coal_tx coal_tx;
>         struct virtio_net_ctrl_coal_rx coal_rx;
>         struct virtio_net_ctrl_coal_vq coal_vq;
> +       struct virtio_net_stats_capabilities stats_cap;
>  };
>
>  struct virtnet_info {
> @@ -329,6 +453,8 @@ struct virtnet_info {
>
>         /* failover when STANDBY feature enabled */
>         struct failover *failover;
> +
> +       u64 device_stats_cap;
>  };
>
>  struct padded_vnet_hdr {
> @@ -389,6 +515,17 @@ static int rxq2vq(int rxq)
>         return rxq * 2;
>  }
>
> +static int vq_type(struct virtnet_info *vi, int qid)
> +{
> +       if (qid == vi->max_queue_pairs * 2)
> +               return VIRTNET_Q_TYPE_CQ;
> +
> +       if (qid % 2)
> +               return VIRTNET_Q_TYPE_TX;
> +
> +       return VIRTNET_Q_TYPE_RX;
> +}
> +
>  static inline struct virtio_net_common_hdr *
>  skb_vnet_common_hdr(struct sk_buff *skb)
>  {
> @@ -3263,6 +3400,223 @@ static int virtnet_set_channels(struct net_device *dev,
>         return err;
>  }
>
> +static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
> +{
> +       struct virtnet_stats_map *m;
> +       int i, j;
> +       u8 *p = *data;
> +
> +       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
> +               return;
> +
> +       for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> +               m = &virtio_net_stats_map[i];
> +
> +               if (m->queue_type != type)
> +                       continue;
> +
> +               if (!(vi->device_stats_cap & m->stat_type))
> +                       continue;
> +
> +               for (j = 0; j < m->num; ++j) {
> +                       switch (type) {
> +                       case VIRTNET_Q_TYPE_RX:
> +                               ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
> +                               break;
> +
> +                       case VIRTNET_Q_TYPE_TX:
> +                               ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
> +                               break;
> +
> +                       case VIRTNET_Q_TYPE_CQ:
> +                               ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
> +                               break;
> +                       }
> +               }
> +       }
> +
> +       *data = p;
> +}
> +
> +struct virtnet_stats_ctx {
> +       u32 desc_num[3];
> +
> +       u32 bitmap[3];
> +
> +       u32 size[3];
> +
> +       u64 *data;
> +};

Let's explain the meaning of each field here.
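
For instance, from my reading of virtnet_stats_ctx_init() below, I think
it is something like this (please double check the wording):

	struct virtnet_stats_ctx {
		/* Number of device stat fields, per queue type (rx/tx/cq). */
		u32 desc_num[3];

		/* Bitmap of requested device stat types, per queue type. */
		u32 bitmap[3];

		/* Size in bytes of the device reply, per queue type. */
		u32 size[3];

		/* Output buffer for ethtool -S (NULL when only counting). */
		u64 *data;
	};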

> +
> +static void virtnet_stats_ctx_init(struct virtnet_info *vi,
> +                                  struct virtnet_stats_ctx *ctx,
> +                                  u64 *data)
> +{
> +       struct virtnet_stats_map *m;
> +       int i;
> +
> +       ctx->data = data;
> +
> +       for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> +               m = &virtio_net_stats_map[i];
> +
> +               if (!(vi->device_stats_cap & m->stat_type))
> +                       continue;
> +
> +               ctx->bitmap[m->queue_type]   |= m->stat_type;
> +               ctx->desc_num[m->queue_type] += m->num;
> +               ctx->size[m->queue_type]     += m->len;
> +       }
> +}
> +
> +/* virtnet_fill_stats - copy the stats to ethtool -S
> + * The stats source is the device.
> + *
> + * @vi: virtio net info
> + * @qid: the vq id
> + * @ctx: stats ctx (initiated by virtnet_stats_ctx_init())
> + * @base: pointer to the device reply.
> + * @type: the type of the device reply
> + */
> +static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
> +                              struct virtnet_stats_ctx *ctx,
> +                              const u8 *base, u8 type)
> +{
> +       u32 queue_type, num_rx, num_tx, num_cq;
> +       struct virtnet_stats_map *m;
> +       u64 offset, bitmap;
> +       const __le64 *v;
> +       int i, j;
> +
> +       num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
> +       num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
> +       num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
> +
> +       queue_type = vq_type(vi, qid);
> +       bitmap = ctx->bitmap[queue_type];
> +       offset = 0;
> +
> +       if (queue_type == VIRTNET_Q_TYPE_TX) {
> +               offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> +               offset += VIRTNET_SQ_STATS_LEN;
> +       } else if (queue_type == VIRTNET_Q_TYPE_RX) {
> +               offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> +       }
> +
> +       for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> +               m = &virtio_net_stats_map[i];
> +
> +               if (m->stat_type & bitmap)
> +                       offset += m->num;
> +
> +               if (type != m->reply_type)
> +                       continue;
> +
> +               for (j = 0; j < m->num; ++j) {
> +                       v = (const __le64 *)(base + m->desc[j].offset);
> +                       ctx->data[offset + j] = le64_to_cpu(*v);
> +               }
> +
> +               break;
> +       }
> +}
> +
> +static int __virtnet_get_hw_stats(struct virtnet_info *vi,
> +                                 struct virtnet_stats_ctx *ctx,
> +                                 struct virtio_net_ctrl_queue_stats *req,
> +                                 int req_size, void *reply, int res_size)
> +{
> +       struct virtio_net_stats_reply_hdr *hdr;
> +       struct scatterlist sgs_in, sgs_out;
> +       void *p;
> +       u32 qid;
> +       int ok;
> +
> +       sg_init_one(&sgs_out, req, req_size);
> +       sg_init_one(&sgs_in, reply, res_size);
> +
> +       ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
> +                                 VIRTIO_NET_CTRL_STATS_GET,
> +                                 &sgs_out, &sgs_in);
> +       kfree(req);

I'd suggest letting the caller free this for simplicity.

> +
> +       if (!ok) {
> +               kfree(reply);
> +               return ok;
> +       }
> +
> +       for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {

hdr->size is under the control of the device, which might be malicious?

Btw, I'd expect the type to imply the size, but it looks like it doesn't.
Any reason for that?
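
E.g. (rough and untested, just to illustrate the first point) the walk
probably needs bounds checks along these lines:

	for (p = reply; p + sizeof(*hdr) <= reply + res_size;
	     p += le16_to_cpu(hdr->size)) {
		hdr = p;

		/* hdr->size comes from the device; reject a value that would
		 * stall the loop or step past the reply buffer we allocated.
		 */
		if (le16_to_cpu(hdr->size) < sizeof(*hdr) ||
		    le16_to_cpu(hdr->size) > res_size - (p - reply))
			break;

		qid = le16_to_cpu(hdr->vq_index);
		virtnet_fill_stats(vi, qid, ctx, p, hdr->type);
	}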

> +               hdr = p;
> +               qid = le16_to_cpu(hdr->vq_index);
> +               virtnet_fill_stats(vi, qid, ctx, p, hdr->type);
> +       }
> +
> +       kfree(reply);

As the caller already has error handling logic like this:

        reply = kmalloc(res_size, GFP_KERNEL);
        if (!reply) {
                kfree(req);
                return -ENOMEM;
        }

So let's free it in the caller.
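
I.e. the tail of virtnet_get_hw_stats() could end up roughly like this
(sketch only, with an "int ret" local added):

	ret = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);

	kfree(req);
	kfree(reply);

	return ret;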

> +       return 0;
> +}
> +
> +static void virtnet_make_stat_req(struct virtnet_info *vi,
> +                                 struct virtnet_stats_ctx *ctx,
> +                                 struct virtio_net_ctrl_queue_stats *req,
> +                                 int qid, int *idx)
> +{
> +       int qtype = vq_type(vi, qid);
> +       u64 bitmap = ctx->bitmap[qtype];
> +
> +       if (!bitmap)
> +               return;
> +
> +       req->stats[*idx].vq_index = cpu_to_le16(qid);
> +       req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
> +       *idx += 1;
> +}
> +
> +static int virtnet_get_hw_stats(struct virtnet_info *vi,
> +                               struct virtnet_stats_ctx *ctx)
> +{
> +       struct virtio_net_ctrl_queue_stats *req;
> +       int qnum, i, j, res_size, qtype, last_vq;
> +       void *reply;
> +
> +       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
> +               return 0;
> +
> +       last_vq = vi->curr_queue_pairs * 2 - 1;
> +
> +       qnum = 0;
> +       res_size = 0;
> +       for (i = 0; i <= last_vq ; ++i) {
> +               qtype = vq_type(vi, i);
> +               if (ctx->bitmap[qtype]) {
> +                       ++qnum;
> +                       res_size += ctx->size[qtype];
> +               }
> +       }
> +
> +       if (ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
> +               res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
> +               qnum += 1;
> +       }
> +
> +       req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
> +       if (!req)
> +               return -ENOMEM;
> +
> +       reply = kmalloc(res_size, GFP_KERNEL);
> +       if (!reply) {
> +               kfree(req);
> +               return -ENOMEM;
> +       }
> +
> +       j = 0;
> +       for (i = 0; i <= last_vq ; ++i)
> +               virtnet_make_stat_req(vi, ctx, req, i, &j);
> +
> +       virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);

Instead of preparing those on the fly, could we prepare the request
during probe instead of here? Most of the fields (except the data)
won't change.
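
E.g. (sketch only; stats_req and friends are made-up names) the prepared
request could live on struct virtnet_info and just be reused here:

	/* hypothetical new members of struct virtnet_info */
	struct virtio_net_ctrl_queue_stats *stats_req;	/* prebuilt request */
	int stats_req_size;				/* bytes in stats_req */
	int stats_res_size;				/* bytes expected in the reply */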

> +
> +       return __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
> +}
> +
>  static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
>  {
>         struct virtnet_info *vi = netdev_priv(dev);
> @@ -3271,16 +3625,22 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
>
>         switch (stringset) {
>         case ETH_SS_STATS:
> +               virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
> +
>                 for (i = 0; i < vi->curr_queue_pairs; i++) {
>                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
>                                 ethtool_sprintf(&p, "rx%u_%s", i,
>                                                 virtnet_rq_stats_desc[j].desc);
> +
> +                       virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
>                 }
>
>                 for (i = 0; i < vi->curr_queue_pairs; i++) {
>                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
>                                 ethtool_sprintf(&p, "tx%u_%s", i,
>                                                 virtnet_sq_stats_desc[j].desc);
> +
> +                       virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
>                 }
>                 break;
>         }
> @@ -3289,11 +3649,35 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
>  static int virtnet_get_sset_count(struct net_device *dev, int sset)
>  {
>         struct virtnet_info *vi = netdev_priv(dev);
> +       struct virtnet_stats_ctx ctx = {0};
> +       u32 pair_count;
>
>         switch (sset) {
>         case ETH_SS_STATS:
> -               return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
> -                                              VIRTNET_SQ_STATS_LEN);
> +               if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS) &&
> +                   !vi->device_stats_cap) {
> +                       struct scatterlist sg;
> +
> +                       sg_init_one(&sg, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));
> +
> +                       if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
> +                                                 VIRTIO_NET_CTRL_STATS_QUERY,
> +                                                 NULL, &sg)) {
> +                               dev_warn(&dev->dev, "Fail to get stats capability\n");

Should we fail here?

> +                       } else {
> +                               __le64 v;
> +
> +                               v = vi->ctrl->stats_cap.supported_stats_types[0];
> +                               vi->device_stats_cap = le64_to_cpu(v);
> +                       }
> +               }
> +
> +               virtnet_stats_ctx_init(vi, &ctx, NULL);
> +
> +               pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
> +               pair_count += ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
> +
> +               return ctx.desc_num[VIRTNET_Q_TYPE_CQ] + vi->curr_queue_pairs * pair_count;

I wonder why we don't do this during the probe?

>         default:
>                 return -EOPNOTSUPP;
>         }
> @@ -3303,11 +3687,18 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
>                                       struct ethtool_stats *stats, u64 *data)
>  {
>         struct virtnet_info *vi = netdev_priv(dev);
> -       unsigned int idx = 0, start, i, j;
> +       struct virtnet_stats_ctx ctx = {0};
> +       unsigned int idx, start, i, j;
>         const u8 *stats_base;
>         const u64_stats_t *p;
>         size_t offset;
>
> +       virtnet_stats_ctx_init(vi, &ctx, data);
> +       if (virtnet_get_hw_stats(vi, &ctx))
> +               dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
> +
> +       idx = ctx.desc_num[VIRTNET_Q_TYPE_CQ];
> +
>         for (i = 0; i < vi->curr_queue_pairs; i++) {
>                 struct receive_queue *rq = &vi->rq[i];
>
> @@ -3321,6 +3712,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
>                         }
>                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
>                 idx += VIRTNET_RQ_STATS_LEN;
> +               idx += ctx.desc_num[VIRTNET_Q_TYPE_RX];
>         }
>
>         for (i = 0; i < vi->curr_queue_pairs; i++) {
> @@ -3336,6 +3728,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
>                         }
>                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
>                 idx += VIRTNET_SQ_STATS_LEN;
> +               idx += ctx.desc_num[VIRTNET_Q_TYPE_TX];
>         }
>  }
>
> @@ -4963,7 +5356,7 @@ static struct virtio_device_id id_table[] = {
>         VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
>         VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
>         VIRTIO_NET_F_VQ_NOTF_COAL, \
> -       VIRTIO_NET_F_GUEST_HDRLEN
> +       VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
>
>  static unsigned int features[] = {
>         VIRTNET_FEATURES,
> --
> 2.32.0.3.g01195cf9f
>

Thanks


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 2/9] virtio_net: virtnet_send_command supports command-specific-result
  2024-04-10  6:09   ` Jason Wang
@ 2024-04-10 10:50     ` Xuan Zhuo
  0 siblings, 0 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-04-10 10:50 UTC (permalink / raw)
  To: Jason Wang
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Wed, 10 Apr 2024 14:09:11 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> >
> > The virtnet cvq supports to get result from the device.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 47 +++++++++++++++++++++++-----------------
> >  1 file changed, 27 insertions(+), 20 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index d7ce4a1011ea..af512d85cd5b 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -2512,10 +2512,11 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
> >   * never fail unless improperly formatted.
> >   */
> >  static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> > -                                struct scatterlist *out)
> > +                                struct scatterlist *out,
> > +                                struct scatterlist *in)
> >  {
> > -       struct scatterlist *sgs[4], hdr, stat;
> > -       unsigned out_num = 0, tmp;
> > +       struct scatterlist *sgs[5], hdr, stat;
> > +       u32 out_num = 0, tmp, in_num = 0;
> >         int ret;
> >
> >         /* Caller should know better */
> > @@ -2533,10 +2534,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
> >
> >         /* Add return status. */
> >         sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
> > -       sgs[out_num] = &stat;
> > +       sgs[out_num + in_num++] = &stat;
> >
> > -       BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
> > -       ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
> > +       if (in)
> > +               sgs[out_num + in_num++] = in;
> > +
> > +       BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
> > +       ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
> >         if (ret < 0) {
> >                 dev_warn(&vi->vdev->dev,
> >                          "Failed to add sgs for command vq: %d\n.", ret);
> > @@ -2578,7 +2582,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
> >         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
> >                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
> >                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
> > -                                         VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
> > +                                         VIRTIO_NET_CTRL_MAC_ADDR_SET,
> > +                                         &sg, NULL)) {
> >                         dev_warn(&vdev->dev,
> >                                  "Failed to set mac address by vq command.\n");
> >                         ret = -EINVAL;
> > @@ -2647,7 +2652,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
> >  {
> >         rtnl_lock();
> >         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
> > -                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
> > +                                 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
>
> Nit: It might be better to introduce a virtnet_send_command_reply()
> and let virtnet_send_command() call it as in=NULL to simplify the
> changes.

OK.
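
Maybe something like this (just the shape, untested):

	/* the current virtnet_send_command() body (which already handles both
	 * out and in) moves into this new function unchanged:
	 */
	static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class,
					       u8 cmd, struct scatterlist *out,
					       struct scatterlist *in);

	/* existing callers keep the old name and signature, so they stay
	 * untouched:
	 */
	static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
					 struct scatterlist *out)
	{
		return virtnet_send_command_reply(vi, class, cmd, out, NULL);
	}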

Thanks.


>
> Others look good.
>
> Thanks
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-04-10  6:09   ` Jason Wang
@ 2024-04-10 10:52     ` Xuan Zhuo
  2024-04-11  6:09       ` Jason Wang
  0 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-04-10 10:52 UTC (permalink / raw)
  To: Jason Wang
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Wed, 10 Apr 2024 14:09:23 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> >
> > make virtio-net support getting the stats from the device by ethtool -S
> > <eth0>.
> >
> > Due to the numerous descriptors stats, an organization method is
> > required. For this purpose, I have introduced the "virtnet_stats_map".
> > Utilizing this array simplifies coding tasks such as generating field
> > names, calculating buffer sizes for requests and responses, and parsing
> > replies from the device. By iterating over the "virtnet_stats_map,"
> > these operations become more streamlined and efficient.
> >
> > NIC statistics:
> >      rx0_packets: 582951
> >      rx0_bytes: 155307077
> >      rx0_drops: 0
> >      rx0_xdp_packets: 0
> >      rx0_xdp_tx: 0
> >      rx0_xdp_redirects: 0
> >      rx0_xdp_drops: 0
> >      rx0_kicks: 17007
> >      rx0_hw_packets: 2179409
> >      rx0_hw_bytes: 510015040
> >      rx0_hw_notifications: 0
> >      rx0_hw_interrupts: 0
> >      rx0_hw_drops: 12964
> >      rx0_hw_drop_overruns: 0
> >      rx0_hw_csum_valid: 2179409
> >      rx0_hw_csum_none: 0
> >      rx0_hw_csum_bad: 0
> >      rx0_hw_needs_csum: 2179409
> >      rx0_hw_ratelimit_packets: 0
> >      rx0_hw_ratelimit_bytes: 0
> >      tx0_packets: 15361
> >      tx0_bytes: 1918970
> >      tx0_xdp_tx: 0
> >      tx0_xdp_tx_drops: 0
> >      tx0_kicks: 15361
> >      tx0_timeouts: 0
> >      tx0_hw_packets: 32272
> >      tx0_hw_bytes: 4311698
> >      tx0_hw_notifications: 0
> >      tx0_hw_interrupts: 0
> >      tx0_hw_drops: 0
> >      tx0_hw_drop_malformed: 0
> >      tx0_hw_csum_none: 0
> >      tx0_hw_needs_csum: 32272
> >      tx0_hw_ratelimit_packets: 0
> >      tx0_hw_ratelimit_bytes: 0
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > ---
> >  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 397 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 8cb5bdd7ad91..70c1d4e850e0 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> >  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
> >  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
> >
> > +#define VIRTNET_STATS_DESC_CQ(name) \
> > +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> > +
> > +#define VIRTNET_STATS_DESC_RX(class, name) \
> > +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> > +
> > +#define VIRTNET_STATS_DESC_TX(class, name) \
> > +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> > +       VIRTNET_STATS_DESC_CQ(command_num),
> > +       VIRTNET_STATS_DESC_CQ(ok_num),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > +       VIRTNET_STATS_DESC_RX(basic, packets),
> > +       VIRTNET_STATS_DESC_RX(basic, bytes),
> > +
> > +       VIRTNET_STATS_DESC_RX(basic, notifications),
> > +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> > +
> > +       VIRTNET_STATS_DESC_RX(basic, drops),
> > +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > +       VIRTNET_STATS_DESC_TX(basic, packets),
> > +       VIRTNET_STATS_DESC_TX(basic, bytes),
> > +
> > +       VIRTNET_STATS_DESC_TX(basic, notifications),
> > +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> > +
> > +       VIRTNET_STATS_DESC_TX(basic, drops),
> > +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> > +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> > +
> > +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> > +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> > +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> > +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> > +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> > +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> > +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> > +};
> > +
> > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> > +};
> > +
> > +#define VIRTNET_Q_TYPE_RX 0
> > +#define VIRTNET_Q_TYPE_TX 1
> > +#define VIRTNET_Q_TYPE_CQ 2
> > +
> > +struct virtnet_stats_map {
> > +       /* The stat type in bitmap. */
> > +       u64 stat_type;
> > +
> > +       /* The bytes of the response for the stat. */
> > +       u32 len;
> > +
> > +       /* The num of the response fields for the stat. */
> > +       u32 num;
> > +
> > +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> > +       u32 queue_type;
> > +
> > +       /* The reply type of the stat. */
> > +       u8 reply_type;
> > +
> > +       /* Describe the name and the offset in the response. */
> > +       const struct virtnet_stat_desc *desc;
> > +};
> > +
> > +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> > +       {                                                       \
> > +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> > +               sizeof(struct virtio_net_stats_ ## type),       \
> > +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> > +               VIRTNET_Q_TYPE_##queue_type,                    \
> > +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> > +               &virtnet_stats_##type##_desc[0]                 \
> > +       }
> > +
> > +static struct virtnet_stats_map virtio_net_stats_map[] = {
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> > +
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> > +
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> > +};
>
> I think the reason you did this is to ease the future extensions but
> multiple levels of nested macros makes the code hard to review. Any
> way to eliminate this?


NOT only for future extensions.

When we parse the reply from the device, we need to check the reply stats
one by one, and we need the stats info to help parse each of them.

	static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
	                              struct virtnet_stats_ctx *ctx,
	                              const u8 *base, u8 type)
	{
	       u32 queue_type, num_rx, num_tx, num_cq;
	       struct virtnet_stats_map *m;
	       u64 offset, bitmap;
	       const __le64 *v;
	       int i, j;

	       num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
	       num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
	       num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];

	       queue_type = vq_type(vi, qid);
	       bitmap = ctx->bitmap[queue_type];
	       offset = 0;

	       if (queue_type == VIRTNET_Q_TYPE_TX) {
	               offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
	               offset += VIRTNET_SQ_STATS_LEN;
	       } else if (queue_type == VIRTNET_Q_TYPE_RX) {
	               offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
	       }

	       for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
	               m = &virtio_net_stats_map[i];

->	               if (m->stat_type & bitmap)
	                       offset += m->num;

->	               if (type != m->reply_type)
	                       continue;

	               for (j = 0; j < m->num; ++j) {
	                       v = (const __le64 *)(base + m->desc[j].offset);
	                       ctx->data[offset + j] = le64_to_cpu(*v);
	               }

	               break;
	       }
	}

Thanks.


>
> > +
> >  struct virtnet_interrupt_coalesce {
> >         u32 max_packets;
> >         u32 max_usecs;
> > @@ -244,6 +367,7 @@ struct control_buf {
> >         struct virtio_net_ctrl_coal_tx coal_tx;
> >         struct virtio_net_ctrl_coal_rx coal_rx;
> >         struct virtio_net_ctrl_coal_vq coal_vq;
> > +       struct virtio_net_stats_capabilities stats_cap;
> >  };
> >
> >  struct virtnet_info {
> > @@ -329,6 +453,8 @@ struct virtnet_info {
> >
> >         /* failover when STANDBY feature enabled */
> >         struct failover *failover;
> > +
> > +       u64 device_stats_cap;
> >  };
> >
> >  struct padded_vnet_hdr {
> > @@ -389,6 +515,17 @@ static int rxq2vq(int rxq)
> >         return rxq * 2;
> >  }
> >
> > +static int vq_type(struct virtnet_info *vi, int qid)
> > +{
> > +       if (qid == vi->max_queue_pairs * 2)
> > +               return VIRTNET_Q_TYPE_CQ;
> > +
> > +       if (qid % 2)
> > +               return VIRTNET_Q_TYPE_TX;
> > +
> > +       return VIRTNET_Q_TYPE_RX;
> > +}
> > +
> >  static inline struct virtio_net_common_hdr *
> >  skb_vnet_common_hdr(struct sk_buff *skb)
> >  {
> > @@ -3263,6 +3400,223 @@ static int virtnet_set_channels(struct net_device *dev,
> >         return err;
> >  }
> >
> > +static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
> > +{
> > +       struct virtnet_stats_map *m;
> > +       int i, j;
> > +       u8 *p = *data;
> > +
> > +       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
> > +               return;
> > +
> > +       for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> > +               m = &virtio_net_stats_map[i];
> > +
> > +               if (m->queue_type != type)
> > +                       continue;
> > +
> > +               if (!(vi->device_stats_cap & m->stat_type))
> > +                       continue;
> > +
> > +               for (j = 0; j < m->num; ++j) {
> > +                       switch (type) {
> > +                       case VIRTNET_Q_TYPE_RX:
> > +                               ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
> > +                               break;
> > +
> > +                       case VIRTNET_Q_TYPE_TX:
> > +                               ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
> > +                               break;
> > +
> > +                       case VIRTNET_Q_TYPE_CQ:
> > +                               ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
> > +                               break;
> > +                       }
> > +               }
> > +       }
> > +
> > +       *data = p;
> > +}
> > +
> > +struct virtnet_stats_ctx {
> > +       u32 desc_num[3];
> > +
> > +       u32 bitmap[3];
> > +
> > +       u32 size[3];
> > +
> > +       u64 *data;
> > +};
>
> Let's explain the meaning of each field here.
>
> > +
> > +static void virtnet_stats_ctx_init(struct virtnet_info *vi,
> > +                                  struct virtnet_stats_ctx *ctx,
> > +                                  u64 *data)
> > +{
> > +       struct virtnet_stats_map *m;
> > +       int i;
> > +
> > +       ctx->data = data;
> > +
> > +       for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> > +               m = &virtio_net_stats_map[i];
> > +
> > +               if (!(vi->device_stats_cap & m->stat_type))
> > +                       continue;
> > +
> > +               ctx->bitmap[m->queue_type]   |= m->stat_type;
> > +               ctx->desc_num[m->queue_type] += m->num;
> > +               ctx->size[m->queue_type]     += m->len;
> > +       }
> > +}
> > +
> > +/* virtnet_fill_stats - copy the stats to ethtool -S
> > + * The stats source is the device.
> > + *
> > + * @vi: virtio net info
> > + * @qid: the vq id
> > + * @ctx: stats ctx (initiated by virtnet_stats_ctx_init())
> > + * @base: pointer to the device reply.
> > + * @type: the type of the device reply
> > + */
> > +static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
> > +                              struct virtnet_stats_ctx *ctx,
> > +                              const u8 *base, u8 type)
> > +{
> > +       u32 queue_type, num_rx, num_tx, num_cq;
> > +       struct virtnet_stats_map *m;
> > +       u64 offset, bitmap;
> > +       const __le64 *v;
> > +       int i, j;
> > +
> > +       num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
> > +       num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
> > +       num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
> > +
> > +       queue_type = vq_type(vi, qid);
> > +       bitmap = ctx->bitmap[queue_type];
> > +       offset = 0;
> > +
> > +       if (queue_type == VIRTNET_Q_TYPE_TX) {
> > +               offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> > +               offset += VIRTNET_SQ_STATS_LEN;
> > +       } else if (queue_type == VIRTNET_Q_TYPE_RX) {
> > +               offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> > +       }
> > +
> > +       for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> > +               m = &virtio_net_stats_map[i];
> > +
> > +               if (m->stat_type & bitmap)
> > +                       offset += m->num;
> > +
> > +               if (type != m->reply_type)
> > +                       continue;
> > +
> > +               for (j = 0; j < m->num; ++j) {
> > +                       v = (const __le64 *)(base + m->desc[j].offset);
> > +                       ctx->data[offset + j] = le64_to_cpu(*v);
> > +               }
> > +
> > +               break;
> > +       }
> > +}
> > +
> > +static int __virtnet_get_hw_stats(struct virtnet_info *vi,
> > +                                 struct virtnet_stats_ctx *ctx,
> > +                                 struct virtio_net_ctrl_queue_stats *req,
> > +                                 int req_size, void *reply, int res_size)
> > +{
> > +       struct virtio_net_stats_reply_hdr *hdr;
> > +       struct scatterlist sgs_in, sgs_out;
> > +       void *p;
> > +       u32 qid;
> > +       int ok;
> > +
> > +       sg_init_one(&sgs_out, req, req_size);
> > +       sg_init_one(&sgs_in, reply, res_size);
> > +
> > +       ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
> > +                                 VIRTIO_NET_CTRL_STATS_GET,
> > +                                 &sgs_out, &sgs_in);
> > +       kfree(req);
>
> I'd suggest letting the caller free this for simplicity.
>
> > +
> > +       if (!ok) {
> > +               kfree(reply);
> > +               return ok;
> > +       }
> > +
> > +       for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
>
> hdr->size is under the control of a device which might be malicious?
>
> Btw, I'd expect that type implies size but looks not, any reason for that?
>
> > +               hdr = p;
> > +               qid = le16_to_cpu(hdr->vq_index);
> > +               virtnet_fill_stats(vi, qid, ctx, p, hdr->type);
> > +       }
> > +
> > +       kfree(reply);
>
> As caller has the logic for err handling like:
>
>         reply = kmalloc(res_size, GFP_KERNEL);
>         if (!reply) {
>                 kfree(req);
>                 return -ENOMEM;
>         }
>
> So let's free it from the caller.
>
> > +       return 0;
> > +}
> > +
> > +static void virtnet_make_stat_req(struct virtnet_info *vi,
> > +                                 struct virtnet_stats_ctx *ctx,
> > +                                 struct virtio_net_ctrl_queue_stats *req,
> > +                                 int qid, int *idx)
> > +{
> > +       int qtype = vq_type(vi, qid);
> > +       u64 bitmap = ctx->bitmap[qtype];
> > +
> > +       if (!bitmap)
> > +               return;
> > +
> > +       req->stats[*idx].vq_index = cpu_to_le16(qid);
> > +       req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
> > +       *idx += 1;
> > +}
> > +
> > +static int virtnet_get_hw_stats(struct virtnet_info *vi,
> > +                               struct virtnet_stats_ctx *ctx)
> > +{
> > +       struct virtio_net_ctrl_queue_stats *req;
> > +       int qnum, i, j, res_size, qtype, last_vq;
> > +       void *reply;
> > +
> > +       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
> > +               return 0;
> > +
> > +       last_vq = vi->curr_queue_pairs * 2 - 1;
> > +
> > +       qnum = 0;
> > +       res_size = 0;
> > +       for (i = 0; i <= last_vq ; ++i) {
> > +               qtype = vq_type(vi, i);
> > +               if (ctx->bitmap[qtype]) {
> > +                       ++qnum;
> > +                       res_size += ctx->size[qtype];
> > +               }
> > +       }
> > +
> > +       if (ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
> > +               res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
> > +               qnum += 1;
> > +       }
> > +
> > +       req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
> > +       if (!req)
> > +               return -ENOMEM;
> > +
> > +       reply = kmalloc(res_size, GFP_KERNEL);
> > +       if (!reply) {
> > +               kfree(req);
> > +               return -ENOMEM;
> > +       }
> > +
> > +       j = 0;
> > +       for (i = 0; i <= last_vq ; ++i)
> > +               virtnet_make_stat_req(vi, ctx, req, i, &j);
> > +
> > +       virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
>
> Instead of preparing those on the fly, could we prepare the request
> during the probe instead of here? As most of the field (except the
> data) won't be changed.
>
> > +
> > +       return __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
> > +}
> > +
> >  static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
> >  {
> >         struct virtnet_info *vi = netdev_priv(dev);
> > @@ -3271,16 +3625,22 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
> >
> >         switch (stringset) {
> >         case ETH_SS_STATS:
> > +               virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
> > +
> >                 for (i = 0; i < vi->curr_queue_pairs; i++) {
> >                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
> >                                 ethtool_sprintf(&p, "rx%u_%s", i,
> >                                                 virtnet_rq_stats_desc[j].desc);
> > +
> > +                       virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
> >                 }
> >
> >                 for (i = 0; i < vi->curr_queue_pairs; i++) {
> >                         for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
> >                                 ethtool_sprintf(&p, "tx%u_%s", i,
> >                                                 virtnet_sq_stats_desc[j].desc);
> > +
> > +                       virtnet_get_hw_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
> >                 }
> >                 break;
> >         }
> > @@ -3289,11 +3649,35 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
> >  static int virtnet_get_sset_count(struct net_device *dev, int sset)
> >  {
> >         struct virtnet_info *vi = netdev_priv(dev);
> > +       struct virtnet_stats_ctx ctx = {0};
> > +       u32 pair_count;
> >
> >         switch (sset) {
> >         case ETH_SS_STATS:
> > -               return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
> > -                                              VIRTNET_SQ_STATS_LEN);
> > +               if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS) &&
> > +                   !vi->device_stats_cap) {
> > +                       struct scatterlist sg;
> > +
> > +                       sg_init_one(&sg, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));
> > +
> > +                       if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
> > +                                                 VIRTIO_NET_CTRL_STATS_QUERY,
> > +                                                 NULL, &sg)) {
> > +                               dev_warn(&dev->dev, "Fail to get stats capability\n");
>
> Should we fail here?
>
> > +                       } else {
> > +                               __le64 v;
> > +
> > +                               v = vi->ctrl->stats_cap.supported_stats_types[0];
> > +                               vi->device_stats_cap = le64_to_cpu(v);
> > +                       }
> > +               }
> > +
> > +               virtnet_stats_ctx_init(vi, &ctx, NULL);
> > +
> > +               pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
> > +               pair_count += ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
> > +
> > +               return ctx.desc_num[VIRTNET_Q_TYPE_CQ] + vi->curr_queue_pairs * pair_count;
>
> I wonder why we don't do this during the probe?
>
> >         default:
> >                 return -EOPNOTSUPP;
> >         }
> > @@ -3303,11 +3687,18 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
> >                                       struct ethtool_stats *stats, u64 *data)
> >  {
> >         struct virtnet_info *vi = netdev_priv(dev);
> > -       unsigned int idx = 0, start, i, j;
> > +       struct virtnet_stats_ctx ctx = {0};
> > +       unsigned int idx, start, i, j;
> >         const u8 *stats_base;
> >         const u64_stats_t *p;
> >         size_t offset;
> >
> > +       virtnet_stats_ctx_init(vi, &ctx, data);
> > +       if (virtnet_get_hw_stats(vi, &ctx))
> > +               dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
> > +
> > +       idx = ctx.desc_num[VIRTNET_Q_TYPE_CQ];
> > +
> >         for (i = 0; i < vi->curr_queue_pairs; i++) {
> >                 struct receive_queue *rq = &vi->rq[i];
> >
> > @@ -3321,6 +3712,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
> >                         }
> >                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
> >                 idx += VIRTNET_RQ_STATS_LEN;
> > +               idx += ctx.desc_num[VIRTNET_Q_TYPE_RX];
> >         }
> >
> >         for (i = 0; i < vi->curr_queue_pairs; i++) {
> > @@ -3336,6 +3728,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
> >                         }
> >                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
> >                 idx += VIRTNET_SQ_STATS_LEN;
> > +               idx += ctx.desc_num[VIRTNET_Q_TYPE_TX];
> >         }
> >  }
> >
> > @@ -4963,7 +5356,7 @@ static struct virtio_device_id id_table[] = {
> >         VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
> >         VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
> >         VIRTIO_NET_F_VQ_NOTF_COAL, \
> > -       VIRTIO_NET_F_GUEST_HDRLEN
> > +       VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
> >
> >  static unsigned int features[] = {
> >         VIRTNET_FEATURES,
> > --
> > 2.32.0.3.g01195cf9f
> >
>
> Thanks
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-04-10 10:52     ` Xuan Zhuo
@ 2024-04-11  6:09       ` Jason Wang
  2024-04-15  2:42         ` Xuan Zhuo
  0 siblings, 1 reply; 34+ messages in thread
From: Jason Wang @ 2024-04-11  6:09 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Wed, Apr 10, 2024 at 6:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Wed, 10 Apr 2024 14:09:23 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> > >
> > > make virtio-net support getting the stats from the device by ethtool -S
> > > <eth0>.
> > >
> > > Due to the numerous descriptors stats, an organization method is
> > > required. For this purpose, I have introduced the "virtnet_stats_map".
> > > Utilizing this array simplifies coding tasks such as generating field
> > > names, calculating buffer sizes for requests and responses, and parsing
> > > replies from the device. By iterating over the "virtnet_stats_map,"
> > > these operations become more streamlined and efficient.
> > >
> > > NIC statistics:
> > >      rx0_packets: 582951
> > >      rx0_bytes: 155307077
> > >      rx0_drops: 0
> > >      rx0_xdp_packets: 0
> > >      rx0_xdp_tx: 0
> > >      rx0_xdp_redirects: 0
> > >      rx0_xdp_drops: 0
> > >      rx0_kicks: 17007
> > >      rx0_hw_packets: 2179409
> > >      rx0_hw_bytes: 510015040
> > >      rx0_hw_notifications: 0
> > >      rx0_hw_interrupts: 0
> > >      rx0_hw_drops: 12964
> > >      rx0_hw_drop_overruns: 0
> > >      rx0_hw_csum_valid: 2179409
> > >      rx0_hw_csum_none: 0
> > >      rx0_hw_csum_bad: 0
> > >      rx0_hw_needs_csum: 2179409
> > >      rx0_hw_ratelimit_packets: 0
> > >      rx0_hw_ratelimit_bytes: 0
> > >      tx0_packets: 15361
> > >      tx0_bytes: 1918970
> > >      tx0_xdp_tx: 0
> > >      tx0_xdp_tx_drops: 0
> > >      tx0_kicks: 15361
> > >      tx0_timeouts: 0
> > >      tx0_hw_packets: 32272
> > >      tx0_hw_bytes: 4311698
> > >      tx0_hw_notifications: 0
> > >      tx0_hw_interrupts: 0
> > >      tx0_hw_drops: 0
> > >      tx0_hw_drop_malformed: 0
> > >      tx0_hw_csum_none: 0
> > >      tx0_hw_needs_csum: 32272
> > >      tx0_hw_ratelimit_packets: 0
> > >      tx0_hw_ratelimit_bytes: 0
> > >
> > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > ---
> > >  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
> > >  1 file changed, 397 insertions(+), 4 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > index 8cb5bdd7ad91..70c1d4e850e0 100644
> > > --- a/drivers/net/virtio_net.c
> > > +++ b/drivers/net/virtio_net.c
> > > @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> > >  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
> > >  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
> > >
> > > +#define VIRTNET_STATS_DESC_CQ(name) \
> > > +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> > > +
> > > +#define VIRTNET_STATS_DESC_RX(class, name) \
> > > +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> > > +
> > > +#define VIRTNET_STATS_DESC_TX(class, name) \
> > > +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> > > +       VIRTNET_STATS_DESC_CQ(command_num),
> > > +       VIRTNET_STATS_DESC_CQ(ok_num),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > > +       VIRTNET_STATS_DESC_RX(basic, packets),
> > > +       VIRTNET_STATS_DESC_RX(basic, bytes),
> > > +
> > > +       VIRTNET_STATS_DESC_RX(basic, notifications),
> > > +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> > > +
> > > +       VIRTNET_STATS_DESC_RX(basic, drops),
> > > +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > > +       VIRTNET_STATS_DESC_TX(basic, packets),
> > > +       VIRTNET_STATS_DESC_TX(basic, bytes),
> > > +
> > > +       VIRTNET_STATS_DESC_TX(basic, notifications),
> > > +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> > > +
> > > +       VIRTNET_STATS_DESC_TX(basic, drops),
> > > +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > > +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> > > +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> > > +
> > > +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> > > +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > > +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> > > +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> > > +};
> > > +
> > > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> > > +};
> > > +
> > > +#define VIRTNET_Q_TYPE_RX 0
> > > +#define VIRTNET_Q_TYPE_TX 1
> > > +#define VIRTNET_Q_TYPE_CQ 2
> > > +
> > > +struct virtnet_stats_map {
> > > +       /* The stat type in bitmap. */
> > > +       u64 stat_type;
> > > +
> > > +       /* The bytes of the response for the stat. */
> > > +       u32 len;
> > > +
> > > +       /* The num of the response fields for the stat. */
> > > +       u32 num;
> > > +
> > > +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> > > +       u32 queue_type;
> > > +
> > > +       /* The reply type of the stat. */
> > > +       u8 reply_type;
> > > +
> > > +       /* Describe the name and the offset in the response. */
> > > +       const struct virtnet_stat_desc *desc;
> > > +};
> > > +
> > > +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> > > +       {                                                       \
> > > +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> > > +               sizeof(struct virtio_net_stats_ ## type),       \
> > > +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> > > +               VIRTNET_Q_TYPE_##queue_type,                    \
> > > +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> > > +               &virtnet_stats_##type##_desc[0]                 \
> > > +       }
> > > +
> > > +static struct virtnet_stats_map virtio_net_stats_map[] = {
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> > > +
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> > > +
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> > > +};
> >
> > I think the reason you did this is to ease the future extensions but
> > multiple levels of nested macros makes the code hard to review. Any
> > way to eliminate this?
>
>
> NOT only for the future extensions.
>
> When we parse the reply from the device, we need to check the reply stats
> one by one, we need the stats info to help parse the stats.

Yes, but I meant, for example: is there any reason why it can't be done by
extending virtnet_stat_desc?
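
For example, a rough sketch of that direction (the added fields below are
illustrative only, not taken from any existing code):

        /* sketch: the first two fields are the existing ones; the rest are
         * possible additions so that one flat table could be walked directly,
         * without a separate virtnet_stats_map
         */
        struct virtnet_stat_desc {
                char   desc[ETH_GSTRING_LEN];
                size_t offset;
                u64    stat_type;       /* VIRTIO_NET_STATS_TYPE_* bit */
                u8     reply_type;      /* VIRTIO_NET_STATS_TYPE_REPLY_* */
                u8     queue_type;      /* VIRTNET_Q_TYPE_* */
        };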

>
>         static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
>                                       struct virtnet_stats_ctx *ctx,
>                                       const u8 *base, u8 type)
>         {
>                u32 queue_type, num_rx, num_tx, num_cq;
>                struct virtnet_stats_map *m;
>                u64 offset, bitmap;
>                const __le64 *v;
>                int i, j;
>
>                num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
>                num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
>                num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
>
>                queue_type = vq_type(vi, qid);
>                bitmap = ctx->bitmap[queue_type];
>                offset = 0;
>
>                if (queue_type == VIRTNET_Q_TYPE_TX) {
>                        offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
>                        offset += VIRTNET_SQ_STATS_LEN;
>                } else if (queue_type == VIRTNET_Q_TYPE_RX) {
>                        offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
>                }
>
>                for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
>                        m = &virtio_net_stats_map[i];
>
> ->                     if (m->stat_type & bitmap)
>                                offset += m->num;
>
> ->                     if (type != m->reply_type)
>                                continue;
>
>                        for (j = 0; j < m->num; ++j) {
>                                v = (const __le64 *)(base + m->desc[j].offset);
>                                ctx->data[offset + j] = le64_to_cpu(*v);
>                        }
>
>                        break;
>                }
>         }
>
> Thanks.

Btw, just a reminder, there are other comments for this patch.

Thanks


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-04-11  6:09       ` Jason Wang
@ 2024-04-15  2:42         ` Xuan Zhuo
  2024-04-15  6:45           ` Jason Wang
  0 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-04-15  2:42 UTC (permalink / raw)
  To: Jason Wang
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Thu, 11 Apr 2024 14:09:24 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Wed, Apr 10, 2024 at 6:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Wed, 10 Apr 2024 14:09:23 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> > > >
> > > > make virtio-net support getting the stats from the device by ethtool -S
> > > > <eth0>.
> > > >
> > > > Due to the numerous descriptors stats, an organization method is
> > > > required. For this purpose, I have introduced the "virtnet_stats_map".
> > > > Utilizing this array simplifies coding tasks such as generating field
> > > > names, calculating buffer sizes for requests and responses, and parsing
> > > > replies from the device. By iterating over the "virtnet_stats_map,"
> > > > these operations become more streamlined and efficient.
> > > >
> > > > NIC statistics:
> > > >      rx0_packets: 582951
> > > >      rx0_bytes: 155307077
> > > >      rx0_drops: 0
> > > >      rx0_xdp_packets: 0
> > > >      rx0_xdp_tx: 0
> > > >      rx0_xdp_redirects: 0
> > > >      rx0_xdp_drops: 0
> > > >      rx0_kicks: 17007
> > > >      rx0_hw_packets: 2179409
> > > >      rx0_hw_bytes: 510015040
> > > >      rx0_hw_notifications: 0
> > > >      rx0_hw_interrupts: 0
> > > >      rx0_hw_drops: 12964
> > > >      rx0_hw_drop_overruns: 0
> > > >      rx0_hw_csum_valid: 2179409
> > > >      rx0_hw_csum_none: 0
> > > >      rx0_hw_csum_bad: 0
> > > >      rx0_hw_needs_csum: 2179409
> > > >      rx0_hw_ratelimit_packets: 0
> > > >      rx0_hw_ratelimit_bytes: 0
> > > >      tx0_packets: 15361
> > > >      tx0_bytes: 1918970
> > > >      tx0_xdp_tx: 0
> > > >      tx0_xdp_tx_drops: 0
> > > >      tx0_kicks: 15361
> > > >      tx0_timeouts: 0
> > > >      tx0_hw_packets: 32272
> > > >      tx0_hw_bytes: 4311698
> > > >      tx0_hw_notifications: 0
> > > >      tx0_hw_interrupts: 0
> > > >      tx0_hw_drops: 0
> > > >      tx0_hw_drop_malformed: 0
> > > >      tx0_hw_csum_none: 0
> > > >      tx0_hw_needs_csum: 32272
> > > >      tx0_hw_ratelimit_packets: 0
> > > >      tx0_hw_ratelimit_bytes: 0
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > ---
> > > >  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
> > > >  1 file changed, 397 insertions(+), 4 deletions(-)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index 8cb5bdd7ad91..70c1d4e850e0 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> > > >  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
> > > >  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
> > > >
> > > > +#define VIRTNET_STATS_DESC_CQ(name) \
> > > > +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> > > > +
> > > > +#define VIRTNET_STATS_DESC_RX(class, name) \
> > > > +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> > > > +
> > > > +#define VIRTNET_STATS_DESC_TX(class, name) \
> > > > +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> > > > +       VIRTNET_STATS_DESC_CQ(command_num),
> > > > +       VIRTNET_STATS_DESC_CQ(ok_num),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > > > +       VIRTNET_STATS_DESC_RX(basic, packets),
> > > > +       VIRTNET_STATS_DESC_RX(basic, bytes),
> > > > +
> > > > +       VIRTNET_STATS_DESC_RX(basic, notifications),
> > > > +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> > > > +
> > > > +       VIRTNET_STATS_DESC_RX(basic, drops),
> > > > +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > > > +       VIRTNET_STATS_DESC_TX(basic, packets),
> > > > +       VIRTNET_STATS_DESC_TX(basic, bytes),
> > > > +
> > > > +       VIRTNET_STATS_DESC_TX(basic, notifications),
> > > > +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> > > > +
> > > > +       VIRTNET_STATS_DESC_TX(basic, drops),
> > > > +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > > > +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> > > > +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> > > > +
> > > > +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> > > > +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > > > +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> > > > +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> > > > +};
> > > > +
> > > > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> > > > +};
> > > > +
> > > > +#define VIRTNET_Q_TYPE_RX 0
> > > > +#define VIRTNET_Q_TYPE_TX 1
> > > > +#define VIRTNET_Q_TYPE_CQ 2
> > > > +
> > > > +struct virtnet_stats_map {
> > > > +       /* The stat type in bitmap. */
> > > > +       u64 stat_type;
> > > > +
> > > > +       /* The bytes of the response for the stat. */
> > > > +       u32 len;
> > > > +
> > > > +       /* The num of the response fields for the stat. */
> > > > +       u32 num;
> > > > +
> > > > +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> > > > +       u32 queue_type;
> > > > +
> > > > +       /* The reply type of the stat. */
> > > > +       u8 reply_type;
> > > > +
> > > > +       /* Describe the name and the offset in the response. */
> > > > +       const struct virtnet_stat_desc *desc;
> > > > +};
> > > > +
> > > > +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> > > > +       {                                                       \
> > > > +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> > > > +               sizeof(struct virtio_net_stats_ ## type),       \
> > > > +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> > > > +               VIRTNET_Q_TYPE_##queue_type,                    \
> > > > +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> > > > +               &virtnet_stats_##type##_desc[0]                 \
> > > > +       }
> > > > +
> > > > +static struct virtnet_stats_map virtio_net_stats_map[] = {
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> > > > +
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> > > > +
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> > > > +};
> > >
> > > I think the reason you did this is to ease the future extensions but
> > > multiple levels of nested macros makes the code hard to review. Any
> > > way to eliminate this?
> >
> >
> > NOT only for the future extensions.
> >
> > When we parse the reply from the device, we need to check the reply stats
> > one by one, we need the stats info to help parse the stats.
>
> Yes, but I meant for example any reason why it can't be done by
> extending virtnet_stat_desc ?



You know, virtio_net_stats_map is a way to organize the descs.

This is used to avoid a big if-else when parsing the replies from the device.

Without this map, we would have a big if-else like:

 if (reply.type == rx_basic) {
 	 /* do the same something */
 }
 if (reply.type == tx_basic) {
 	 /* do the same something */
 }
 if (reply.type == rx_csum) {
 	 /* do the same something */
 }
 if (reply.type == tx_csum) {
 	 /* do the same something */
 }
 if (reply.type == rx_gso) {
 	 /* do the same something */
 }
 if (reply.type == tx_gso) {
 	 /* do the same something */
 }
 if (reply.type == rx_speed) {
 	 /* do the same something */
 }
 if (reply.type == tx_speed) {
 	 /* do the same something */
 }

I want to avoid this, so I introduced this map.
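
The same map also drives the other bookkeeping mentioned in the commit log.
A minimal sketch of sizing the reply buffer for a requested bitmap (the
helper name is made up here; only the walk over the map matters):

        static u32 virtnet_stats_reply_size(u64 bitmap)
        {
                u32 size = 0;
                int i;

                for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); i++)
                        if (virtio_net_stats_map[i].stat_type & bitmap)
                                size += virtio_net_stats_map[i].len;

                return size;
        }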

YES. I noticed the other comments, but I think we should
fix this problem first.

Thanks.


>
> >
> >         static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
> >                                       struct virtnet_stats_ctx *ctx,
> >                                       const u8 *base, u8 type)
> >         {
> >                u32 queue_type, num_rx, num_tx, num_cq;
> >                struct virtnet_stats_map *m;
> >                u64 offset, bitmap;
> >                const __le64 *v;
> >                int i, j;
> >
> >                num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
> >                num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
> >                num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
> >
> >                queue_type = vq_type(vi, qid);
> >                bitmap = ctx->bitmap[queue_type];
> >                offset = 0;
> >
> >                if (queue_type == VIRTNET_Q_TYPE_TX) {
> >                        offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> >                        offset += VIRTNET_SQ_STATS_LEN;
> >                } else if (queue_type == VIRTNET_Q_TYPE_RX) {
> >                        offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> >                }
> >
> >                for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> >                        m = &virtio_net_stats_map[i];
> >
> > ->                     if (m->stat_type & bitmap)
> >                                offset += m->num;
> >
> > ->                     if (type != m->reply_type)
> >                                continue;
> >
> >                        for (j = 0; j < m->num; ++j) {
> >                                v = (const __le64 *)(base + m->desc[j].offset);
> >                                ctx->data[offset + j] = le64_to_cpu(*v);
> >                        }
> >
> >                        break;
> >                }
> >         }
> >
> > Thanks.
>
> Btw, just a reminder, there are other comments for this patch.
>
> Thanks
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-04-15  2:42         ` Xuan Zhuo
@ 2024-04-15  6:45           ` Jason Wang
  2024-04-15  8:11             ` Xuan Zhuo
  0 siblings, 1 reply; 34+ messages in thread
From: Jason Wang @ 2024-04-15  6:45 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, Apr 15, 2024 at 10:51 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Thu, 11 Apr 2024 14:09:24 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Wed, Apr 10, 2024 at 6:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > On Wed, 10 Apr 2024 14:09:23 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > >
> > > > > As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> > > > >
> > > > > make virtio-net support getting the stats from the device by ethtool -S
> > > > > <eth0>.
> > > > >
> > > > > Due to the numerous descriptors stats, an organization method is
> > > > > required. For this purpose, I have introduced the "virtnet_stats_map".
> > > > > Utilizing this array simplifies coding tasks such as generating field
> > > > > names, calculating buffer sizes for requests and responses, and parsing
> > > > > replies from the device. By iterating over the "virtnet_stats_map,"
> > > > > these operations become more streamlined and efficient.
> > > > >
> > > > > NIC statistics:
> > > > >      rx0_packets: 582951
> > > > >      rx0_bytes: 155307077
> > > > >      rx0_drops: 0
> > > > >      rx0_xdp_packets: 0
> > > > >      rx0_xdp_tx: 0
> > > > >      rx0_xdp_redirects: 0
> > > > >      rx0_xdp_drops: 0
> > > > >      rx0_kicks: 17007
> > > > >      rx0_hw_packets: 2179409
> > > > >      rx0_hw_bytes: 510015040
> > > > >      rx0_hw_notifications: 0
> > > > >      rx0_hw_interrupts: 0
> > > > >      rx0_hw_drops: 12964
> > > > >      rx0_hw_drop_overruns: 0
> > > > >      rx0_hw_csum_valid: 2179409
> > > > >      rx0_hw_csum_none: 0
> > > > >      rx0_hw_csum_bad: 0
> > > > >      rx0_hw_needs_csum: 2179409
> > > > >      rx0_hw_ratelimit_packets: 0
> > > > >      rx0_hw_ratelimit_bytes: 0
> > > > >      tx0_packets: 15361
> > > > >      tx0_bytes: 1918970
> > > > >      tx0_xdp_tx: 0
> > > > >      tx0_xdp_tx_drops: 0
> > > > >      tx0_kicks: 15361
> > > > >      tx0_timeouts: 0
> > > > >      tx0_hw_packets: 32272
> > > > >      tx0_hw_bytes: 4311698
> > > > >      tx0_hw_notifications: 0
> > > > >      tx0_hw_interrupts: 0
> > > > >      tx0_hw_drops: 0
> > > > >      tx0_hw_drop_malformed: 0
> > > > >      tx0_hw_csum_none: 0
> > > > >      tx0_hw_needs_csum: 32272
> > > > >      tx0_hw_ratelimit_packets: 0
> > > > >      tx0_hw_ratelimit_bytes: 0
> > > > >
> > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > ---
> > > > >  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
> > > > >  1 file changed, 397 insertions(+), 4 deletions(-)
> > > > >
> > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > index 8cb5bdd7ad91..70c1d4e850e0 100644
> > > > > --- a/drivers/net/virtio_net.c
> > > > > +++ b/drivers/net/virtio_net.c
> > > > > @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> > > > >  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
> > > > >  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
> > > > >
> > > > > +#define VIRTNET_STATS_DESC_CQ(name) \
> > > > > +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> > > > > +
> > > > > +#define VIRTNET_STATS_DESC_RX(class, name) \
> > > > > +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> > > > > +
> > > > > +#define VIRTNET_STATS_DESC_TX(class, name) \
> > > > > +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_CQ(command_num),
> > > > > +       VIRTNET_STATS_DESC_CQ(ok_num),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(basic, packets),
> > > > > +       VIRTNET_STATS_DESC_RX(basic, bytes),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_RX(basic, notifications),
> > > > > +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_RX(basic, drops),
> > > > > +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(basic, packets),
> > > > > +       VIRTNET_STATS_DESC_TX(basic, bytes),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_TX(basic, notifications),
> > > > > +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_TX(basic, drops),
> > > > > +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> > > > > +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> > > > > +
> > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> > > > > +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> > > > > +};
> > > > > +
> > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> > > > > +};
> > > > > +
> > > > > +#define VIRTNET_Q_TYPE_RX 0
> > > > > +#define VIRTNET_Q_TYPE_TX 1
> > > > > +#define VIRTNET_Q_TYPE_CQ 2
> > > > > +
> > > > > +struct virtnet_stats_map {
> > > > > +       /* The stat type in bitmap. */
> > > > > +       u64 stat_type;
> > > > > +
> > > > > +       /* The bytes of the response for the stat. */
> > > > > +       u32 len;
> > > > > +
> > > > > +       /* The num of the response fields for the stat. */
> > > > > +       u32 num;
> > > > > +
> > > > > +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> > > > > +       u32 queue_type;
> > > > > +
> > > > > +       /* The reply type of the stat. */
> > > > > +       u8 reply_type;
> > > > > +
> > > > > +       /* Describe the name and the offset in the response. */
> > > > > +       const struct virtnet_stat_desc *desc;
> > > > > +};
> > > > > +
> > > > > +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> > > > > +       {                                                       \
> > > > > +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> > > > > +               sizeof(struct virtio_net_stats_ ## type),       \
> > > > > +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> > > > > +               VIRTNET_Q_TYPE_##queue_type,                    \
> > > > > +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> > > > > +               &virtnet_stats_##type##_desc[0]                 \
> > > > > +       }
> > > > > +
> > > > > +static struct virtnet_stats_map virtio_net_stats_map[] = {
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> > > > > +
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> > > > > +
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> > > > > +};
> > > >
> > > > I think the reason you did this is to ease the future extensions but
> > > > multiple levels of nested macros makes the code hard to review. Any
> > > > way to eliminate this?
> > >
> > >
> > > NOT only for the future extensions.
> > >
> > > When we parse the reply from the device, we need to check the reply stats
> > > one by one, we need the stats info to help parse the stats.
> >
> > Yes, but I meant for example any reason why it can't be done by
> > extending virtnet_stat_desc ?
>
>
>
> You know, virtio_net_stats_map is way to organize the descs.
>
> This is used to avoid the big if-else when parsing the replys from the device.
>
> If no this map, we will have a big if-else like:
>
>  if (reply.type == rx_basic) {
>          /* do the same something */
>  }
>  if (reply.type == tx_basic) {
>          /* do the same something */
>  }
>  if (reply.type == rx_csum) {
>          /* do the same something */
>  }
>  if (reply.type == tx_csum) {
>          /* do the same something */
>  }
>  if (reply.type == rx_gso) {
>          /* do the same something */
>  }
>  if (reply.type == tx_gso) {
>          /* do the same something */
>  }
>  if (reply.type == rx_speed) {
>          /* do the same something */
>  }
>  if (reply.type == tx_speed) {
>          /* do the same something */
>  }
>
> I want to avoid this, so introducing this map.

Could we have an array of function pointers indexed by the type?
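
Something along these lines, purely as a sketch (the function names and the
array layout are illustrative only):

        typedef void (*virtnet_stats_fill_fn)(const u8 *base, u64 *data);

        static void virtnet_fill_rx_basic(const u8 *base, u64 *data)
        {
                const struct virtio_net_stats_rx_basic *s = (const void *)base;

                data[0] = le64_to_cpu(s->rx_packets);
                data[1] = le64_to_cpu(s->rx_bytes);
                /* ... one line per field of the reply ... */
        }

        /* indexed by the reply type of each returned stats block */
        static const virtnet_stats_fill_fn virtnet_fill_fns[] = {
                [VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC] = virtnet_fill_rx_basic,
                /* ... one entry per reply type ... */
        };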

Thanks

>
> YES. I noticed other comments, but I think we should
> fix this problem firstly.
>
> Thanks.
>
>
> >
> > >
> > >         static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
> > >                                       struct virtnet_stats_ctx *ctx,
> > >                                       const u8 *base, u8 type)
> > >         {
> > >                u32 queue_type, num_rx, num_tx, num_cq;
> > >                struct virtnet_stats_map *m;
> > >                u64 offset, bitmap;
> > >                const __le64 *v;
> > >                int i, j;
> > >
> > >                num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
> > >                num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
> > >                num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
> > >
> > >                queue_type = vq_type(vi, qid);
> > >                bitmap = ctx->bitmap[queue_type];
> > >                offset = 0;
> > >
> > >                if (queue_type == VIRTNET_Q_TYPE_TX) {
> > >                        offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> > >                        offset += VIRTNET_SQ_STATS_LEN;
> > >                } else if (queue_type == VIRTNET_Q_TYPE_RX) {
> > >                        offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> > >                }
> > >
> > >                for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> > >                        m = &virtio_net_stats_map[i];
> > >
> > > ->                     if (m->stat_type & bitmap)
> > >                                offset += m->num;
> > >
> > > ->                     if (type != m->reply_type)
> > >                                continue;
> > >
> > >                        for (j = 0; j < m->num; ++j) {
> > >                                v = (const __le64 *)(base + m->desc[j].offset);
> > >                                ctx->data[offset + j] = le64_to_cpu(*v);
> > >                        }
> > >
> > >                        break;
> > >                }
> > >         }
> > >
> > > Thanks.
> >
> > Btw, just a reminder, there are other comments for this patch.
> >
> > Thanks
> >
>


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-04-15  6:45           ` Jason Wang
@ 2024-04-15  8:11             ` Xuan Zhuo
  2024-04-15  8:34               ` Jason Wang
  0 siblings, 1 reply; 34+ messages in thread
From: Xuan Zhuo @ 2024-04-15  8:11 UTC (permalink / raw)
  To: Jason Wang
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, 15 Apr 2024 14:45:36 +0800, Jason Wang <jasowang@redhat.com> wrote:
> On Mon, Apr 15, 2024 at 10:51 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> >
> > On Thu, 11 Apr 2024 14:09:24 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > On Wed, Apr 10, 2024 at 6:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > >
> > > > On Wed, 10 Apr 2024 14:09:23 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > >
> > > > > > As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> > > > > >
> > > > > > make virtio-net support getting the stats from the device by ethtool -S
> > > > > > <eth0>.
> > > > > >
> > > > > > Due to the numerous descriptors stats, an organization method is
> > > > > > required. For this purpose, I have introduced the "virtnet_stats_map".
> > > > > > Utilizing this array simplifies coding tasks such as generating field
> > > > > > names, calculating buffer sizes for requests and responses, and parsing
> > > > > > replies from the device. By iterating over the "virtnet_stats_map,"
> > > > > > these operations become more streamlined and efficient.
> > > > > >
> > > > > > NIC statistics:
> > > > > >      rx0_packets: 582951
> > > > > >      rx0_bytes: 155307077
> > > > > >      rx0_drops: 0
> > > > > >      rx0_xdp_packets: 0
> > > > > >      rx0_xdp_tx: 0
> > > > > >      rx0_xdp_redirects: 0
> > > > > >      rx0_xdp_drops: 0
> > > > > >      rx0_kicks: 17007
> > > > > >      rx0_hw_packets: 2179409
> > > > > >      rx0_hw_bytes: 510015040
> > > > > >      rx0_hw_notifications: 0
> > > > > >      rx0_hw_interrupts: 0
> > > > > >      rx0_hw_drops: 12964
> > > > > >      rx0_hw_drop_overruns: 0
> > > > > >      rx0_hw_csum_valid: 2179409
> > > > > >      rx0_hw_csum_none: 0
> > > > > >      rx0_hw_csum_bad: 0
> > > > > >      rx0_hw_needs_csum: 2179409
> > > > > >      rx0_hw_ratelimit_packets: 0
> > > > > >      rx0_hw_ratelimit_bytes: 0
> > > > > >      tx0_packets: 15361
> > > > > >      tx0_bytes: 1918970
> > > > > >      tx0_xdp_tx: 0
> > > > > >      tx0_xdp_tx_drops: 0
> > > > > >      tx0_kicks: 15361
> > > > > >      tx0_timeouts: 0
> > > > > >      tx0_hw_packets: 32272
> > > > > >      tx0_hw_bytes: 4311698
> > > > > >      tx0_hw_notifications: 0
> > > > > >      tx0_hw_interrupts: 0
> > > > > >      tx0_hw_drops: 0
> > > > > >      tx0_hw_drop_malformed: 0
> > > > > >      tx0_hw_csum_none: 0
> > > > > >      tx0_hw_needs_csum: 32272
> > > > > >      tx0_hw_ratelimit_packets: 0
> > > > > >      tx0_hw_ratelimit_bytes: 0
> > > > > >
> > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > ---
> > > > > >  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
> > > > > >  1 file changed, 397 insertions(+), 4 deletions(-)
> > > > > >
> > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > index 8cb5bdd7ad91..70c1d4e850e0 100644
> > > > > > --- a/drivers/net/virtio_net.c
> > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> > > > > >  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
> > > > > >  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
> > > > > >
> > > > > > +#define VIRTNET_STATS_DESC_CQ(name) \
> > > > > > +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> > > > > > +
> > > > > > +#define VIRTNET_STATS_DESC_RX(class, name) \
> > > > > > +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> > > > > > +
> > > > > > +#define VIRTNET_STATS_DESC_TX(class, name) \
> > > > > > +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_CQ(command_num),
> > > > > > +       VIRTNET_STATS_DESC_CQ(ok_num),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_RX(basic, packets),
> > > > > > +       VIRTNET_STATS_DESC_RX(basic, bytes),
> > > > > > +
> > > > > > +       VIRTNET_STATS_DESC_RX(basic, notifications),
> > > > > > +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> > > > > > +
> > > > > > +       VIRTNET_STATS_DESC_RX(basic, drops),
> > > > > > +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_TX(basic, packets),
> > > > > > +       VIRTNET_STATS_DESC_TX(basic, bytes),
> > > > > > +
> > > > > > +       VIRTNET_STATS_DESC_TX(basic, notifications),
> > > > > > +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> > > > > > +
> > > > > > +       VIRTNET_STATS_DESC_TX(basic, drops),
> > > > > > +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> > > > > > +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> > > > > > +
> > > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> > > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> > > > > > +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> > > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> > > > > > +};
> > > > > > +
> > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> > > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> > > > > > +};
> > > > > > +
> > > > > > +#define VIRTNET_Q_TYPE_RX 0
> > > > > > +#define VIRTNET_Q_TYPE_TX 1
> > > > > > +#define VIRTNET_Q_TYPE_CQ 2
> > > > > > +
> > > > > > +struct virtnet_stats_map {
> > > > > > +       /* The stat type in bitmap. */
> > > > > > +       u64 stat_type;
> > > > > > +
> > > > > > +       /* The bytes of the response for the stat. */
> > > > > > +       u32 len;
> > > > > > +
> > > > > > +       /* The num of the response fields for the stat. */
> > > > > > +       u32 num;
> > > > > > +
> > > > > > +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> > > > > > +       u32 queue_type;
> > > > > > +
> > > > > > +       /* The reply type of the stat. */
> > > > > > +       u8 reply_type;
> > > > > > +
> > > > > > +       /* Describe the name and the offset in the response. */
> > > > > > +       const struct virtnet_stat_desc *desc;
> > > > > > +};
> > > > > > +
> > > > > > +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> > > > > > +       {                                                       \
> > > > > > +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> > > > > > +               sizeof(struct virtio_net_stats_ ## type),       \
> > > > > > +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> > > > > > +               VIRTNET_Q_TYPE_##queue_type,                    \
> > > > > > +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> > > > > > +               &virtnet_stats_##type##_desc[0]                 \
> > > > > > +       }
> > > > > > +
> > > > > > +static struct virtnet_stats_map virtio_net_stats_map[] = {
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> > > > > > +
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> > > > > > +
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> > > > > > +};
> > > > >
> > > > > I think the reason you did this is to ease the future extensions but
> > > > > multiple levels of nested macros makes the code hard to review. Any
> > > > > way to eliminate this?
> > > >
> > > >
> > > > NOT only for the future extensions.
> > > >
> > > > When we parse the reply from the device, we need to check the reply stats
> > > > one by one, we need the stats info to help parse the stats.
> > >
> > > Yes, but I meant for example any reason why it can't be done by
> > > extending virtnet_stat_desc ?
> >
> >
> >
> > You know, virtio_net_stats_map is way to organize the descs.
> >
> > This is used to avoid the big if-else when parsing the replys from the device.
> >
> > If no this map, we will have a big if-else like:
> >
> >  if (reply.type == rx_basic) {
> >          /* do the same something */
> >  }
> >  if (reply.type == tx_basic) {
> >          /* do the same something */
> >  }
> >  if (reply.type == rx_csum) {
> >          /* do the same something */
> >  }
> >  if (reply.type == tx_csum) {
> >          /* do the same something */
> >  }
> >  if (reply.type == rx_gso) {
> >          /* do the same something */
> >  }
> >  if (reply.type == tx_gso) {
> >          /* do the same something */
> >  }
> >  if (reply.type == rx_speed) {
> >          /* do the same something */
> >  }
> >  if (reply.type == tx_speed) {
> >          /* do the same something */
> >  }
> >
> > I want to avoid this, so introducing this map.
>
> Could we have a function pointers array indexed by the type?

Then these functions would all be similar, and there would be a lot of them.

Maybe we can start with the if-else or the per-type functions.
That will be easy to review, and we can optimize on top of that.

Thanks.



>
> Thanks
>
> >
> > YES. I noticed other comments, but I think we should
> > fix this problem firstly.
> >
> > Thanks.
> >
> >
> > >
> > > >
> > > >         static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
> > > >                                       struct virtnet_stats_ctx *ctx,
> > > >                                       const u8 *base, u8 type)
> > > >         {
> > > >                u32 queue_type, num_rx, num_tx, num_cq;
> > > >                struct virtnet_stats_map *m;
> > > >                u64 offset, bitmap;
> > > >                const __le64 *v;
> > > >                int i, j;
> > > >
> > > >                num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
> > > >                num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
> > > >                num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
> > > >
> > > >                queue_type = vq_type(vi, qid);
> > > >                bitmap = ctx->bitmap[queue_type];
> > > >                offset = 0;
> > > >
> > > >                if (queue_type == VIRTNET_Q_TYPE_TX) {
> > > >                        offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> > > >                        offset += VIRTNET_SQ_STATS_LEN;
> > > >                } else if (queue_type == VIRTNET_Q_TYPE_RX) {
> > > >                        offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> > > >                }
> > > >
> > > >                for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> > > >                        m = &virtio_net_stats_map[i];
> > > >
> > > > ->                     if (m->stat_type & bitmap)
> > > >                                offset += m->num;
> > > >
> > > > ->                     if (type != m->reply_type)
> > > >                                continue;
> > > >
> > > >                        for (j = 0; j < m->num; ++j) {
> > > >                                v = (const __le64 *)(base + m->desc[j].offset);
> > > >                                ctx->data[offset + j] = le64_to_cpu(*v);
> > > >                        }
> > > >
> > > >                        break;
> > > >                }
> > > >         }
> > > >
> > > > Thanks.
> > >
> > > Btw, just a reminder, there are other comments for this patch.
> > >
> > > Thanks
> > >
> >
>

^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 4/9] virtio_net: support device stats
  2024-04-15  8:11             ` Xuan Zhuo
@ 2024-04-15  8:34               ` Jason Wang
  0 siblings, 0 replies; 34+ messages in thread
From: Jason Wang @ 2024-04-15  8:34 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Michael S. Tsirkin, Alexei Starovoitov,
	Daniel Borkmann, Jesper Dangaard Brouer, John Fastabend,
	Stanislav Fomichev, Amritha Nambiar, Larysa Zaremba,
	Sridhar Samudrala, Maciej Fijalkowski, virtualization, bpf

On Mon, Apr 15, 2024 at 4:15 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> On Mon, 15 Apr 2024 14:45:36 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > On Mon, Apr 15, 2024 at 10:51 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > >
> > > On Thu, 11 Apr 2024 14:09:24 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > On Wed, Apr 10, 2024 at 6:55 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > >
> > > > > On Wed, 10 Apr 2024 14:09:23 +0800, Jason Wang <jasowang@redhat.com> wrote:
> > > > > > On Mon, Mar 18, 2024 at 7:06 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
> > > > > > >
> > > > > > > As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> > > > > > >
> > > > > > > make virtio-net support getting the stats from the device by ethtool -S
> > > > > > > <eth0>.
> > > > > > >
> > > > > > > Due to the numerous descriptors stats, an organization method is
> > > > > > > required. For this purpose, I have introduced the "virtnet_stats_map".
> > > > > > > Utilizing this array simplifies coding tasks such as generating field
> > > > > > > names, calculating buffer sizes for requests and responses, and parsing
> > > > > > > replies from the device. By iterating over the "virtnet_stats_map,"
> > > > > > > these operations become more streamlined and efficient.
> > > > > > >
> > > > > > > NIC statistics:
> > > > > > >      rx0_packets: 582951
> > > > > > >      rx0_bytes: 155307077
> > > > > > >      rx0_drops: 0
> > > > > > >      rx0_xdp_packets: 0
> > > > > > >      rx0_xdp_tx: 0
> > > > > > >      rx0_xdp_redirects: 0
> > > > > > >      rx0_xdp_drops: 0
> > > > > > >      rx0_kicks: 17007
> > > > > > >      rx0_hw_packets: 2179409
> > > > > > >      rx0_hw_bytes: 510015040
> > > > > > >      rx0_hw_notifications: 0
> > > > > > >      rx0_hw_interrupts: 0
> > > > > > >      rx0_hw_drops: 12964
> > > > > > >      rx0_hw_drop_overruns: 0
> > > > > > >      rx0_hw_csum_valid: 2179409
> > > > > > >      rx0_hw_csum_none: 0
> > > > > > >      rx0_hw_csum_bad: 0
> > > > > > >      rx0_hw_needs_csum: 2179409
> > > > > > >      rx0_hw_ratelimit_packets: 0
> > > > > > >      rx0_hw_ratelimit_bytes: 0
> > > > > > >      tx0_packets: 15361
> > > > > > >      tx0_bytes: 1918970
> > > > > > >      tx0_xdp_tx: 0
> > > > > > >      tx0_xdp_tx_drops: 0
> > > > > > >      tx0_kicks: 15361
> > > > > > >      tx0_timeouts: 0
> > > > > > >      tx0_hw_packets: 32272
> > > > > > >      tx0_hw_bytes: 4311698
> > > > > > >      tx0_hw_notifications: 0
> > > > > > >      tx0_hw_interrupts: 0
> > > > > > >      tx0_hw_drops: 0
> > > > > > >      tx0_hw_drop_malformed: 0
> > > > > > >      tx0_hw_csum_none: 0
> > > > > > >      tx0_hw_needs_csum: 32272
> > > > > > >      tx0_hw_ratelimit_packets: 0
> > > > > > >      tx0_hw_ratelimit_bytes: 0
> > > > > > >
> > > > > > > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > > > > > > ---
> > > > > > >  drivers/net/virtio_net.c | 401 ++++++++++++++++++++++++++++++++++++++-
> > > > > > >  1 file changed, 397 insertions(+), 4 deletions(-)
> > > > > > >
> > > > > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > > > > index 8cb5bdd7ad91..70c1d4e850e0 100644
> > > > > > > --- a/drivers/net/virtio_net.c
> > > > > > > +++ b/drivers/net/virtio_net.c
> > > > > > > @@ -128,6 +128,129 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> > > > > > >  #define VIRTNET_SQ_STATS_LEN   ARRAY_SIZE(virtnet_sq_stats_desc)
> > > > > > >  #define VIRTNET_RQ_STATS_LEN   ARRAY_SIZE(virtnet_rq_stats_desc)
> > > > > > >
> > > > > > > +#define VIRTNET_STATS_DESC_CQ(name) \
> > > > > > > +       {#name, offsetof(struct virtio_net_stats_cvq, name)}
> > > > > > > +
> > > > > > > +#define VIRTNET_STATS_DESC_RX(class, name) \
> > > > > > > +       {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name)}
> > > > > > > +
> > > > > > > +#define VIRTNET_STATS_DESC_TX(class, name) \
> > > > > > > +       {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name)}
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_CQ(command_num),
> > > > > > > +       VIRTNET_STATS_DESC_CQ(ok_num),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_RX(basic, packets),
> > > > > > > +       VIRTNET_STATS_DESC_RX(basic, bytes),
> > > > > > > +
> > > > > > > +       VIRTNET_STATS_DESC_RX(basic, notifications),
> > > > > > > +       VIRTNET_STATS_DESC_RX(basic, interrupts),
> > > > > > > +
> > > > > > > +       VIRTNET_STATS_DESC_RX(basic, drops),
> > > > > > > +       VIRTNET_STATS_DESC_RX(basic, drop_overruns),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_TX(basic, packets),
> > > > > > > +       VIRTNET_STATS_DESC_TX(basic, bytes),
> > > > > > > +
> > > > > > > +       VIRTNET_STATS_DESC_TX(basic, notifications),
> > > > > > > +       VIRTNET_STATS_DESC_TX(basic, interrupts),
> > > > > > > +
> > > > > > > +       VIRTNET_STATS_DESC_TX(basic, drops),
> > > > > > > +       VIRTNET_STATS_DESC_TX(basic, drop_malformed),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_valid),
> > > > > > > +       VIRTNET_STATS_DESC_RX(csum, needs_csum),
> > > > > > > +
> > > > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_none),
> > > > > > > +       VIRTNET_STATS_DESC_RX(csum, csum_bad),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_TX(csum, needs_csum),
> > > > > > > +       VIRTNET_STATS_DESC_TX(csum, csum_none),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets),
> > > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes),
> > > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_packets_coalesced),
> > > > > > > +       VIRTNET_STATS_DESC_RX(gso, gso_bytes_coalesced),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets),
> > > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes),
> > > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments),
> > > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_segments_bytes),
> > > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
> > > > > > > +       VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_packets),
> > > > > > > +       VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
> > > > > > > +};
> > > > > > > +
> > > > > > > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > > > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_packets),
> > > > > > > +       VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
> > > > > > > +};
> > > > > > > +
> > > > > > > +#define VIRTNET_Q_TYPE_RX 0
> > > > > > > +#define VIRTNET_Q_TYPE_TX 1
> > > > > > > +#define VIRTNET_Q_TYPE_CQ 2
> > > > > > > +
> > > > > > > +struct virtnet_stats_map {
> > > > > > > +       /* The stat type in bitmap. */
> > > > > > > +       u64 stat_type;
> > > > > > > +
> > > > > > > +       /* The bytes of the response for the stat. */
> > > > > > > +       u32 len;
> > > > > > > +
> > > > > > > +       /* The num of the response fields for the stat. */
> > > > > > > +       u32 num;
> > > > > > > +
> > > > > > > +       /* The type of queue corresponding to the statistics. (cq, rq, sq) */
> > > > > > > +       u32 queue_type;
> > > > > > > +
> > > > > > > +       /* The reply type of the stat. */
> > > > > > > +       u8 reply_type;
> > > > > > > +
> > > > > > > +       /* Describe the name and the offset in the response. */
> > > > > > > +       const struct virtnet_stat_desc *desc;
> > > > > > > +};
> > > > > > > +
> > > > > > > +#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)  \
> > > > > > > +       {                                                       \
> > > > > > > +               VIRTIO_NET_STATS_TYPE_##TYPE,                   \
> > > > > > > +               sizeof(struct virtio_net_stats_ ## type),       \
> > > > > > > +               ARRAY_SIZE(virtnet_stats_ ## type ##_desc),     \
> > > > > > > +               VIRTNET_Q_TYPE_##queue_type,                    \
> > > > > > > +               VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,             \
> > > > > > > +               &virtnet_stats_##type##_desc[0]                 \
> > > > > > > +       }
> > > > > > > +
> > > > > > > +static struct virtnet_stats_map virtio_net_stats_map[] = {
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> > > > > > > +
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
> > > > > > > +
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
> > > > > > > +       VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> > > > > > > +};
> > > > > >
> > > > > > I think the reason you did this is to ease the future extensions but
> > > > > > multiple levels of nested macros makes the code hard to review. Any
> > > > > > way to eliminate this?
> > > > >
> > > > >
> > > > > NOT only for the future extensions.
> > > > >
> > > > > When we parse the reply from the device, we need to check the reply stats
> > > > > one by one, we need the stats info to help parse the stats.
> > > >
> > > > Yes, but I meant for example any reason why it can't be done by
> > > > extending virtnet_stat_desc ?
> > >
> > >
> > >
> > > You know, virtio_net_stats_map is way to organize the descs.
> > >
> > > This is used to avoid the big if-else when parsing the replys from the device.
> > >
> > > If no this map, we will have a big if-else like:
> > >
> > >  if (reply.type == rx_basic) {
> > >          /* do the same something */
> > >  }
> > >  if (reply.type == tx_basic) {
> > >          /* do the same something */
> > >  }
> > >  if (reply.type == rx_csum) {
> > >          /* do the same something */
> > >  }
> > >  if (reply.type == tx_csum) {
> > >          /* do the same something */
> > >  }
> > >  if (reply.type == rx_gso) {
> > >          /* do the same something */
> > >  }
> > >  if (reply.type == tx_gso) {
> > >          /* do the same something */
> > >  }
> > >  if (reply.type == rx_speed) {
> > >          /* do the same something */
> > >  }
> > >  if (reply.type == tx_speed) {
> > >          /* do the same something */
> > >  }
> > >
> > > I want to avoid this, so introducing this map.
> >
> > Could we have a function pointers array indexed by the type?
>
> Then these functions will be similar and mass.
>
> Maybe we can start with the if-else or the function.
> That will be easy to review. We can optimize on that.

Fine with me.

Thanks

>
> Thanks.
>
>
>
> >
> > Thanks
> >
> > >
> > > YES. I noticed other comments, but I think we should
> > > fix this problem firstly.
> > >
> > > Thanks.
> > >
> > >
> > > >
> > > > >
> > > > >         static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
> > > > >                                       struct virtnet_stats_ctx *ctx,
> > > > >                                       const u8 *base, u8 type)
> > > > >         {
> > > > >                u32 queue_type, num_rx, num_tx, num_cq;
> > > > >                struct virtnet_stats_map *m;
> > > > >                u64 offset, bitmap;
> > > > >                const __le64 *v;
> > > > >                int i, j;
> > > > >
> > > > >                num_rx = VIRTNET_RQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_RX];
> > > > >                num_tx = VIRTNET_SQ_STATS_LEN + ctx->desc_num[VIRTNET_Q_TYPE_TX];
> > > > >                num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
> > > > >
> > > > >                queue_type = vq_type(vi, qid);
> > > > >                bitmap = ctx->bitmap[queue_type];
> > > > >                offset = 0;
> > > > >
> > > > >                if (queue_type == VIRTNET_Q_TYPE_TX) {
> > > > >                        offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> > > > >                        offset += VIRTNET_SQ_STATS_LEN;
> > > > >                } else if (queue_type == VIRTNET_Q_TYPE_RX) {
> > > > >                        offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> > > > >                }
> > > > >
> > > > >                for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> > > > >                        m = &virtio_net_stats_map[i];
> > > > >
> > > > > ->                     if (m->stat_type & bitmap)
> > > > >                                offset += m->num;
> > > > >
> > > > > ->                     if (type != m->reply_type)
> > > > >                                continue;
> > > > >
> > > > >                        for (j = 0; j < m->num; ++j) {
> > > > >                                v = (const __le64 *)(base + m->desc[j].offset);
> > > > >                                ctx->data[offset + j] = le64_to_cpu(*v);
> > > > >                        }
> > > > >
> > > > >                        break;
> > > > >                }
> > > > >         }
> > > > >
> > > > > Thanks.
> > > >
> > > > Btw, just a reminder, there are other comments for this patch.
> > > >
> > > > Thanks
> > > >
> > >
> >
>
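For reference, below is a minimal standalone sketch of the function-pointer table Jason suggested in the exchange above: one small handler per reply type, dispatched through an array indexed by the type. Every identifier (the sketch_* names, the two reply-type constants) is an assumption made for this illustration rather than the driver's code, and a real handler would also need le64_to_cpu() on each field.

/*
 * Sketch only: a function-pointer table indexed by the reply type,
 * standing in for the nested-macro map.  Every identifier below is
 * invented for this illustration, not taken from the driver.
 */
#include <stdint.h>
#include <stddef.h>

enum {
        SKETCH_REPLY_RX_BASIC,
        SKETCH_REPLY_TX_BASIC,
        SKETCH_REPLY_MAX,
};

struct sketch_stats_ctx {
        uint64_t *data;         /* output array laid out by the caller */
};

typedef void (*sketch_fill_fn)(struct sketch_stats_ctx *ctx,
                               const uint8_t *base, size_t offset);

static void sketch_fill_rx_basic(struct sketch_stats_ctx *ctx,
                                 const uint8_t *base, size_t offset)
{
        const uint64_t *v = (const uint64_t *)base;

        /* the real driver reads __le64 fields and needs le64_to_cpu() */
        ctx->data[offset + 0] = v[0];   /* rx packets */
        ctx->data[offset + 1] = v[1];   /* rx bytes   */
}

static void sketch_fill_tx_basic(struct sketch_stats_ctx *ctx,
                                 const uint8_t *base, size_t offset)
{
        const uint64_t *v = (const uint64_t *)base;

        ctx->data[offset + 0] = v[0];   /* tx packets */
        ctx->data[offset + 1] = v[1];   /* tx bytes   */
}

/* dispatch table: one handler per reply type, no if-else chain */
static const sketch_fill_fn sketch_fill_tbl[SKETCH_REPLY_MAX] = {
        [SKETCH_REPLY_RX_BASIC] = sketch_fill_rx_basic,
        [SKETCH_REPLY_TX_BASIC] = sketch_fill_tx_basic,
};

static void sketch_fill_stats(struct sketch_stats_ctx *ctx, unsigned int type,
                              const uint8_t *base, size_t offset)
{
        if (type < SKETCH_REPLY_MAX && sketch_fill_tbl[type])
                sketch_fill_tbl[type](ctx, base, offset);
}

The objection raised in the thread is visible here: sketch_fill_rx_basic() and sketch_fill_tx_basic() are near copies of each other, which is exactly the repetition the descriptor map was meant to factor out.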


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
                   ` (10 preceding siblings ...)
  2024-03-20  9:45 ` Xuan Zhuo
@ 2024-04-22 20:33 ` Michael S. Tsirkin
  2024-04-23  5:54   ` Xuan Zhuo
  11 siblings, 1 reply; 34+ messages in thread
From: Michael S. Tsirkin @ 2024-04-22 20:33 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Jason Wang, Alexei Starovoitov, Daniel Borkmann,
	Jesper Dangaard Brouer, John Fastabend, Stanislav Fomichev,
	Amritha Nambiar, Larysa Zaremba, Sridhar Samudrala,
	Maciej Fijalkowski, virtualization, bpf

On Mon, Mar 18, 2024 at 07:05:53PM +0800, Xuan Zhuo wrote:
> As the spec:
> 
> https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> 
> The virtio net supports to get device stats.
> 
> Please review.

series:

Acked-by: Michael S. Tsirkin <mst@redhat.com>

I think you can now repost for net-next.


> Thanks.
> 
> v5:
>     1. Fix some small problems in last version
>     2. Not report stats that will be reported by netlink
>     3. remove "_queue" from  ethtool -S
> 
> v4:
>     1. Support per-queue statistics API
>     2. Fix some small problems in last version
> 
> v3:
>     1. rebase net-next
> 
> v2:
>     1. fix the usage of the leXX_to_cpu()
>     2. add comment to the structure virtnet_stats_map
> 
> v1:
>     1. fix some definitions of the marco and the struct
> 
> 
> 
> 
> 
> 
> Xuan Zhuo (9):
>   virtio_net: introduce device stats feature and structures
>   virtio_net: virtnet_send_command supports command-specific-result
>   virtio_net: remove "_queue" from ethtool -S
>   virtio_net: support device stats
>   virtio_net: stats map include driver stats
>   virtio_net: add the total stats field
>   virtio_net: rename stat tx_timeout to timeout
>   netdev: add queue stats
>   virtio-net: support queue stat
> 
>  Documentation/netlink/specs/netdev.yaml | 104 ++++
>  drivers/net/virtio_net.c                | 755 +++++++++++++++++++++---
>  include/net/netdev_queues.h             |  27 +
>  include/uapi/linux/netdev.h             |  19 +
>  include/uapi/linux/virtio_net.h         | 143 +++++
>  net/core/netdev-genl.c                  |  23 +-
>  tools/include/uapi/linux/netdev.h       |  19 +
>  7 files changed, 1013 insertions(+), 77 deletions(-)
> 
> --
> 2.32.0.3.g01195cf9f


^ permalink raw reply	[flat|nested] 34+ messages in thread

* Re: [PATCH net-next v5 0/9] virtio-net: support device stats
  2024-04-22 20:33 ` Michael S. Tsirkin
@ 2024-04-23  5:54   ` Xuan Zhuo
  0 siblings, 0 replies; 34+ messages in thread
From: Xuan Zhuo @ 2024-04-23  5:54 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: netdev, David S. Miller, Eric Dumazet, Jakub Kicinski,
	Paolo Abeni, Jason Wang, Alexei Starovoitov, Daniel Borkmann,
	Jesper Dangaard Brouer, John Fastabend, Stanislav Fomichev,
	Amritha Nambiar, Larysa Zaremba, Sridhar Samudrala,
	Maciej Fijalkowski, virtualization, bpf

On Mon, 22 Apr 2024 16:33:01 -0400, "Michael S. Tsirkin" <mst@redhat.com> wrote:
> On Mon, Mar 18, 2024 at 07:05:53PM +0800, Xuan Zhuo wrote:
> > As the spec:
> >
> > https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> >
> > The virtio net supports to get device stats.
> >
> > Please review.
>
> series:
>
> Acked-by: Michael S. Tsirkin <mst@redhat.com>
>
> I think you can now repost for net-next.

Thanks for your ack.

Jason and I discussed a way to remove the "maps".

I will post a new patch set with that.

Thanks.


>
>
> > Thanks.
> >
> > v5:
> >     1. Fix some small problems in last version
> >     2. Not report stats that will be reported by netlink
> >     3. remove "_queue" from  ethtool -S
> >
> > v4:
> >     1. Support per-queue statistics API
> >     2. Fix some small problems in last version
> >
> > v3:
> >     1. rebase net-next
> >
> > v2:
> >     1. fix the usage of the leXX_to_cpu()
> >     2. add comment to the structure virtnet_stats_map
> >
> > v1:
> >     1. fix some definitions of the marco and the struct
> >
> >
> >
> >
> >
> >
> > Xuan Zhuo (9):
> >   virtio_net: introduce device stats feature and structures
> >   virtio_net: virtnet_send_command supports command-specific-result
> >   virtio_net: remove "_queue" from ethtool -S
> >   virtio_net: support device stats
> >   virtio_net: stats map include driver stats
> >   virtio_net: add the total stats field
> >   virtio_net: rename stat tx_timeout to timeout
> >   netdev: add queue stats
> >   virtio-net: support queue stat
> >
> >  Documentation/netlink/specs/netdev.yaml | 104 ++++
> >  drivers/net/virtio_net.c                | 755 +++++++++++++++++++++---
> >  include/net/netdev_queues.h             |  27 +
> >  include/uapi/linux/netdev.h             |  19 +
> >  include/uapi/linux/virtio_net.h         | 143 +++++
> >  net/core/netdev-genl.c                  |  23 +-
> >  tools/include/uapi/linux/netdev.h       |  19 +
> >  7 files changed, 1013 insertions(+), 77 deletions(-)
> >
> > --
> > 2.32.0.3.g01195cf9f
>
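The thread does not show what removing the "maps" ended up looking like. Purely as an illustration of one possible shape (a plain switch that selects a per-type descriptor table, plus a single shared copy loop), here is a standalone sketch. All sketch_* names are invented for it, and it should not be read as the code posted in the later revision.

/*
 * Sketch only: a map-less variant where a switch picks the
 * descriptor table for a reply type and one shared loop copies
 * the fields.  All names below are invented for the illustration.
 */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_ARRAY_SIZE(a)    (sizeof(a) / sizeof((a)[0]))

struct sketch_stat_desc {
        const char *name;
        size_t offset;          /* field offset inside the device reply */
};

/* example per-type descriptor tables; the real driver has one per reply type */
static const struct sketch_stat_desc sketch_rx_basic_desc[] = {
        { "rx_packets", 0 },
        { "rx_bytes",   8 },
};

static const struct sketch_stat_desc sketch_tx_basic_desc[] = {
        { "tx_packets", 0 },
        { "tx_bytes",   8 },
};

enum {
        SKETCH_REPLY_RX_BASIC,
        SKETCH_REPLY_TX_BASIC,
};

/* one shared copy loop; the real driver would use le64_to_cpu() here */
static void sketch_fill(uint64_t *out, const uint8_t *base,
                        const struct sketch_stat_desc *desc, size_t num)
{
        size_t i;

        for (i = 0; i < num; i++)
                out[i] = *(const uint64_t *)(base + desc[i].offset);
}

/* a plain switch replaces the map lookup */
static void sketch_fill_stats(uint64_t *out, const uint8_t *base, int type)
{
        switch (type) {
        case SKETCH_REPLY_RX_BASIC:
                sketch_fill(out, base, sketch_rx_basic_desc,
                            SKETCH_ARRAY_SIZE(sketch_rx_basic_desc));
                break;
        case SKETCH_REPLY_TX_BASIC:
                sketch_fill(out, base, sketch_tx_basic_desc,
                            SKETCH_ARRAY_SIZE(sketch_tx_basic_desc));
                break;
        }
}

The trade-off against the map is a little repetition in the switch in exchange for dropping the nested macros.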

^ permalink raw reply	[flat|nested] 34+ messages in thread

end of thread, other threads:[~2024-04-23  5:56 UTC | newest]

Thread overview: 34+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-03-18 11:05 [PATCH net-next v5 0/9] virtio-net: support device stats Xuan Zhuo
2024-03-18 11:05 ` [PATCH net-next v5 1/9] virtio_net: introduce device stats feature and structures Xuan Zhuo
2024-04-10  6:09   ` Jason Wang
2024-03-18 11:05 ` [PATCH net-next v5 2/9] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
2024-04-10  6:09   ` Jason Wang
2024-04-10 10:50     ` Xuan Zhuo
2024-03-18 11:05 ` [PATCH net-next v5 3/9] virtio_net: remove "_queue" from ethtool -S Xuan Zhuo
2024-04-10  6:09   ` Jason Wang
2024-03-18 11:05 ` [PATCH net-next v5 4/9] virtio_net: support device stats Xuan Zhuo
2024-04-10  6:09   ` Jason Wang
2024-04-10 10:52     ` Xuan Zhuo
2024-04-11  6:09       ` Jason Wang
2024-04-15  2:42         ` Xuan Zhuo
2024-04-15  6:45           ` Jason Wang
2024-04-15  8:11             ` Xuan Zhuo
2024-04-15  8:34               ` Jason Wang
2024-03-18 11:05 ` [PATCH net-next v5 5/9] virtio_net: stats map include driver stats Xuan Zhuo
2024-03-18 11:05 ` [PATCH net-next v5 6/9] virtio_net: add the total stats field Xuan Zhuo
2024-03-18 11:06 ` [PATCH net-next v5 7/9] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
2024-04-10  6:09   ` Jason Wang
2024-03-18 11:06 ` [PATCH net-next v5 8/9] netdev: add queue stats Xuan Zhuo
2024-03-18 11:06 ` [PATCH net-next v5 9/9] virtio-net: support queue stat Xuan Zhuo
2024-03-18 11:52 ` [PATCH net-next v5 0/9] virtio-net: support device stats Jiri Pirko
2024-03-18 11:53   ` Xuan Zhuo
2024-03-18 12:19     ` Jiri Pirko
2024-03-19 10:12       ` Paolo Abeni
2024-03-20  8:04         ` Xuan Zhuo
2024-03-20 12:23           ` Jiri Pirko
2024-03-21  3:38           ` Jakub Kicinski
2024-03-21  3:54             ` Xuan Zhuo
2024-03-21 12:42               ` Simon Horman
2024-03-20  9:45 ` Xuan Zhuo
2024-04-22 20:33 ` Michael S. Tsirkin
2024-04-23  5:54   ` Xuan Zhuo

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).