* [PATCH net-next v3 0/6] virtio-net: support device stats
@ 2024-02-27  8:02 Xuan Zhuo
  2024-02-27  8:02 ` [PATCH net-next v3 1/6] virtio_net: introduce device stats feature and structures Xuan Zhuo
                   ` (6 more replies)
  0 siblings, 7 replies; 24+ messages in thread
From: Xuan Zhuo @ 2024-02-27  8:02 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, Xuan Zhuo, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

As described in the spec:

https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

The virtio-net device supports reporting device stats.

Please review.

Thanks.

v3:
    1. rebase net-next

v2:
    1. fix the usage of the leXX_to_cpu()
    2. add comment to the structure virtnet_stats_map

v1:
    1. fix some definitions of the macro and the struct




Xuan Zhuo (6):
  virtio_net: introduce device stats feature and structures
  virtio_net: virtnet_send_command supports command-specific-result
  virtio_net: support device stats
  virtio_net: stats map include driver stats
  virtio_net: add the total stats field
  virtio_net: rename stat tx_timeout to timeout

 drivers/net/virtio_net.c        | 536 ++++++++++++++++++++++++++++----
 include/uapi/linux/virtio_net.h | 137 ++++++++
 2 files changed, 613 insertions(+), 60 deletions(-)

--
2.32.0.3.g01195cf9f


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH net-next v3 1/6] virtio_net: introduce device stats feature and structures
  2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
@ 2024-02-27  8:02 ` Xuan Zhuo
  2024-02-27 13:16   ` Jiri Pirko
  2024-02-27  8:02 ` [PATCH net-next v3 2/6] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 24+ messages in thread
From: Xuan Zhuo @ 2024-02-27  8:02 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, Xuan Zhuo, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

The virtio-net device stats spec:

https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

This commit introduces the relevant feature and structures.
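
For illustration only (not part of this patch), a driver that negotiates
VIRTIO_NET_F_DEVICE_STATS could interpret one returned stats block roughly
like the sketch below ("reply" is a hypothetical pointer to a block written
by the device; the layout follows the structures added here):

	struct virtio_net_stats_reply_hdr *hdr = reply;
	u64 packets;

	if (hdr->type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC) {
		struct virtio_net_stats_rx_basic *rx = reply;

		/* all fields are little-endian per the spec */
		packets = le64_to_cpu(rx->rx_packets);
	}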

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 include/uapi/linux/virtio_net.h | 137 ++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)

diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index cc65ef0f3c3e..8fca4d1b7635 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -56,6 +56,7 @@
 #define VIRTIO_NET_F_MQ	22	/* Device supports Receive Flow
 					 * Steering */
 #define VIRTIO_NET_F_CTRL_MAC_ADDR 23	/* Set MAC address */
+#define VIRTIO_NET_F_DEVICE_STATS 50	/* Device can provide device-level statistics. */
 #define VIRTIO_NET_F_VQ_NOTF_COAL 52	/* Device supports virtqueue notification coalescing */
 #define VIRTIO_NET_F_NOTF_COAL	53	/* Device supports notifications coalescing */
 #define VIRTIO_NET_F_GUEST_USO4	54	/* Guest can handle USOv4 in. */
@@ -406,4 +407,140 @@ struct  virtio_net_ctrl_coal_vq {
 	struct virtio_net_ctrl_coal coal;
 };
 
+/*
+ * Device Statistics
+ */
+#define VIRTIO_NET_CTRL_STATS         8
+#define VIRTIO_NET_CTRL_STATS_QUERY   0
+#define VIRTIO_NET_CTRL_STATS_GET     1
+
+struct virtio_net_stats_capabilities {
+
+#define VIRTIO_NET_STATS_TYPE_CVQ       (1ULL << 32)
+
+#define VIRTIO_NET_STATS_TYPE_RX_BASIC  (1ULL << 0)
+#define VIRTIO_NET_STATS_TYPE_RX_CSUM   (1ULL << 1)
+#define VIRTIO_NET_STATS_TYPE_RX_GSO    (1ULL << 2)
+#define VIRTIO_NET_STATS_TYPE_RX_SPEED  (1ULL << 3)
+
+#define VIRTIO_NET_STATS_TYPE_TX_BASIC  (1ULL << 16)
+#define VIRTIO_NET_STATS_TYPE_TX_CSUM   (1ULL << 17)
+#define VIRTIO_NET_STATS_TYPE_TX_GSO    (1ULL << 18)
+#define VIRTIO_NET_STATS_TYPE_TX_SPEED  (1ULL << 19)
+
+	__le64 supported_stats_types[1];
+};
+
+struct virtio_net_ctrl_queue_stats {
+	struct {
+		__le16 vq_index;
+		__le16 reserved[3];
+		__le64 types_bitmap[1];
+	} stats[1];
+};
+
+struct virtio_net_stats_reply_hdr {
+#define VIRTIO_NET_STATS_TYPE_REPLY_CVQ       32
+
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC  0
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM   1
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO    2
+#define VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED  3
+
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC  16
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM   17
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO    18
+#define VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED  19
+	__u8 type;
+	__u8 reserved;
+	__le16 vq_index;
+	__le16 reserved1;
+	__le16 size;
+};
+
+struct virtio_net_stats_cvq {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 command_num;
+	__le64 ok_num;
+};
+
+struct virtio_net_stats_rx_basic {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 rx_notifications;
+
+	__le64 rx_packets;
+	__le64 rx_bytes;
+
+	__le64 rx_interrupts;
+
+	__le64 rx_drops;
+	__le64 rx_drop_overruns;
+};
+
+struct virtio_net_stats_tx_basic {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 tx_notifications;
+
+	__le64 tx_packets;
+	__le64 tx_bytes;
+
+	__le64 tx_interrupts;
+
+	__le64 tx_drops;
+	__le64 tx_drop_malformed;
+};
+
+struct virtio_net_stats_rx_csum {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 rx_csum_valid;
+	__le64 rx_needs_csum;
+	__le64 rx_csum_none;
+	__le64 rx_csum_bad;
+};
+
+struct virtio_net_stats_tx_csum {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 tx_csum_none;
+	__le64 tx_needs_csum;
+};
+
+struct virtio_net_stats_rx_gso {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 rx_gso_packets;
+	__le64 rx_gso_bytes;
+	__le64 rx_gso_packets_coalesced;
+	__le64 rx_gso_bytes_coalesced;
+};
+
+struct virtio_net_stats_tx_gso {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 tx_gso_packets;
+	__le64 tx_gso_bytes;
+	__le64 tx_gso_segments;
+	__le64 tx_gso_segments_bytes;
+	__le64 tx_gso_packets_noseg;
+	__le64 tx_gso_bytes_noseg;
+};
+
+struct virtio_net_stats_rx_speed {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 rx_packets_allowance_exceeded;
+	__le64 rx_bytes_allowance_exceeded;
+};
+
+struct virtio_net_stats_tx_speed {
+	struct virtio_net_stats_reply_hdr hdr;
+
+	__le64 tx_packets_allowance_exceeded;
+	__le64 tx_bytes_allowance_exceeded;
+};
+
 #endif /* _UAPI_LINUX_VIRTIO_NET_H */
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH net-next v3 2/6] virtio_net: virtnet_send_command supports command-specific-result
  2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
  2024-02-27  8:02 ` [PATCH net-next v3 1/6] virtio_net: introduce device stats feature and structures Xuan Zhuo
@ 2024-02-27  8:02 ` Xuan Zhuo
  2024-02-27 13:23   ` Jiri Pirko
  2024-02-27  8:03 ` [PATCH net-next v3 3/6] virtio_net: support device stats Xuan Zhuo
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 24+ messages in thread
From: Xuan Zhuo @ 2024-02-27  8:02 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, Xuan Zhuo, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

As described in the spec: https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

The virtnet cvq supports getting a result back from the device.
This commit implements this.
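
A minimal usage sketch (illustrative only; "req" and "reply" stand for
hypothetical command-specific buffers):

	struct scatterlist out, in;

	sg_init_one(&out, &req, sizeof(req));
	sg_init_one(&in, &reply, sizeof(reply));

	if (!virtnet_send_command(vi, class, cmd, &out, &in))
		return -EINVAL;

	/* On success, "reply" holds the data written by the device, in
	 * addition to the usual VIRTIO_NET_OK/ERR status.
	 */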

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 47 +++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 20 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7ce4a1011ea..af512d85cd5b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2512,10 +2512,11 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
  * never fail unless improperly formatted.
  */
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
-				 struct scatterlist *out)
+				 struct scatterlist *out,
+				 struct scatterlist *in)
 {
-	struct scatterlist *sgs[4], hdr, stat;
-	unsigned out_num = 0, tmp;
+	struct scatterlist *sgs[5], hdr, stat;
+	u32 out_num = 0, tmp, in_num = 0;
 	int ret;
 
 	/* Caller should know better */
@@ -2533,10 +2534,13 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 
 	/* Add return status. */
 	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
-	sgs[out_num] = &stat;
+	sgs[out_num + in_num++] = &stat;
 
-	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
-	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+	if (in)
+		sgs[out_num + in_num++] = in;
+
+	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
 	if (ret < 0) {
 		dev_warn(&vi->vdev->dev,
 			 "Failed to add sgs for command vq: %d\n.", ret);
@@ -2578,7 +2582,8 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
+					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
+					  &sg, NULL)) {
 			dev_warn(&vdev->dev,
 				 "Failed to set mac address by vq command.\n");
 			ret = -EINVAL;
@@ -2647,7 +2652,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
 	rtnl_lock();
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
-				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
 	rtnl_unlock();
 }
@@ -2664,7 +2669,7 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
-				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
+				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
@@ -2727,14 +2732,14 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
+				  VIRTIO_NET_CTRL_RX_PROMISC, sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
 			 vi->ctrl->promisc ? "en" : "dis");
 
 	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
-				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
+				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg, NULL))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
 			 vi->ctrl->allmulti ? "en" : "dis");
 
@@ -2770,7 +2775,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
+				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, NULL))
 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
 	kfree(buf);
@@ -2786,7 +2791,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
+				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -2801,7 +2806,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
-				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
+				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
 	return 0;
 }
@@ -2920,7 +2925,7 @@ static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
-				  &sgs))
+				  &sgs, NULL))
 		return -EINVAL;
 
 	return 0;
@@ -3062,7 +3067,7 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
-				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
+				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs, NULL)) {
 		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
 		return false;
 	}
@@ -3380,7 +3385,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
-				  &sgs_tx))
+				  &sgs_tx, NULL))
 		return -EINVAL;
 
 	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
@@ -3430,7 +3435,7 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-				  &sgs_rx))
+				  &sgs_rx, NULL))
 		return -EINVAL;
 
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
@@ -3899,7 +3904,8 @@ static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
-				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
+				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
+				  &sg, NULL)) {
 		dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
 		return -EINVAL;
 	}
@@ -4822,7 +4828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
-					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
+					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
+					  &sg, NULL)) {
 			pr_debug("virtio_net: setting MAC address failed\n");
 			rtnl_unlock();
 			err = -EINVAL;
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH net-next v3 3/6] virtio_net: support device stats
  2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
  2024-02-27  8:02 ` [PATCH net-next v3 1/6] virtio_net: introduce device stats feature and structures Xuan Zhuo
  2024-02-27  8:02 ` [PATCH net-next v3 2/6] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
@ 2024-02-27  8:03 ` Xuan Zhuo
  2024-02-27 14:56   ` Jiri Pirko
                     ` (3 more replies)
  2024-02-27  8:03 ` [PATCH net-next v3 4/6] virtio_net: stats map include driver stats Xuan Zhuo
                   ` (3 subsequent siblings)
  6 siblings, 4 replies; 24+ messages in thread
From: Xuan Zhuo @ 2024-02-27  8:03 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, Xuan Zhuo, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

As described in the spec: https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82

Make virtio-net support getting the stats from the device via
"ethtool -S <eth0>".
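
The control exchange added below boils down to something like this
(condensed sketch, error handling omitted; "reply"/"reply_size" are a
caller-provided buffer):

	struct virtio_net_ctrl_queue_stats req = {};
	struct scatterlist out, in;

	req.stats[0].vq_index = cpu_to_le16(0);	/* rx queue 0 */
	req.stats[0].types_bitmap[0] = cpu_to_le64(VIRTIO_NET_STATS_TYPE_RX_BASIC);

	sg_init_one(&out, &req, sizeof(req));
	sg_init_one(&in, reply, reply_size);

	virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
			     VIRTIO_NET_CTRL_STATS_GET, &out, &in);

	/* "reply" now contains a sequence of virtio_net_stats_* structs,
	 * each starting with a struct virtio_net_stats_reply_hdr.
	 */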

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 362 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 358 insertions(+), 4 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index af512d85cd5b..5549fc8508bd 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -128,6 +128,121 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
 #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
 
+#define VIRTNET_STATS_DESC(qtype, class, name) \
+	{#name, offsetof(struct virtio_net_stats_ ## qtype ## _ ## class, qtype ## _ ## name)}
+
+static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
+	{"command_num", offsetof(struct virtio_net_stats_cvq, command_num)},
+	{"ok_num", offsetof(struct virtio_net_stats_cvq, ok_num)}
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
+	VIRTNET_STATS_DESC(rx, basic, packets),
+	VIRTNET_STATS_DESC(rx, basic, bytes),
+
+	VIRTNET_STATS_DESC(rx, basic, notifications),
+	VIRTNET_STATS_DESC(rx, basic, interrupts),
+
+	VIRTNET_STATS_DESC(rx, basic, drops),
+	VIRTNET_STATS_DESC(rx, basic, drop_overruns),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
+	VIRTNET_STATS_DESC(tx, basic, packets),
+	VIRTNET_STATS_DESC(tx, basic, bytes),
+
+	VIRTNET_STATS_DESC(tx, basic, notifications),
+	VIRTNET_STATS_DESC(tx, basic, interrupts),
+
+	VIRTNET_STATS_DESC(tx, basic, drops),
+	VIRTNET_STATS_DESC(tx, basic, drop_malformed),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
+	VIRTNET_STATS_DESC(rx, csum, csum_valid),
+	VIRTNET_STATS_DESC(rx, csum, needs_csum),
+
+	VIRTNET_STATS_DESC(rx, csum, csum_none),
+	VIRTNET_STATS_DESC(rx, csum, csum_bad),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
+	VIRTNET_STATS_DESC(tx, csum, needs_csum),
+	VIRTNET_STATS_DESC(tx, csum, csum_none),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
+	VIRTNET_STATS_DESC(rx, gso, gso_packets),
+	VIRTNET_STATS_DESC(rx, gso, gso_bytes),
+	VIRTNET_STATS_DESC(rx, gso, gso_packets_coalesced),
+	VIRTNET_STATS_DESC(rx, gso, gso_bytes_coalesced),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
+	VIRTNET_STATS_DESC(tx, gso, gso_packets),
+	VIRTNET_STATS_DESC(tx, gso, gso_bytes),
+	VIRTNET_STATS_DESC(tx, gso, gso_segments),
+	VIRTNET_STATS_DESC(tx, gso, gso_segments_bytes),
+	VIRTNET_STATS_DESC(tx, gso, gso_packets_noseg),
+	VIRTNET_STATS_DESC(tx, gso, gso_bytes_noseg),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
+	VIRTNET_STATS_DESC(rx, speed, packets_allowance_exceeded),
+	VIRTNET_STATS_DESC(rx, speed, bytes_allowance_exceeded),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
+	VIRTNET_STATS_DESC(tx, speed, packets_allowance_exceeded),
+	VIRTNET_STATS_DESC(tx, speed, bytes_allowance_exceeded),
+};
+
+struct virtnet_stats_map {
+	/* the stat type in bitmap */
+	u64 stat_type;
+
+	/* the bytes of the response for the stat */
+	u32 len;
+
+	/* the num of the response fields for the stat */
+	u32 num;
+
+#define VIRTNET_STATS_Q_TYPE_RX 0
+#define VIRTNET_STATS_Q_TYPE_TX 1
+#define VIRTNET_STATS_Q_TYPE_CQ 2
+	u32 queue_type;
+
+	/* the reply type of the stat */
+	u8 reply_type;
+
+	/* describe the name and the offset in the response */
+	const struct virtnet_stat_desc *desc;
+};
+
+#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)	\
+	{							\
+		VIRTIO_NET_STATS_TYPE_##TYPE,			\
+		sizeof(struct virtio_net_stats_ ## type),	\
+		ARRAY_SIZE(virtnet_stats_ ## type ##_desc),	\
+		VIRTNET_STATS_Q_TYPE_##queue_type,		\
+		VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,		\
+		&virtnet_stats_##type##_desc[0]			\
+	}
+
+static struct virtnet_stats_map virtio_net_stats_map[] = {
+	VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
+
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
+
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
+};
+
 struct virtnet_interrupt_coalesce {
 	u32 max_packets;
 	u32 max_usecs;
@@ -244,6 +359,7 @@ struct control_buf {
 	struct virtio_net_ctrl_coal_tx coal_tx;
 	struct virtio_net_ctrl_coal_rx coal_rx;
 	struct virtio_net_ctrl_coal_vq coal_vq;
+	struct virtio_net_stats_capabilities stats_cap;
 };
 
 struct virtnet_info {
@@ -329,6 +445,8 @@ struct virtnet_info {
 
 	/* failover when STANDBY feature enabled */
 	struct failover *failover;
+
+	u64 device_stats_cap;
 };
 
 struct padded_vnet_hdr {
@@ -3263,6 +3381,204 @@ static int virtnet_set_channels(struct net_device *dev,
 	return err;
 }
 
+static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
+{
+	struct virtnet_stats_map *m;
+	int i, j;
+	u8 *p = *data;
+
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
+		m = &virtio_net_stats_map[i];
+
+		if (m->queue_type != type)
+			continue;
+
+		if (!(vi->device_stats_cap & m->stat_type))
+			continue;
+
+		for (j = 0; j < m->num; ++j) {
+			if (type == VIRTNET_STATS_Q_TYPE_RX)
+				ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
+
+			else if (type == VIRTNET_STATS_Q_TYPE_TX)
+				ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
+
+			else if (type == VIRTNET_STATS_Q_TYPE_CQ)
+				ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
+		}
+	}
+
+	*data = p;
+}
+
+struct virtnet_stats_ctx {
+	u32 num_cq;
+	u32 num_rx;
+	u32 num_tx;
+
+	u64 bitmap_cq;
+	u64 bitmap_rx;
+	u64 bitmap_tx;
+
+	u32 size_cq;
+	u32 size_rx;
+	u32 size_tx;
+
+	u64 *data;
+};
+
+static void virtnet_stats_ctx_init(struct virtnet_info *vi,
+				   struct virtnet_stats_ctx *ctx,
+				   u64 *data)
+{
+	struct virtnet_stats_map *m;
+	int i;
+
+	ctx->data = data;
+
+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
+		m = &virtio_net_stats_map[i];
+
+		if (vi->device_stats_cap & m->stat_type) {
+			if (m->queue_type == VIRTNET_STATS_Q_TYPE_CQ) {
+				ctx->bitmap_cq |= m->stat_type;
+				ctx->num_cq += m->num;
+				ctx->size_cq += m->len;
+			}
+
+			if (m->queue_type == VIRTNET_STATS_Q_TYPE_RX) {
+				ctx->bitmap_rx |= m->stat_type;
+				ctx->num_rx += m->num;
+				ctx->size_rx += m->len;
+			}
+
+			if (m->queue_type == VIRTNET_STATS_Q_TYPE_TX) {
+				ctx->bitmap_tx |= m->stat_type;
+				ctx->num_tx += m->num;
+				ctx->size_tx += m->len;
+			}
+		}
+	}
+}
+
+static int virtnet_get_hw_stats(struct virtnet_info *vi,
+				struct virtnet_stats_ctx *ctx)
+{
+	struct virtio_net_ctrl_queue_stats *req;
+	struct virtio_net_stats_reply_hdr *hdr;
+	struct scatterlist sgs_in, sgs_out;
+	u32 num_rx, num_tx, num_cq, offset;
+	int qnum, i, j,  qid, res_size;
+	struct virtnet_stats_map *m;
+	void *reply, *p;
+	u64 bitmap;
+	int ok;
+	u64 *v;
+
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
+		return 0;
+
+	qnum = 0;
+	if (ctx->bitmap_cq)
+		qnum += 1;
+
+	if (ctx->bitmap_rx)
+		qnum += vi->curr_queue_pairs;
+
+	if (ctx->bitmap_tx)
+		qnum += vi->curr_queue_pairs;
+
+	req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	res_size = (ctx->size_rx + ctx->size_tx) * vi->curr_queue_pairs + ctx->size_cq;
+	reply = kmalloc(res_size, GFP_KERNEL);
+	if (!reply) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	j = 0;
+	for (i = 0; i < vi->curr_queue_pairs; ++i) {
+		if (ctx->bitmap_rx) {
+			req->stats[j].vq_index = cpu_to_le16(i * 2);
+			req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_rx);
+			++j;
+		}
+
+		if (ctx->bitmap_tx) {
+			req->stats[j].vq_index = cpu_to_le16(i * 2 + 1);
+			req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_tx);
+			++j;
+		}
+	}
+
+	if (ctx->size_cq) {
+		req->stats[j].vq_index = cpu_to_le16(vi->max_queue_pairs * 2);
+		req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_cq);
+		++j;
+	}
+
+	sg_init_one(&sgs_out, req, sizeof(*req) * j);
+	sg_init_one(&sgs_in, reply, res_size);
+
+	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
+				  VIRTIO_NET_CTRL_STATS_GET,
+				  &sgs_out, &sgs_in);
+	kfree(req);
+
+	if (!ok) {
+		kfree(reply);
+		return ok;
+	}
+
+	num_rx = VIRTNET_RQ_STATS_LEN + ctx->num_rx;
+	num_tx = VIRTNET_SQ_STATS_LEN + ctx->num_tx;
+	num_cq = ctx->num_cq;
+
+	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
+		hdr = p;
+
+		qid = le16_to_cpu(hdr->vq_index);
+
+		if (qid == vi->max_queue_pairs * 2) {
+			offset = 0;
+			bitmap = ctx->bitmap_cq;
+		} else if (qid % 2) {
+			offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
+			offset += VIRTNET_SQ_STATS_LEN;
+			bitmap = ctx->bitmap_tx;
+		} else {
+			offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
+			bitmap = ctx->bitmap_rx;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
+			m = &virtio_net_stats_map[i];
+
+			if (hdr->type != m->reply_type) {
+				if (m->stat_type & bitmap)
+					offset += m->num;
+				continue;
+			}
+
+			for (j = 0; j < m->num; ++j) {
+				v = p + m->desc[j].offset;
+				ctx->data[offset + j] = le64_to_cpu(*v);
+			}
+
+			break;
+		}
+	}
+
+	kfree(reply);
+	return 0;
+}
+
 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -3271,16 +3587,22 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 
 	switch (stringset) {
 	case ETH_SS_STATS:
+		virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_CQ, 0, &p);
+
 		for (i = 0; i < vi->curr_queue_pairs; i++) {
 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
 				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
 						virtnet_rq_stats_desc[j].desc);
+
+			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_RX, i, &p);
 		}
 
 		for (i = 0; i < vi->curr_queue_pairs; i++) {
 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
 				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
 						virtnet_sq_stats_desc[j].desc);
+
+			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_TX, i, &p);
 		}
 		break;
 	}
@@ -3289,11 +3611,35 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 static int virtnet_get_sset_count(struct net_device *dev, int sset)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_stats_ctx ctx = {0};
+	u32 pair_count;
 
 	switch (sset) {
 	case ETH_SS_STATS:
-		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
-					       VIRTNET_SQ_STATS_LEN);
+		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS) &&
+		    !vi->device_stats_cap) {
+			struct scatterlist sg;
+
+			sg_init_one(&sg, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));
+
+			if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
+						  VIRTIO_NET_CTRL_STATS_QUERY,
+						  NULL, &sg)) {
+				dev_warn(&dev->dev, "Fail to get stats capability\n");
+			} else {
+				__le64 v;
+
+				v = vi->ctrl->stats_cap.supported_stats_types[0];
+				vi->device_stats_cap = le64_to_cpu(v);
+			}
+		}
+
+		virtnet_stats_ctx_init(vi, &ctx, NULL);
+
+		pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
+		pair_count += ctx.num_rx + ctx.num_tx;
+
+		return ctx.num_cq + vi->curr_queue_pairs * pair_count;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3303,11 +3649,17 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	unsigned int idx = 0, start, i, j;
+	struct virtnet_stats_ctx ctx = {0};
+	unsigned int idx, start, i, j;
 	const u8 *stats_base;
 	const u64_stats_t *p;
 	size_t offset;
 
+	virtnet_stats_ctx_init(vi, &ctx, data);
+	virtnet_get_hw_stats(vi, &ctx);
+
+	idx = ctx.num_cq;
+
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
@@ -3321,6 +3673,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 			}
 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
 		idx += VIRTNET_RQ_STATS_LEN;
+		idx += ctx.num_rx;
 	}
 
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
@@ -3336,6 +3689,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 			}
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 		idx += VIRTNET_SQ_STATS_LEN;
+		idx += ctx.num_tx;
 	}
 }
 
@@ -4963,7 +5317,7 @@ static struct virtio_device_id id_table[] = {
 	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
 	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
 	VIRTIO_NET_F_VQ_NOTF_COAL, \
-	VIRTIO_NET_F_GUEST_HDRLEN
+	VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
 
 static unsigned int features[] = {
 	VIRTNET_FEATURES,
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH net-next v3 4/6] virtio_net: stats map include driver stats
  2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
                   ` (2 preceding siblings ...)
  2024-02-27  8:03 ` [PATCH net-next v3 3/6] virtio_net: support device stats Xuan Zhuo
@ 2024-02-27  8:03 ` Xuan Zhuo
  2024-02-27 15:05   ` Jiri Pirko
  2024-02-27  8:03 ` [PATCH net-next v3 5/6] virtio_net: add the total stats field Xuan Zhuo
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 24+ messages in thread
From: Xuan Zhuo @ 2024-02-27  8:03 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, Xuan Zhuo, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

In the last commit, we used the stats map to manage the device stats.

For consistency, we let the stats map also include the driver stats.
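
With driver and device stats in one map, the per-queue strings come from a
single loop. For example (illustrative), for rx queue 0 the driver counters
keep their old names while the device counters get a "_hw" marker:

	rx_queue_0_packets	<- driver, from virtnet_rq_stats_desc
	rx_queue_hw_0_packets	<- device, from virtnet_stats_rx_basic_desc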

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 195 ++++++++++++++++++++-------------------
 1 file changed, 100 insertions(+), 95 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5549fc8508bd..95cbfb159a03 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -125,9 +125,6 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
 };
 
-#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
-#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
-
 #define VIRTNET_STATS_DESC(qtype, class, name) \
 	{#name, offsetof(struct virtio_net_stats_ ## qtype ## _ ## class, qtype ## _ ## name)}
 
@@ -198,10 +195,10 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
 };
 
 struct virtnet_stats_map {
-	/* the stat type in bitmap */
+	/* the stat type in bitmap. just for device stats */
 	u64 stat_type;
 
-	/* the bytes of the response for the stat */
+	/* the bytes of the response for the stat. just for device stats */
 	u32 len;
 
 	/* the num of the response fields for the stat */
@@ -212,9 +209,11 @@ struct virtnet_stats_map {
 #define VIRTNET_STATS_Q_TYPE_CQ 2
 	u32 queue_type;
 
-	/* the reply type of the stat */
+	/* the reply type of the stat. just for device stats */
 	u8 reply_type;
 
+	u8 from_driver;
+
 	/* describe the name and the offset in the response */
 	const struct virtnet_stat_desc *desc;
 };
@@ -226,10 +225,24 @@ struct virtnet_stats_map {
 		ARRAY_SIZE(virtnet_stats_ ## type ##_desc),	\
 		VIRTNET_STATS_Q_TYPE_##queue_type,		\
 		VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,		\
+		false, \
 		&virtnet_stats_##type##_desc[0]			\
 	}
 
+#define VIRTNET_DRIVER_STATS_MAP_ITEM(type, queue_type)	\
+	{							\
+		0, 0,	\
+		ARRAY_SIZE(virtnet_ ## type ## _stats_desc),	\
+		VIRTNET_STATS_Q_TYPE_##queue_type,		\
+		0, true, \
+		&virtnet_##type##_stats_desc[0]			\
+	}
+
 static struct virtnet_stats_map virtio_net_stats_map[] = {
+	/* driver stats should on the start. */
+	VIRTNET_DRIVER_STATS_MAP_ITEM(rq, RX),
+	VIRTNET_DRIVER_STATS_MAP_ITEM(sq, TX),
+
 	VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
 
 	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
@@ -243,6 +256,11 @@ static struct virtnet_stats_map virtio_net_stats_map[] = {
 	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
 };
 
+#define virtnet_stats_supported(vi, m) ({				\
+	typeof(m) _m = (m);						\
+	(((vi)->device_stats_cap & _m->stat_type) || _m->from_driver);	\
+})
+
 struct virtnet_interrupt_coalesce {
 	u32 max_packets;
 	u32 max_usecs;
@@ -2247,7 +2265,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 
 	u64_stats_set(&stats.packets, packets);
 	u64_stats_update_begin(&rq->stats.syncp);
-	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+	for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
 		size_t offset = virtnet_rq_stats_desc[i].offset;
 		u64_stats_t *item, *src;
 
@@ -3381,33 +3399,36 @@ static int virtnet_set_channels(struct net_device *dev,
 	return err;
 }
 
-static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
+static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
 {
 	struct virtnet_stats_map *m;
+	const char *tp;
 	int i, j;
 	u8 *p = *data;
 
-	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
-		return;
-
 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
 		m = &virtio_net_stats_map[i];
 
 		if (m->queue_type != type)
 			continue;
 
-		if (!(vi->device_stats_cap & m->stat_type))
+		if (!virtnet_stats_supported(vi, m))
 			continue;
 
 		for (j = 0; j < m->num; ++j) {
+			if (m->from_driver)
+				tp = "";
+			else
+				tp = "_hw";
+
 			if (type == VIRTNET_STATS_Q_TYPE_RX)
-				ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
+				ethtool_sprintf(&p, "rx_queue%s_%u_%s", tp, qid, m->desc[j].desc);
 
 			else if (type == VIRTNET_STATS_Q_TYPE_TX)
-				ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
+				ethtool_sprintf(&p, "tx_queue%s_%u_%s", tp, qid, m->desc[j].desc);
 
 			else if (type == VIRTNET_STATS_Q_TYPE_CQ)
-				ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
+				ethtool_sprintf(&p, "cq%s_%s", tp, m->desc[j].desc);
 		}
 	}
 
@@ -3442,7 +3463,7 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
 		m = &virtio_net_stats_map[i];
 
-		if (vi->device_stats_cap & m->stat_type) {
+		if (virtnet_stats_supported(vi, m)) {
 			if (m->queue_type == VIRTNET_STATS_Q_TYPE_CQ) {
 				ctx->bitmap_cq |= m->stat_type;
 				ctx->num_cq += m->num;
@@ -3464,19 +3485,66 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 	}
 }
 
+static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
+			       struct virtnet_stats_ctx *ctx,
+			       const u8 *base, bool from_driver, u8 type)
+{
+	struct virtnet_stats_map *m;
+	const u64_stats_t *v_stat;
+	u32 queue_type;
+	const u64 *v;
+	u64 offset;
+	int i, j;
+
+	if (qid == vi->max_queue_pairs * 2) {
+		offset = 0;
+		queue_type = VIRTNET_STATS_Q_TYPE_CQ;
+	} else if (qid % 2) {
+		offset = ctx->num_cq + ctx->num_rx * vi->curr_queue_pairs + ctx->num_tx * (qid / 2);
+		queue_type = VIRTNET_STATS_Q_TYPE_TX;
+	} else {
+		offset = ctx->num_cq + ctx->num_rx * (qid / 2);
+		queue_type = VIRTNET_STATS_Q_TYPE_RX;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
+		m = &virtio_net_stats_map[i];
+
+		if (m->queue_type != queue_type)
+			continue;
+
+		if (from_driver != m->from_driver)
+			goto skip;
+
+		if (type != m->reply_type)
+			goto skip;
+
+		for (j = 0; j < m->num; ++j) {
+			if (!from_driver) {
+				v = (const u64 *)(base + m->desc[j].offset);
+				ctx->data[offset + j] = le64_to_cpu(*v);
+			} else {
+				v_stat = (const u64_stats_t *)(base + m->desc[j].offset);
+				ctx->data[offset + j] = u64_stats_read(v_stat);
+			}
+		}
+
+		break;
+skip:
+		if (virtnet_stats_supported(vi, m))
+			offset += m->num;
+	}
+}
+
 static int virtnet_get_hw_stats(struct virtnet_info *vi,
 				struct virtnet_stats_ctx *ctx)
 {
 	struct virtio_net_ctrl_queue_stats *req;
 	struct virtio_net_stats_reply_hdr *hdr;
 	struct scatterlist sgs_in, sgs_out;
-	u32 num_rx, num_tx, num_cq, offset;
 	int qnum, i, j,  qid, res_size;
-	struct virtnet_stats_map *m;
 	void *reply, *p;
-	u64 bitmap;
 	int ok;
-	u64 *v;
 
 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
 		return 0;
@@ -3536,43 +3604,10 @@ static int virtnet_get_hw_stats(struct virtnet_info *vi,
 		return ok;
 	}
 
-	num_rx = VIRTNET_RQ_STATS_LEN + ctx->num_rx;
-	num_tx = VIRTNET_SQ_STATS_LEN + ctx->num_tx;
-	num_cq = ctx->num_cq;
-
 	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
 		hdr = p;
-
 		qid = le16_to_cpu(hdr->vq_index);
-
-		if (qid == vi->max_queue_pairs * 2) {
-			offset = 0;
-			bitmap = ctx->bitmap_cq;
-		} else if (qid % 2) {
-			offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
-			offset += VIRTNET_SQ_STATS_LEN;
-			bitmap = ctx->bitmap_tx;
-		} else {
-			offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
-			bitmap = ctx->bitmap_rx;
-		}
-
-		for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
-			m = &virtio_net_stats_map[i];
-
-			if (hdr->type != m->reply_type) {
-				if (m->stat_type & bitmap)
-					offset += m->num;
-				continue;
-			}
-
-			for (j = 0; j < m->num; ++j) {
-				v = p + m->desc[j].offset;
-				ctx->data[offset + j] = le64_to_cpu(*v);
-			}
-
-			break;
-		}
+		virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
 	}
 
 	kfree(reply);
@@ -3582,28 +3617,18 @@ static int virtnet_get_hw_stats(struct virtnet_info *vi,
 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	unsigned int i, j;
+	unsigned int i;
 	u8 *p = data;
 
 	switch (stringset) {
 	case ETH_SS_STATS:
-		virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_CQ, 0, &p);
+		virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_CQ, 0, &p);
 
-		for (i = 0; i < vi->curr_queue_pairs; i++) {
-			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
-				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
-						virtnet_rq_stats_desc[j].desc);
+		for (i = 0; i < vi->curr_queue_pairs; ++i)
+			virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_RX, i, &p);
 
-			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_RX, i, &p);
-		}
-
-		for (i = 0; i < vi->curr_queue_pairs; i++) {
-			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
-				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
-						virtnet_sq_stats_desc[j].desc);
-
-			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_TX, i, &p);
-		}
+		for (i = 0; i < vi->curr_queue_pairs; ++i)
+			virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_TX, i, &p);
 		break;
 	}
 }
@@ -3636,8 +3661,7 @@ static int virtnet_get_sset_count(struct net_device *dev, int sset)
 
 		virtnet_stats_ctx_init(vi, &ctx, NULL);
 
-		pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
-		pair_count += ctx.num_rx + ctx.num_tx;
+		pair_count = ctx.num_rx + ctx.num_tx;
 
 		return ctx.num_cq + vi->curr_queue_pairs * pair_count;
 	default:
@@ -3650,46 +3674,27 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct virtnet_stats_ctx ctx = {0};
-	unsigned int idx, start, i, j;
+	unsigned int start, i;
 	const u8 *stats_base;
-	const u64_stats_t *p;
-	size_t offset;
 
 	virtnet_stats_ctx_init(vi, &ctx, data);
 	virtnet_get_hw_stats(vi, &ctx);
 
-	idx = ctx.num_cq;
-
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
+		struct send_queue *sq = &vi->sq[i];
 
 		stats_base = (const u8 *)&rq->stats;
 		do {
 			start = u64_stats_fetch_begin(&rq->stats.syncp);
-			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
-				offset = virtnet_rq_stats_desc[j].offset;
-				p = (const u64_stats_t *)(stats_base + offset);
-				data[idx + j] = u64_stats_read(p);
-			}
+			virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
-		idx += VIRTNET_RQ_STATS_LEN;
-		idx += ctx.num_rx;
-	}
-
-	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		struct send_queue *sq = &vi->sq[i];
 
 		stats_base = (const u8 *)&sq->stats;
 		do {
 			start = u64_stats_fetch_begin(&sq->stats.syncp);
-			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
-				offset = virtnet_sq_stats_desc[j].offset;
-				p = (const u64_stats_t *)(stats_base + offset);
-				data[idx + j] = u64_stats_read(p);
-			}
+			virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
-		idx += VIRTNET_SQ_STATS_LEN;
-		idx += ctx.num_tx;
 	}
 }
 
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH net-next v3 5/6] virtio_net: add the total stats field
  2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
                   ` (3 preceding siblings ...)
  2024-02-27  8:03 ` [PATCH net-next v3 4/6] virtio_net: stats map include driver stats Xuan Zhuo
@ 2024-02-27  8:03 ` Xuan Zhuo
  2024-02-27 14:54   ` Jakub Kicinski
  2024-02-27  8:03 ` [PATCH net-next v3 6/6] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
  2024-02-27 13:14 ` [PATCH net-next v3 0/6] virtio-net: support device stats Jiri Pirko
  6 siblings, 1 reply; 24+ messages in thread
From: Xuan Zhuo @ 2024-02-27  8:03 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, Xuan Zhuo, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Now, we just show the stats of every queue.

But for the user, the total values of every stat may be valuable.
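
The totals are placed at the front of the ethtool data, before the cq and
per-queue entries, and each total is simply the sum over the queues. For
example (illustrative names):

	rx_packets    = rx_queue_0_packets    + rx_queue_1_packets    + ...
	rx_hw_packets = rx_queue_hw_0_packets + rx_queue_hw_1_packets + ...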

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 72 ++++++++++++++++++++++++++++++++++------
 1 file changed, 61 insertions(+), 11 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 95cbfb159a03..91838d75cff2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -3399,6 +3399,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	return err;
 }
 
+/* qid == -1: for rx/tx queue total field */
 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
 {
 	struct virtnet_stats_map *m;
@@ -3421,14 +3422,23 @@ static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid,
 			else
 				tp = "_hw";
 
-			if (type == VIRTNET_STATS_Q_TYPE_RX)
-				ethtool_sprintf(&p, "rx_queue%s_%u_%s", tp, qid, m->desc[j].desc);
-
-			else if (type == VIRTNET_STATS_Q_TYPE_TX)
-				ethtool_sprintf(&p, "tx_queue%s_%u_%s", tp, qid, m->desc[j].desc);
-
-			else if (type == VIRTNET_STATS_Q_TYPE_CQ)
+			if (type == VIRTNET_STATS_Q_TYPE_RX) {
+				if (qid < 0)
+					ethtool_sprintf(&p, "rx%s_%s", tp, m->desc[j].desc);
+				else
+					ethtool_sprintf(&p, "rx_queue%s_%u_%s", tp, qid,
+							m->desc[j].desc);
+
+			} else if (type == VIRTNET_STATS_Q_TYPE_TX) {
+				if (qid < 0)
+					ethtool_sprintf(&p, "tx%s_%s", tp, m->desc[j].desc);
+				else
+					ethtool_sprintf(&p, "tx_queue%s_%u_%s", tp, qid,
+							m->desc[j].desc);
+
+			} else if (type == VIRTNET_STATS_Q_TYPE_CQ) {
 				ethtool_sprintf(&p, "cq%s_%s", tp, m->desc[j].desc);
+			}
 		}
 	}
 
@@ -3485,6 +3495,38 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
 	}
 }
 
+static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
+{
+	u32 step = num;
+	int i, j;
+	u64 *p;
+
+	for (i = 0; i < num; ++i) {
+		p = sum + i;
+		*p = 0;
+
+		for (j = 0; j < q_num; ++j)
+			*p += *(q_value + i + j * step);
+	}
+}
+
+static void virtnet_fill_total_fields(struct virtnet_info *vi,
+				      struct virtnet_stats_ctx *ctx)
+{
+	u64 *data, *first_rx_q, *first_tx_q;
+
+	first_rx_q = ctx->data + ctx->num_rx + ctx->num_tx + ctx->num_cq;
+	first_tx_q = first_rx_q + vi->curr_queue_pairs * ctx->num_rx;
+
+	data = ctx->data;
+
+	stats_sum_queue(data, ctx->num_rx, first_rx_q, vi->curr_queue_pairs);
+
+	data = ctx->data + ctx->num_rx;
+
+	stats_sum_queue(data, ctx->num_tx, first_tx_q, vi->curr_queue_pairs);
+}
+
 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
 			       struct virtnet_stats_ctx *ctx,
 			       const u8 *base, bool from_driver, u8 type)
@@ -3496,14 +3538,17 @@ static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
 	u64 offset;
 	int i, j;
 
+	/* skip the total fields of pairs */
+	offset = ctx->num_rx + ctx->num_tx;
+
 	if (qid == vi->max_queue_pairs * 2) {
-		offset = 0;
 		queue_type = VIRTNET_STATS_Q_TYPE_CQ;
 	} else if (qid % 2) {
-		offset = ctx->num_cq + ctx->num_rx * vi->curr_queue_pairs + ctx->num_tx * (qid / 2);
+		offset += ctx->num_cq + ctx->num_rx * vi->curr_queue_pairs +
+			ctx->num_tx * (qid / 2);
 		queue_type = VIRTNET_STATS_Q_TYPE_TX;
 	} else {
-		offset = ctx->num_cq + ctx->num_rx * (qid / 2);
+		offset += ctx->num_cq + ctx->num_rx * (qid / 2);
 		queue_type = VIRTNET_STATS_Q_TYPE_RX;
 	}
 
@@ -3622,6 +3667,9 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 
 	switch (stringset) {
 	case ETH_SS_STATS:
+		virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_RX, -1, &p);
+		virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_TX, -1, &p);
+
 		virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_CQ, 0, &p);
 
 		for (i = 0; i < vi->curr_queue_pairs; ++i)
@@ -3663,7 +3711,7 @@ static int virtnet_get_sset_count(struct net_device *dev, int sset)
 
 		pair_count = ctx.num_rx + ctx.num_tx;
 
-		return ctx.num_cq + vi->curr_queue_pairs * pair_count;
+		return pair_count + ctx.num_cq + vi->curr_queue_pairs * pair_count;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3696,6 +3744,8 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
 			virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 	}
+
+	virtnet_fill_total_fields(vi, &ctx);
 }
 
 static void virtnet_get_channels(struct net_device *dev,
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH net-next v3 6/6] virtio_net: rename stat tx_timeout to timeout
  2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
                   ` (4 preceding siblings ...)
  2024-02-27  8:03 ` [PATCH net-next v3 5/6] virtio_net: add the total stats field Xuan Zhuo
@ 2024-02-27  8:03 ` Xuan Zhuo
  2024-02-27 15:08   ` Jiri Pirko
  2024-02-27 13:14 ` [PATCH net-next v3 0/6] virtio-net: support device stats Jiri Pirko
  6 siblings, 1 reply; 24+ messages in thread
From: Xuan Zhuo @ 2024-02-27  8:03 UTC (permalink / raw)
  To: netdev
  Cc: Michael S. Tsirkin, Jason Wang, Xuan Zhuo, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Now, we have this:

    tx_queue_0_tx_timeouts

This is used to record tx schedule timeouts.
But the name contains "tx" twice. I think the below is enough.

    tx_queue_0_timeouts

So I rename this field.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 91838d75cff2..4312850fd770 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -87,7 +87,7 @@ struct virtnet_sq_stats {
 	u64_stats_t xdp_tx;
 	u64_stats_t xdp_tx_drops;
 	u64_stats_t kicks;
-	u64_stats_t tx_timeouts;
+	u64_stats_t timeouts;
 };
 
 struct virtnet_rq_stats {
@@ -111,7 +111,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
 	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
 	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
-	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
+	{ "timeouts",		VIRTNET_SQ_STAT(timeouts) },
 };
 
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
@@ -2760,7 +2760,7 @@ static void virtnet_stats(struct net_device *dev,
 			start = u64_stats_fetch_begin(&sq->stats.syncp);
 			tpackets = u64_stats_read(&sq->stats.packets);
 			tbytes   = u64_stats_read(&sq->stats.bytes);
-			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
+			terrors  = u64_stats_read(&sq->stats.timeouts);
 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
 
 		do {
@@ -4531,7 +4531,7 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
 
 	u64_stats_update_begin(&sq->stats.syncp);
-	u64_stats_inc(&sq->stats.tx_timeouts);
+	u64_stats_inc(&sq->stats.timeouts);
 	u64_stats_update_end(&sq->stats.syncp);
 
 	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
-- 
2.32.0.3.g01195cf9f


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 0/6] virtio-net: support device stats
  2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
                   ` (5 preceding siblings ...)
  2024-02-27  8:03 ` [PATCH net-next v3 6/6] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
@ 2024-02-27 13:14 ` Jiri Pirko
  6 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 13:14 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Tue, Feb 27, 2024 at 09:02:57AM CET, xuanzhuo@linux.alibaba.com wrote:
>As described in the spec:
>
>https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
>The virtio-net device supports reporting device stats.

Okay, you state what the hw supports. It would be nice to throw in a
sentence or two about what this patchset actually does (as a cover
letter should). Some details would also be nice, like example
commands and their outputs to show the actual benefit
for the user.

pw-bot: cr


>
>Please review.
>
>Thanks.
>
>v3:
>    1. rebase net-next
>
>v2:
>    1. fix the usage of the leXX_to_cpu()
>    2. add comment to the structure virtnet_stats_map
>
>v1:
>    1. fix some definitions of the macro and the struct
>
>
>
>
>Xuan Zhuo (6):
>  virtio_net: introduce device stats feature and structures
>  virtio_net: virtnet_send_command supports command-specific-result
>  virtio_net: support device stats
>  virtio_net: stats map include driver stats
>  virtio_net: add the total stats field
>  virtio_net: rename stat tx_timeout to timeout
>
> drivers/net/virtio_net.c        | 536 ++++++++++++++++++++++++++++----
> include/uapi/linux/virtio_net.h | 137 ++++++++
> 2 files changed, 613 insertions(+), 60 deletions(-)
>
>--
>2.32.0.3.g01195cf9f
>
>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 1/6] virtio_net: introduce device stats feature and structures
  2024-02-27  8:02 ` [PATCH net-next v3 1/6] virtio_net: introduce device stats feature and structures Xuan Zhuo
@ 2024-02-27 13:16   ` Jiri Pirko
  0 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 13:16 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Tue, Feb 27, 2024 at 09:02:58AM CET, xuanzhuo@linux.alibaba.com wrote:
>The virtio-net device stats spec:
>
>https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
>This commit introduces the relevant feature and structures.

Don't talk about "this commit" in the patch description. Tell the
codebase what to do:
https://www.kernel.org/doc/html/v6.6/process/submitting-patches.html#describe-your-changes


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 2/6] virtio_net: virtnet_send_command supports command-specific-result
  2024-02-27  8:02 ` [PATCH net-next v3 2/6] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
@ 2024-02-27 13:23   ` Jiri Pirko
  0 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 13:23 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

The patch subject should clearly indicate what should be changed,
something like this:

Subject: virtio_net: add support for command-specific-result in virtnet_send_command()

Tue, Feb 27, 2024 at 09:02:59AM CET, xuanzhuo@linux.alibaba.com wrote:
>As described in the spec: https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
>The virtnet cvq supports getting a result back from the device.
>This commit implements this.

Again, be imperative to the codebase clearly saying what it should
change. Much easier to read and understand the patch description then.

The code looks ok.


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 5/6] virtio_net: add the total stats field
  2024-02-27  8:03 ` [PATCH net-next v3 5/6] virtio_net: add the total stats field Xuan Zhuo
@ 2024-02-27 14:54   ` Jakub Kicinski
  2024-02-27 15:07     ` Jiri Pirko
  2024-03-07  9:36     ` Xuan Zhuo
  0 siblings, 2 replies; 24+ messages in thread
From: Jakub Kicinski @ 2024-02-27 14:54 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Paolo Abeni, virtualization

On Tue, 27 Feb 2024 16:03:02 +0800 Xuan Zhuo wrote:
> Now, we just show the stats of every queue.
> 
> But for the user, the total values of every stat may be valuable.

Please wait for this API to get merged:
https://lore.kernel.org/all/20240226211015.1244807-1-kuba@kernel.org/
A lot of the stats you're adding here can go into the new API.
More drivers can report things like number of LSO / GRO packets.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 3/6] virtio_net: support device stats
  2024-02-27  8:03 ` [PATCH net-next v3 3/6] virtio_net: support device stats Xuan Zhuo
@ 2024-02-27 14:56   ` Jiri Pirko
  2024-02-27 19:19   ` Simon Horman
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 14:56 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Tue, Feb 27, 2024 at 09:03:00AM CET, xuanzhuo@linux.alibaba.com wrote:
>As described in the spec: https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
>
>Make virtio-net support getting the stats from the device via
>"ethtool -S <eth0>".

Would be nice to have example output of this command included here as
well as in the cover letter.


>
>Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>---
> drivers/net/virtio_net.c | 362 ++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 358 insertions(+), 4 deletions(-)
>
>diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>index af512d85cd5b..5549fc8508bd 100644
>--- a/drivers/net/virtio_net.c
>+++ b/drivers/net/virtio_net.c
>@@ -128,6 +128,121 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> #define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
> #define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
> 
>+#define VIRTNET_STATS_DESC(qtype, class, name) \
>+	{#name, offsetof(struct virtio_net_stats_ ## qtype ## _ ## class, qtype ## _ ## name)}
>+
>+static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
>+	{"command_num", offsetof(struct virtio_net_stats_cvq, command_num)},
>+	{"ok_num", offsetof(struct virtio_net_stats_cvq, ok_num)}
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
>+	VIRTNET_STATS_DESC(rx, basic, packets),
>+	VIRTNET_STATS_DESC(rx, basic, bytes),
>+
>+	VIRTNET_STATS_DESC(rx, basic, notifications),
>+	VIRTNET_STATS_DESC(rx, basic, interrupts),
>+
>+	VIRTNET_STATS_DESC(rx, basic, drops),
>+	VIRTNET_STATS_DESC(rx, basic, drop_overruns),
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
>+	VIRTNET_STATS_DESC(tx, basic, packets),
>+	VIRTNET_STATS_DESC(tx, basic, bytes),
>+
>+	VIRTNET_STATS_DESC(tx, basic, notifications),
>+	VIRTNET_STATS_DESC(tx, basic, interrupts),
>+
>+	VIRTNET_STATS_DESC(tx, basic, drops),
>+	VIRTNET_STATS_DESC(tx, basic, drop_malformed),
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
>+	VIRTNET_STATS_DESC(rx, csum, csum_valid),
>+	VIRTNET_STATS_DESC(rx, csum, needs_csum),
>+
>+	VIRTNET_STATS_DESC(rx, csum, csum_none),
>+	VIRTNET_STATS_DESC(rx, csum, csum_bad),
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
>+	VIRTNET_STATS_DESC(tx, csum, needs_csum),
>+	VIRTNET_STATS_DESC(tx, csum, csum_none),
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
>+	VIRTNET_STATS_DESC(rx, gso, gso_packets),
>+	VIRTNET_STATS_DESC(rx, gso, gso_bytes),
>+	VIRTNET_STATS_DESC(rx, gso, gso_packets_coalesced),
>+	VIRTNET_STATS_DESC(rx, gso, gso_bytes_coalesced),
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
>+	VIRTNET_STATS_DESC(tx, gso, gso_packets),
>+	VIRTNET_STATS_DESC(tx, gso, gso_bytes),
>+	VIRTNET_STATS_DESC(tx, gso, gso_segments),
>+	VIRTNET_STATS_DESC(tx, gso, gso_segments_bytes),
>+	VIRTNET_STATS_DESC(tx, gso, gso_packets_noseg),
>+	VIRTNET_STATS_DESC(tx, gso, gso_bytes_noseg),
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
>+	VIRTNET_STATS_DESC(rx, speed, packets_allowance_exceeded),
>+	VIRTNET_STATS_DESC(rx, speed, bytes_allowance_exceeded),
>+};
>+
>+static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
>+	VIRTNET_STATS_DESC(tx, speed, packets_allowance_exceeded),
>+	VIRTNET_STATS_DESC(tx, speed, packets_allowance_exceeded),
>+};
>+
>+struct virtnet_stats_map {
>+	/* the stat type in bitmap */
>+	u64 stat_type;
>+
>+	/* the bytes of the response for the stat */
>+	u32 len;
>+
>+	/* the num of the response fields for the stat */
>+	u32 num;
>+
>+#define VIRTNET_STATS_Q_TYPE_RX 0
>+#define VIRTNET_STATS_Q_TYPE_TX 1
>+#define VIRTNET_STATS_Q_TYPE_CQ 2

Enum? Then you don't need to have it here in struct but above it.
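E.g. a minimal sketch (untested):

	enum virtnet_stats_queue_type {
		VIRTNET_STATS_Q_TYPE_RX,
		VIRTNET_STATS_Q_TYPE_TX,
		VIRTNET_STATS_Q_TYPE_CQ,
	};

and queue_type below can stay u32 or become the enum type.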


>+	u32 queue_type;
>+
>+	/* the reply type of the stat */
>+	u8 reply_type;
>+
>+	/* describe the name and the offset in the response */
>+	const struct virtnet_stat_desc *desc;
>+};
>+
>+#define VIRTNET_DEVICE_STATS_MAP_ITEM(TYPE, type, queue_type)	\
>+	{							\
>+		VIRTIO_NET_STATS_TYPE_##TYPE,			\
>+		sizeof(struct virtio_net_stats_ ## type),	\
>+		ARRAY_SIZE(virtnet_stats_ ## type ##_desc),	\
>+		VIRTNET_STATS_Q_TYPE_##queue_type,		\
>+		VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,		\
>+		&virtnet_stats_##type##_desc[0]			\
>+	}
>+
>+static struct virtnet_stats_map virtio_net_stats_map[] = {
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
>+
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_CSUM,  rx_csum,  RX),
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_GSO,   rx_gso,   RX),
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_SPEED, rx_speed, RX),
>+
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_BASIC, tx_basic, TX),
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_CSUM,  tx_csum,  TX),
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_GSO,   tx_gso,   TX),
>+	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
>+};
>+
> struct virtnet_interrupt_coalesce {
> 	u32 max_packets;
> 	u32 max_usecs;
>@@ -244,6 +359,7 @@ struct control_buf {
> 	struct virtio_net_ctrl_coal_tx coal_tx;
> 	struct virtio_net_ctrl_coal_rx coal_rx;
> 	struct virtio_net_ctrl_coal_vq coal_vq;
>+	struct virtio_net_stats_capabilities stats_cap;
> };
> 
> struct virtnet_info {
>@@ -329,6 +445,8 @@ struct virtnet_info {
> 
> 	/* failover when STANDBY feature enabled */
> 	struct failover *failover;
>+
>+	u64 device_stats_cap;
> };
> 
> struct padded_vnet_hdr {
>@@ -3263,6 +3381,204 @@ static int virtnet_set_channels(struct net_device *dev,
> 	return err;
> }
> 
>+static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
>+{
>+	struct virtnet_stats_map *m;
>+	int i, j;
>+	u8 *p = *data;

Reverse christmas tree:
https://www.kernel.org/doc/html/v6.6/process/maintainer-netdev.html#tl-dr
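i.e. order the local variable declarations longest to shortest, roughly:

	struct virtnet_stats_map *m;
	u8 *p = *data;
	int i, j;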


>+
>+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
>+		return;
>+
>+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
>+		m = &virtio_net_stats_map[i];
>+
>+		if (m->queue_type != type)
>+			continue;
>+
>+		if (!(vi->device_stats_cap & m->stat_type))
>+			continue;
>+
>+		for (j = 0; j < m->num; ++j) {
>+			if (type == VIRTNET_STATS_Q_TYPE_RX)
>+				ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
>+
>+			else if (type == VIRTNET_STATS_Q_TYPE_TX)
>+				ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
>+
>+			else if (type == VIRTNET_STATS_Q_TYPE_CQ)
>+				ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);

Switch-case?
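E.g. an untested sketch:

		switch (type) {
		case VIRTNET_STATS_Q_TYPE_RX:
			ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
			break;
		case VIRTNET_STATS_Q_TYPE_TX:
			ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
			break;
		case VIRTNET_STATS_Q_TYPE_CQ:
			ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
			break;
		}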


>+		}
>+	}
>+
>+	*data = p;
>+}
>+
>+struct virtnet_stats_ctx {
>+	u32 num_cq;
>+	u32 num_rx;
>+	u32 num_tx;
>+
>+	u64 bitmap_cq;
>+	u64 bitmap_rx;
>+	u64 bitmap_tx;
>+
>+	u32 size_cq;
>+	u32 size_rx;
>+	u32 size_tx;
>+
>+	u64 *data;
>+};
>+
>+static void virtnet_stats_ctx_init(struct virtnet_info *vi,
>+				   struct virtnet_stats_ctx *ctx,
>+				   u64 *data)
>+{
>+	struct virtnet_stats_map *m;
>+	int i;
>+
>+	ctx->data = data;
>+
>+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
>+		m = &virtio_net_stats_map[i];
>+
>+		if (vi->device_stats_cap & m->stat_type) {

if (!(vi->device_stats_cap & m->stat_type))
	continue;
would let you save one level of indent below.

>+			if (m->queue_type == VIRTNET_STATS_Q_TYPE_CQ) {
>+				ctx->bitmap_cq |= m->stat_type;
>+				ctx->num_cq += m->num;
>+				ctx->size_cq += m->len;
>+			}
>+
>+			if (m->queue_type == VIRTNET_STATS_Q_TYPE_RX) {
>+				ctx->bitmap_rx |= m->stat_type;
>+				ctx->num_rx += m->num;
>+				ctx->size_rx += m->len;
>+			}
>+
>+			if (m->queue_type == VIRTNET_STATS_Q_TYPE_TX) {
>+				ctx->bitmap_tx |= m->stat_type;
>+				ctx->num_tx += m->num;
>+				ctx->size_tx += m->len;
>+			}

Switch-case?


>+		}
>+	}
>+}
>+
>+static int virtnet_get_hw_stats(struct virtnet_info *vi,
>+				struct virtnet_stats_ctx *ctx)
>+{
>+	struct virtio_net_ctrl_queue_stats *req;
>+	struct virtio_net_stats_reply_hdr *hdr;
>+	struct scatterlist sgs_in, sgs_out;
>+	u32 num_rx, num_tx, num_cq, offset;
>+	int qnum, i, j,  qid, res_size;
>+	struct virtnet_stats_map *m;
>+	void *reply, *p;
>+	u64 bitmap;
>+	int ok;
>+	u64 *v;

Single-letter variables are always frowned upon:
m, v, p. The non-iterator variables could have meaningful names. The code
is then much easier to follow. Could you rename them please? This applies
to the rest of the code as well, of course.
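E.g. (just a suggestion) m -> stats_map, v -> stat_val, p -> cursor.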


>+
>+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
>+		return 0;
>+
>+	qnum = 0;
>+	if (ctx->bitmap_cq)
>+		qnum += 1;

qnum++ ?


>+
>+	if (ctx->bitmap_rx)
>+		qnum += vi->curr_queue_pairs;
>+
>+	if (ctx->bitmap_tx)
>+		qnum += vi->curr_queue_pairs;
>+
>+	req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
>+	if (!req)
>+		return -ENOMEM;
>+
>+	res_size = (ctx->size_rx + ctx->size_tx) * vi->curr_queue_pairs + ctx->size_cq;
>+	reply = kmalloc(res_size, GFP_KERNEL);
>+	if (!reply) {
>+		kfree(req);
>+		return -ENOMEM;
>+	}
>+
>+	j = 0;
>+	for (i = 0; i < vi->curr_queue_pairs; ++i) {
>+		if (ctx->bitmap_rx) {
>+			req->stats[j].vq_index = cpu_to_le16(i * 2);
>+			req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_rx);
>+			++j;
>+		}
>+
>+		if (ctx->bitmap_tx) {
>+			req->stats[j].vq_index = cpu_to_le16(i * 2 + 1);
>+			req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_tx);
>+			++j;
>+		}
>+	}
>+
>+	if (ctx->size_cq) {
>+		req->stats[j].vq_index = cpu_to_le16(vi->max_queue_pairs * 2);
>+		req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_cq);
>+		++j;
>+	}
>+
>+	sg_init_one(&sgs_out, req, sizeof(*req) * j);
>+	sg_init_one(&sgs_in, reply, res_size);
>+
>+	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
>+				  VIRTIO_NET_CTRL_STATS_GET,
>+				  &sgs_out, &sgs_in);
>+	kfree(req);
>+
>+	if (!ok) {
>+		kfree(reply);
>+		return ok;

virtnet_send_command() returns bool. This function returns 0/-EXX.
Please fix the return value here. Or is it supposed to be 0? In that
case just return 0 here. But I think this should return error.
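E.g. an untested sketch (-EINVAL is only an example, pick whatever errno fits):

	if (!ok) {
		kfree(reply);
		return -EINVAL;
	}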


>+	}
>+
>+	num_rx = VIRTNET_RQ_STATS_LEN + ctx->num_rx;
>+	num_tx = VIRTNET_SQ_STATS_LEN + ctx->num_tx;
>+	num_cq = ctx->num_tx;
>+
>+	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
>+		hdr = p;




>+
>+		qid = le16_to_cpu(hdr->vq_index);
>+
>+		if (qid == vi->max_queue_pairs * 2) {
>+			offset = 0;
>+			bitmap = ctx->bitmap_cq;
>+		} else if (qid % 2) {
>+			offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
>+			offset += VIRTNET_SQ_STATS_LEN;
>+			bitmap = ctx->bitmap_tx;
>+		} else {
>+			offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
>+			bitmap = ctx->bitmap_rx;
>+		}
>+
>+		for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
>+			m = &virtio_net_stats_map[i];
>+
>+			if (m->stat_type & bitmap)
>+				offset += m->num;
>+
>+			if (hdr->type != m->reply_type)
>+				continue;
>+
>+			for (j = 0; j < m->num; ++j) {
>+				v = p + m->desc[j].offset;
>+				ctx->data[offset + j] = le64_to_cpu(*v);
>+			}
>+
>+			break;
>+		}
>+	}
>+
>+	kfree(reply);
>+	return 0;
>+}
>+
> static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
> {
> 	struct virtnet_info *vi = netdev_priv(dev);
>@@ -3271,16 +3587,22 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
> 
> 	switch (stringset) {
> 	case ETH_SS_STATS:
>+		virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_CQ, 0, &p);
>+
> 		for (i = 0; i < vi->curr_queue_pairs; i++) {
> 			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
> 				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
> 						virtnet_rq_stats_desc[j].desc);
>+
>+			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_RX, i, &p);
> 		}
> 
> 		for (i = 0; i < vi->curr_queue_pairs; i++) {
> 			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
> 				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
> 						virtnet_sq_stats_desc[j].desc);
>+
>+			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_TX, i, &p);
> 		}
> 		break;
> 	}
>@@ -3289,11 +3611,35 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
> static int virtnet_get_sset_count(struct net_device *dev, int sset)
> {
> 	struct virtnet_info *vi = netdev_priv(dev);
>+	struct virtnet_stats_ctx ctx = {0};
>+	u32 pair_count;
> 
> 	switch (sset) {
> 	case ETH_SS_STATS:
>-		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
>-					       VIRTNET_SQ_STATS_LEN);
>+		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS) &&
>+		    !vi->device_stats_cap) {
>+			struct scatterlist sg;
>+
>+			sg_init_one(&sg, &vi->ctrl->stats_cap, sizeof(vi->ctrl->stats_cap));
>+
>+			if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
>+						  VIRTIO_NET_CTRL_STATS_QUERY,
>+						  NULL, &sg)) {
>+				dev_warn(&dev->dev, "Fail to get stats capability\n");
>+			} else {
>+				__le64 v;
>+
>+				v = vi->ctrl->stats_cap.supported_stats_types[0];
>+				vi->device_stats_cap = le64_to_cpu(v);
>+			}
>+		}
>+
>+		virtnet_stats_ctx_init(vi, &ctx, NULL);
>+
>+		pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
>+		pair_count += ctx.num_rx + ctx.num_tx;
>+
>+		return ctx.num_cq + vi->curr_queue_pairs * pair_count;
> 	default:
> 		return -EOPNOTSUPP;
> 	}
>@@ -3303,11 +3649,17 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
> 				      struct ethtool_stats *stats, u64 *data)
> {
> 	struct virtnet_info *vi = netdev_priv(dev);
>-	unsigned int idx = 0, start, i, j;
>+	struct virtnet_stats_ctx ctx = {0};
>+	unsigned int idx, start, i, j;
> 	const u8 *stats_base;
> 	const u64_stats_t *p;
> 	size_t offset;
> 
>+	virtnet_stats_ctx_init(vi, &ctx, data);
>+	virtnet_get_hw_stats(vi, &ctx);

Check the function's return value; at the very least print out an error
in case it fails.
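E.g. roughly (err being a local int; netdev_err() just as an example):

	err = virtnet_get_hw_stats(vi, &ctx);
	if (err)
		netdev_err(dev, "failed to get hw stats: %d\n", err);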

Btw, did you consider obtaining these stats asynchronously?


>+
>+	idx = ctx.num_cq;
>+
> 	for (i = 0; i < vi->curr_queue_pairs; i++) {
> 		struct receive_queue *rq = &vi->rq[i];
> 
>@@ -3321,6 +3673,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
> 			}
> 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
> 		idx += VIRTNET_RQ_STATS_LEN;
>+		idx += ctx.num_rx;
> 	}
> 
> 	for (i = 0; i < vi->curr_queue_pairs; i++) {
>@@ -3336,6 +3689,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
> 			}
> 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
> 		idx += VIRTNET_SQ_STATS_LEN;
>+		idx += ctx.num_tx;
> 	}
> }
> 
>@@ -4963,7 +5317,7 @@ static struct virtio_device_id id_table[] = {
> 	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
> 	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
> 	VIRTIO_NET_F_VQ_NOTF_COAL, \
>-	VIRTIO_NET_F_GUEST_HDRLEN
>+	VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
> 
> static unsigned int features[] = {
> 	VIRTNET_FEATURES,
>-- 
>2.32.0.3.g01195cf9f
>
>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 4/6] virtio_net: stats map include driver stats
  2024-02-27  8:03 ` [PATCH net-next v3 4/6] virtio_net: stats map include driver stats Xuan Zhuo
@ 2024-02-27 15:05   ` Jiri Pirko
  0 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 15:05 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Tue, Feb 27, 2024 at 09:03:01AM CET, xuanzhuo@linux.alibaba.com wrote:
>In the last commit, we use the stats map to manage the device stats.

Who's "we"?

>
>For the consistency, we let the stats map includes the driver stats.

Again, be imperative to the codebase. Tell it exactly what to change and
how.


>
>Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
>---
> drivers/net/virtio_net.c | 195 ++++++++++++++++++++-------------------

Could this be split? Quite hard to follow.


> 1 file changed, 100 insertions(+), 95 deletions(-)
>
>diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>index 5549fc8508bd..95cbfb159a03 100644
>--- a/drivers/net/virtio_net.c
>+++ b/drivers/net/virtio_net.c
>@@ -125,9 +125,6 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
> 	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
> };
> 
>-#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
>-#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

Why don't you leave this rather than open-coding it?


>-
> #define VIRTNET_STATS_DESC(qtype, class, name) \
> 	{#name, offsetof(struct virtio_net_stats_ ## qtype ## _ ## class, qtype ## _ ## name)}
> 
>@@ -198,10 +195,10 @@ static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> };
> 
> struct virtnet_stats_map {
>-	/* the stat type in bitmap */
>+	/* the stat type in bitmap. just for device stats */

Sentences start with a capital letter and end with a dot. This applies to
the rest of the code.


> 	u64 stat_type;
> 
>-	/* the bytes of the response for the stat */
>+	/* the bytes of the response for the stat. just for device stats */
> 	u32 len;
> 
> 	/* the num of the response fields for the stat */
>@@ -212,9 +209,11 @@ struct virtnet_stats_map {
> #define VIRTNET_STATS_Q_TYPE_CQ 2
> 	u32 queue_type;
> 
>-	/* the reply type of the stat */
>+	/* the reply type of the stat. just for device stats */
> 	u8 reply_type;
> 
>+	u8 from_driver;
>+
> 	/* describe the name and the offset in the response */
> 	const struct virtnet_stat_desc *desc;
> };
>@@ -226,10 +225,24 @@ struct virtnet_stats_map {
> 		ARRAY_SIZE(virtnet_stats_ ## type ##_desc),	\
> 		VIRTNET_STATS_Q_TYPE_##queue_type,		\
> 		VIRTIO_NET_STATS_TYPE_REPLY_##TYPE,		\
>+		false, \
> 		&virtnet_stats_##type##_desc[0]			\
> 	}
> 
>+#define VIRTNET_DRIVER_STATS_MAP_ITEM(type, queue_type)	\
>+	{							\
>+		0, 0,	\
>+		ARRAY_SIZE(virtnet_ ## type ## _stats_desc),	\
>+		VIRTNET_STATS_Q_TYPE_##queue_type,		\
>+		0, true, \
>+		&virtnet_##type##_stats_desc[0]			\
>+	}
>+
> static struct virtnet_stats_map virtio_net_stats_map[] = {
>+	/* driver stats should on the start. */
>+	VIRTNET_DRIVER_STATS_MAP_ITEM(rq, RX),
>+	VIRTNET_DRIVER_STATS_MAP_ITEM(sq, TX),
>+
> 	VIRTNET_DEVICE_STATS_MAP_ITEM(CVQ, cvq, CQ),
> 
> 	VIRTNET_DEVICE_STATS_MAP_ITEM(RX_BASIC, rx_basic, RX),
>@@ -243,6 +256,11 @@ static struct virtnet_stats_map virtio_net_stats_map[] = {
> 	VIRTNET_DEVICE_STATS_MAP_ITEM(TX_SPEED, tx_speed, TX),
> };
> 
>+#define virtnet_stats_supported(vi, m) ({				\

Could you have this as a function please?
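E.g. an untested sketch with the same semantics as the macro:

static bool virtnet_stats_supported(const struct virtnet_info *vi,
				    const struct virtnet_stats_map *m)
{
	return (vi->device_stats_cap & m->stat_type) || m->from_driver;
}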


>+	typeof(m) _m = (m);						\
>+	(((vi)->device_stats_cap & _m->stat_type) || _m->from_driver);	\
>+})
>+
> struct virtnet_interrupt_coalesce {
> 	u32 max_packets;
> 	u32 max_usecs;
>@@ -2247,7 +2265,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
> 
> 	u64_stats_set(&stats.packets, packets);
> 	u64_stats_update_begin(&rq->stats.syncp);
>-	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
>+	for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
> 		size_t offset = virtnet_rq_stats_desc[i].offset;
> 		u64_stats_t *item, *src;
> 
>@@ -3381,33 +3399,36 @@ static int virtnet_set_channels(struct net_device *dev,
> 	return err;
> }
> 
>-static void virtnet_get_hw_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
>+static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
> {
> 	struct virtnet_stats_map *m;
>+	const char *tp;
> 	int i, j;
> 	u8 *p = *data;
> 
>-	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
>-		return;

It is odd that you add this in the previous patch and then remove it right
away. I think the ordering of the patches could be different: do this
patch first, and only after that introduce the device stats feature
implementation. Makes sense?


>-
> 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> 		m = &virtio_net_stats_map[i];
> 
> 		if (m->queue_type != type)
> 			continue;
> 
>-		if (!(vi->device_stats_cap & m->stat_type))
>+		if (!virtnet_stats_supported(vi, m))
> 			continue;
> 
> 		for (j = 0; j < m->num; ++j) {
>+			if (m->from_driver)
>+				tp = "";
>+			else
>+				tp = "_hw";
>+
> 			if (type == VIRTNET_STATS_Q_TYPE_RX)
>-				ethtool_sprintf(&p, "rx_queue_hw_%u_%s", qid, m->desc[j].desc);
>+				ethtool_sprintf(&p, "rx_queue%s_%u_%s", tp, qid, m->desc[j].desc);
> 
> 			else if (type == VIRTNET_STATS_Q_TYPE_TX)
>-				ethtool_sprintf(&p, "tx_queue_hw_%u_%s", qid, m->desc[j].desc);
>+				ethtool_sprintf(&p, "tx_queue%s_%u_%s", tp, qid, m->desc[j].desc);
> 
> 			else if (type == VIRTNET_STATS_Q_TYPE_CQ)
>-				ethtool_sprintf(&p, "cq_hw_%s", m->desc[j].desc);
>+				ethtool_sprintf(&p, "cq%s_%s", tp, m->desc[j].desc);
> 		}
> 	}
> 
>@@ -3442,7 +3463,7 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
> 	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> 		m = &virtio_net_stats_map[i];
> 
>-		if (vi->device_stats_cap & m->stat_type) {
>+		if (virtnet_stats_supported(vi, m)) {
> 			if (m->queue_type == VIRTNET_STATS_Q_TYPE_CQ) {
> 				ctx->bitmap_cq |= m->stat_type;
> 				ctx->num_cq += m->num;
>@@ -3464,19 +3485,66 @@ static void virtnet_stats_ctx_init(struct virtnet_info *vi,
> 	}
> }
> 
>+static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
>+			       struct virtnet_stats_ctx *ctx,
>+			       const u8 *base, bool from_driver, u8 type)
>+{
>+	struct virtnet_stats_map *m;
>+	const u64_stats_t *v_stat;
>+	u32 queue_type;
>+	const u64 *v;
>+	u64 offset;
>+	int i, j;
>+
>+	if (qid == vi->max_queue_pairs * 2) {
>+		offset = 0;
>+		queue_type = VIRTNET_STATS_Q_TYPE_CQ;
>+	} else if (qid % 2) {
>+		offset = ctx->num_cq + ctx->num_rx * vi->curr_queue_pairs + ctx->num_tx * (qid / 2);
>+		queue_type = VIRTNET_STATS_Q_TYPE_TX;
>+	} else {
>+		offset = ctx->num_cq + ctx->num_rx * (qid / 2);
>+		queue_type = VIRTNET_STATS_Q_TYPE_RX;
>+	}
>+
>+	for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
>+		m = &virtio_net_stats_map[i];
>+
>+		if (m->queue_type != queue_type)
>+			continue;
>+
>+		if (from_driver != m->from_driver)
>+			goto skip;
>+
>+		if (type != m->reply_type)
>+			goto skip;
>+
>+		for (j = 0; j < m->num; ++j) {
>+			if (!from_driver) {
>+				v = (const u64 *)(base + m->desc[j].offset);

const le64?
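i.e. declare v as const __le64 * and cast to (const __le64 *) here, so the
type matches the le64_to_cpu() below.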


>+				ctx->data[offset + j] = le64_to_cpu(*v);
>+			} else {
>+				v_stat = (const u64_stats_t *)(base + m->desc[j].offset);
>+				ctx->data[offset + j] = u64_stats_read(v_stat);
>+			}
>+		}
>+
>+		break;
>+skip:
>+		if (virtnet_stats_supported(vi, m))
>+			offset += m->num;
>+	}
>+}
>+
> static int virtnet_get_hw_stats(struct virtnet_info *vi,
> 				struct virtnet_stats_ctx *ctx)
> {
> 	struct virtio_net_ctrl_queue_stats *req;
> 	struct virtio_net_stats_reply_hdr *hdr;
> 	struct scatterlist sgs_in, sgs_out;
>-	u32 num_rx, num_tx, num_cq, offset;
> 	int qnum, i, j,  qid, res_size;
>-	struct virtnet_stats_map *m;
> 	void *reply, *p;
>-	u64 bitmap;
> 	int ok;
>-	u64 *v;
> 
> 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
> 		return 0;
>@@ -3536,43 +3604,10 @@ static int virtnet_get_hw_stats(struct virtnet_info *vi,
> 		return ok;
> 	}
> 
>-	num_rx = VIRTNET_RQ_STATS_LEN + ctx->num_rx;
>-	num_tx = VIRTNET_SQ_STATS_LEN + ctx->num_tx;
>-	num_cq = ctx->num_tx;
>-
> 	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
> 		hdr = p;
>-
> 		qid = le16_to_cpu(hdr->vq_index);
>-
>-		if (qid == vi->max_queue_pairs * 2) {
>-			offset = 0;
>-			bitmap = ctx->bitmap_cq;
>-		} else if (qid % 2) {
>-			offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
>-			offset += VIRTNET_SQ_STATS_LEN;
>-			bitmap = ctx->bitmap_tx;
>-		} else {
>-			offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
>-			bitmap = ctx->bitmap_rx;
>-		}
>-
>-		for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
>-			m = &virtio_net_stats_map[i];
>-
>-			if (m->stat_type & bitmap)
>-				offset += m->num;
>-
>-			if (hdr->type != m->reply_type)
>-				continue;
>-
>-			for (j = 0; j < m->num; ++j) {
>-				v = p + m->desc[j].offset;
>-				ctx->data[offset + j] = le64_to_cpu(*v);
>-			}
>-
>-			break;
>-		}
>+		virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
> 	}
> 
> 	kfree(reply);
>@@ -3582,28 +3617,18 @@ static int virtnet_get_hw_stats(struct virtnet_info *vi,
> static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
> {
> 	struct virtnet_info *vi = netdev_priv(dev);
>-	unsigned int i, j;
>+	unsigned int i;
> 	u8 *p = data;
> 
> 	switch (stringset) {
> 	case ETH_SS_STATS:
>-		virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_CQ, 0, &p);
>+		virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_CQ, 0, &p);
> 
>-		for (i = 0; i < vi->curr_queue_pairs; i++) {
>-			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
>-				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
>-						virtnet_rq_stats_desc[j].desc);
>+		for (i = 0; i < vi->curr_queue_pairs; ++i)
>+			virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_RX, i, &p);
> 
>-			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_RX, i, &p);
>-		}
>-
>-		for (i = 0; i < vi->curr_queue_pairs; i++) {
>-			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
>-				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
>-						virtnet_sq_stats_desc[j].desc);
>-
>-			virtnet_get_hw_stats_string(vi, VIRTNET_STATS_Q_TYPE_TX, i, &p);
>-		}
>+		for (i = 0; i < vi->curr_queue_pairs; ++i)
>+			virtnet_get_stats_string(vi, VIRTNET_STATS_Q_TYPE_TX, i, &p);
> 		break;
> 	}
> }
>@@ -3636,8 +3661,7 @@ static int virtnet_get_sset_count(struct net_device *dev, int sset)
> 
> 		virtnet_stats_ctx_init(vi, &ctx, NULL);
> 
>-		pair_count = VIRTNET_RQ_STATS_LEN + VIRTNET_SQ_STATS_LEN;
>-		pair_count += ctx.num_rx + ctx.num_tx;
>+		pair_count = ctx.num_rx + ctx.num_tx;
> 
> 		return ctx.num_cq + vi->curr_queue_pairs * pair_count;
> 	default:
>@@ -3650,46 +3674,27 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
> {
> 	struct virtnet_info *vi = netdev_priv(dev);
> 	struct virtnet_stats_ctx ctx = {0};
>-	unsigned int idx, start, i, j;
>+	unsigned int start, i;
> 	const u8 *stats_base;
>-	const u64_stats_t *p;
>-	size_t offset;
> 
> 	virtnet_stats_ctx_init(vi, &ctx, data);
> 	virtnet_get_hw_stats(vi, &ctx);
> 
>-	idx = ctx.num_cq;
>-
> 	for (i = 0; i < vi->curr_queue_pairs; i++) {
> 		struct receive_queue *rq = &vi->rq[i];
>+		struct send_queue *sq = &vi->sq[i];
> 
> 		stats_base = (const u8 *)&rq->stats;
> 		do {
> 			start = u64_stats_fetch_begin(&rq->stats.syncp);
>-			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
>-				offset = virtnet_rq_stats_desc[j].offset;
>-				p = (const u64_stats_t *)(stats_base + offset);
>-				data[idx + j] = u64_stats_read(p);
>-			}
>+			virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
> 		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
>-		idx += VIRTNET_RQ_STATS_LEN;
>-		idx += ctx.num_rx;
>-	}
>-
>-	for (i = 0; i < vi->curr_queue_pairs; i++) {
>-		struct send_queue *sq = &vi->sq[i];
> 
> 		stats_base = (const u8 *)&sq->stats;
> 		do {
> 			start = u64_stats_fetch_begin(&sq->stats.syncp);
>-			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
>-				offset = virtnet_sq_stats_desc[j].offset;
>-				p = (const u64_stats_t *)(stats_base + offset);
>-				data[idx + j] = u64_stats_read(p);
>-			}
>+			virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
> 		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
>-		idx += VIRTNET_SQ_STATS_LEN;
>-		idx += ctx.num_tx;
> 	}
> }
> 
>-- 
>2.32.0.3.g01195cf9f
>
>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 5/6] virtio_net: add the total stats field
  2024-02-27 14:54   ` Jakub Kicinski
@ 2024-02-27 15:07     ` Jiri Pirko
  2024-02-27 15:41       ` Jakub Kicinski
  2024-03-07  9:36     ` Xuan Zhuo
  1 sibling, 1 reply; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 15:07 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Xuan Zhuo, netdev, Michael S. Tsirkin, Jason Wang,
	David S. Miller, Eric Dumazet, Paolo Abeni, virtualization

Tue, Feb 27, 2024 at 03:54:24PM CET, kuba@kernel.org wrote:
>On Tue, 27 Feb 2024 16:03:02 +0800 Xuan Zhuo wrote:
>> Now, we just show the stats of every queue.
>> 
>> But for the user, the total values of every stat may be valuable.
>
>Please wait for this API to get merged:
>https://lore.kernel.org/all/20240226211015.1244807-1-kuba@kernel.org/
>A lot of the stats you're adding here can go into the new API.

Can. But does that mean that ethtool additions of things like this
will be rejected after that?


>More drivers can report things like number of LSO / GRO packets.
>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 6/6] virtio_net: rename stat tx_timeout to timeout
  2024-02-27  8:03 ` [PATCH net-next v3 6/6] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
@ 2024-02-27 15:08   ` Jiri Pirko
  0 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 15:08 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Tue, Feb 27, 2024 at 09:03:03AM CET, xuanzhuo@linux.alibaba.com wrote:
>Now, we have this:
>
>    tx_queue_0_tx_timeouts
>
>This is used to record the tx schedule timeout.
>But this has two "tx". I think the below is enough.
>
>    tx_queue_0_timeouts
>
>So I rename this field.
>
>Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

Reviewed-by: Jiri Pirko <jiri@nvidia.com>

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 5/6] virtio_net: add the total stats field
  2024-02-27 15:07     ` Jiri Pirko
@ 2024-02-27 15:41       ` Jakub Kicinski
  2024-02-27 15:59         ` Jiri Pirko
  0 siblings, 1 reply; 24+ messages in thread
From: Jakub Kicinski @ 2024-02-27 15:41 UTC (permalink / raw)
  To: Jiri Pirko
  Cc: Xuan Zhuo, netdev, Michael S. Tsirkin, Jason Wang,
	David S. Miller, Eric Dumazet, Paolo Abeni, virtualization

On Tue, 27 Feb 2024 16:07:13 +0100 Jiri Pirko wrote:
> >Please wait for this API to get merged:
> >https://lore.kernel.org/all/20240226211015.1244807-1-kuba@kernel.org/
> >A lot of the stats you're adding here can go into the new API.  
> 
> Can. But does that mean that ethtool additions of things like this
> will be rejected after that?

As a general policy, yes, the same way we reject duplicating other
existing stats.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 5/6] virtio_net: add the total stats field
  2024-02-27 15:41       ` Jakub Kicinski
@ 2024-02-27 15:59         ` Jiri Pirko
  0 siblings, 0 replies; 24+ messages in thread
From: Jiri Pirko @ 2024-02-27 15:59 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: Xuan Zhuo, netdev, Michael S. Tsirkin, Jason Wang,
	David S. Miller, Eric Dumazet, Paolo Abeni, virtualization

Tue, Feb 27, 2024 at 04:41:17PM CET, kuba@kernel.org wrote:
>On Tue, 27 Feb 2024 16:07:13 +0100 Jiri Pirko wrote:
>> >Please wait for this API to get merged:
>> >https://lore.kernel.org/all/20240226211015.1244807-1-kuba@kernel.org/
>> >A lot of the stats you're adding here can go into the new API.  
>> 
>> Can. But does that mean that ethtool additions of things like this
>> will be rejected after that?
>
>As a general policy, yes, the same way we reject duplicating other
>existing stats.

Makes sense.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 3/6] virtio_net: support device stats
  2024-02-27  8:03 ` [PATCH net-next v3 3/6] virtio_net: support device stats Xuan Zhuo
  2024-02-27 14:56   ` Jiri Pirko
@ 2024-02-27 19:19   ` Simon Horman
  2024-02-29 10:35   ` kernel test robot
  2024-03-07 16:50   ` Jakub Kicinski
  3 siblings, 0 replies; 24+ messages in thread
From: Simon Horman @ 2024-02-27 19:19 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

On Tue, Feb 27, 2024 at 04:03:00PM +0800, Xuan Zhuo wrote:
> As the spec https://github.com/oasis-tcs/virtio-spec/commit/42f389989823039724f95bbbd243291ab0064f82
> 
> make virtio-net support getting the stats from the device by ethtool -S
> <eth0>.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

...

> +static int virtnet_get_hw_stats(struct virtnet_info *vi,
> +				struct virtnet_stats_ctx *ctx)
> +{
> +	struct virtio_net_ctrl_queue_stats *req;
> +	struct virtio_net_stats_reply_hdr *hdr;
> +	struct scatterlist sgs_in, sgs_out;
> +	u32 num_rx, num_tx, num_cq, offset;
> +	int qnum, i, j,  qid, res_size;
> +	struct virtnet_stats_map *m;
> +	void *reply, *p;
> +	u64 bitmap;
> +	int ok;
> +	u64 *v;
> +
> +	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
> +		return 0;
> +
> +	qnum = 0;
> +	if (ctx->bitmap_cq)
> +		qnum += 1;
> +
> +	if (ctx->bitmap_rx)
> +		qnum += vi->curr_queue_pairs;
> +
> +	if (ctx->bitmap_tx)
> +		qnum += vi->curr_queue_pairs;
> +
> +	req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
> +	if (!req)
> +		return -ENOMEM;
> +
> +	res_size = (ctx->size_rx + ctx->size_tx) * vi->curr_queue_pairs + ctx->size_cq;
> +	reply = kmalloc(res_size, GFP_KERNEL);
> +	if (!reply) {
> +		kfree(req);
> +		return -ENOMEM;
> +	}
> +
> +	j = 0;
> +	for (i = 0; i < vi->curr_queue_pairs; ++i) {
> +		if (ctx->bitmap_rx) {
> +			req->stats[j].vq_index = cpu_to_le16(i * 2);
> +			req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_rx);
> +			++j;
> +		}
> +
> +		if (ctx->bitmap_tx) {
> +			req->stats[j].vq_index = cpu_to_le16(i * 2 + 1);
> +			req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_tx);
> +			++j;
> +		}
> +	}
> +
> +	if (ctx->size_cq) {
> +		req->stats[j].vq_index = cpu_to_le16(vi->max_queue_pairs * 2);
> +		req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_cq);
> +		++j;
> +	}
> +
> +	sg_init_one(&sgs_out, req, sizeof(*req) * j);
> +	sg_init_one(&sgs_in, reply, res_size);
> +
> +	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
> +				  VIRTIO_NET_CTRL_STATS_GET,
> +				  &sgs_out, &sgs_in);
> +	kfree(req);
> +
> +	if (!ok) {
> +		kfree(reply);
> +		return ok;
> +	}
> +
> +	num_rx = VIRTNET_RQ_STATS_LEN + ctx->num_rx;
> +	num_tx = VIRTNET_SQ_STATS_LEN + ctx->num_tx;
> +	num_cq = ctx->num_tx;
> +
> +	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
> +		hdr = p;
> +
> +		qid = le16_to_cpu(hdr->vq_index);
> +
> +		if (qid == vi->max_queue_pairs * 2) {
> +			offset = 0;
> +			bitmap = ctx->bitmap_cq;
> +		} else if (qid % 2) {
> +			offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
> +			offset += VIRTNET_SQ_STATS_LEN;
> +			bitmap = ctx->bitmap_tx;
> +		} else {
> +			offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
> +			bitmap = ctx->bitmap_rx;
> +		}
> +
> +		for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
> +			m = &virtio_net_stats_map[i];
> +
> +			if (m->stat_type & bitmap)
> +				offset += m->num;
> +
> +			if (hdr->type != m->reply_type)
> +				continue;
> +
> +			for (j = 0; j < m->num; ++j) {
> +				v = p + m->desc[j].offset;
> +				ctx->data[offset + j] = le64_to_cpu(*v);

Hi Xuan Zhuo,

Sparse complains about the line above because the type of *v is u64,
but le64_to_cpu() expects __le64.
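A minimal fix would be to declare v as __le64 * (or cast at the use site),
roughly:

	__le64 *v;
	...
	v = p + m->desc[j].offset;
	ctx->data[offset + j] = le64_to_cpu(*v);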

> +			}
> +
> +			break;
> +		}
> +	}
> +
> +	kfree(reply);
> +	return 0;
> +}
> +
>  static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
>  {
>  	struct virtnet_info *vi = netdev_priv(dev);

...

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 3/6] virtio_net: support device stats
  2024-02-27  8:03 ` [PATCH net-next v3 3/6] virtio_net: support device stats Xuan Zhuo
  2024-02-27 14:56   ` Jiri Pirko
  2024-02-27 19:19   ` Simon Horman
@ 2024-02-29 10:35   ` kernel test robot
  2024-03-07 16:50   ` Jakub Kicinski
  3 siblings, 0 replies; 24+ messages in thread
From: kernel test robot @ 2024-02-29 10:35 UTC (permalink / raw)
  To: Xuan Zhuo, netdev
  Cc: oe-kbuild-all, Michael S. Tsirkin, Jason Wang, Xuan Zhuo,
	Eric Dumazet, Jakub Kicinski, Paolo Abeni, virtualization

Hi Xuan,

kernel test robot noticed the following build warnings:

[auto build test WARNING on net-next/main]

url:    https://github.com/intel-lab-lkp/linux/commits/Xuan-Zhuo/virtio_net-introduce-device-stats-feature-and-structures/20240227-161123
base:   net-next/main
patch link:    https://lore.kernel.org/r/20240227080303.63894-4-xuanzhuo%40linux.alibaba.com
patch subject: [PATCH net-next v3 3/6] virtio_net: support device stats
config: x86_64-randconfig-121-20240229 (https://download.01.org/0day-ci/archive/20240229/202402291808.cmzZAiYX-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240229/202402291808.cmzZAiYX-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202402291808.cmzZAiYX-lkp@intel.com/

sparse warnings: (new ones prefixed by >>)
>> drivers/net/virtio_net.c:3571:57: sparse: sparse: cast to restricted __le64

vim +3571 drivers/net/virtio_net.c

  3466	
  3467	static int virtnet_get_hw_stats(struct virtnet_info *vi,
  3468					struct virtnet_stats_ctx *ctx)
  3469	{
  3470		struct virtio_net_ctrl_queue_stats *req;
  3471		struct virtio_net_stats_reply_hdr *hdr;
  3472		struct scatterlist sgs_in, sgs_out;
  3473		u32 num_rx, num_tx, num_cq, offset;
  3474		int qnum, i, j,  qid, res_size;
  3475		struct virtnet_stats_map *m;
  3476		void *reply, *p;
  3477		u64 bitmap;
  3478		int ok;
  3479		u64 *v;
  3480	
  3481		if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
  3482			return 0;
  3483	
  3484		qnum = 0;
  3485		if (ctx->bitmap_cq)
  3486			qnum += 1;
  3487	
  3488		if (ctx->bitmap_rx)
  3489			qnum += vi->curr_queue_pairs;
  3490	
  3491		if (ctx->bitmap_tx)
  3492			qnum += vi->curr_queue_pairs;
  3493	
  3494		req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
  3495		if (!req)
  3496			return -ENOMEM;
  3497	
  3498		res_size = (ctx->size_rx + ctx->size_tx) * vi->curr_queue_pairs + ctx->size_cq;
  3499		reply = kmalloc(res_size, GFP_KERNEL);
  3500		if (!reply) {
  3501			kfree(req);
  3502			return -ENOMEM;
  3503		}
  3504	
  3505		j = 0;
  3506		for (i = 0; i < vi->curr_queue_pairs; ++i) {
  3507			if (ctx->bitmap_rx) {
  3508				req->stats[j].vq_index = cpu_to_le16(i * 2);
  3509				req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_rx);
  3510				++j;
  3511			}
  3512	
  3513			if (ctx->bitmap_tx) {
  3514				req->stats[j].vq_index = cpu_to_le16(i * 2 + 1);
  3515				req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_tx);
  3516				++j;
  3517			}
  3518		}
  3519	
  3520		if (ctx->size_cq) {
  3521			req->stats[j].vq_index = cpu_to_le16(vi->max_queue_pairs * 2);
  3522			req->stats[j].types_bitmap[0] = cpu_to_le64(ctx->bitmap_cq);
  3523			++j;
  3524		}
  3525	
  3526		sg_init_one(&sgs_out, req, sizeof(*req) * j);
  3527		sg_init_one(&sgs_in, reply, res_size);
  3528	
  3529		ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_STATS,
  3530					  VIRTIO_NET_CTRL_STATS_GET,
  3531					  &sgs_out, &sgs_in);
  3532		kfree(req);
  3533	
  3534		if (!ok) {
  3535			kfree(reply);
  3536			return ok;
  3537		}
  3538	
  3539		num_rx = VIRTNET_RQ_STATS_LEN + ctx->num_rx;
  3540		num_tx = VIRTNET_SQ_STATS_LEN + ctx->num_tx;
  3541		num_cq = ctx->num_tx;
  3542	
  3543		for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
  3544			hdr = p;
  3545	
  3546			qid = le16_to_cpu(hdr->vq_index);
  3547	
  3548			if (qid == vi->max_queue_pairs * 2) {
  3549				offset = 0;
  3550				bitmap = ctx->bitmap_cq;
  3551			} else if (qid % 2) {
  3552				offset = num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
  3553				offset += VIRTNET_SQ_STATS_LEN;
  3554				bitmap = ctx->bitmap_tx;
  3555			} else {
  3556				offset = num_cq + num_rx * (qid / 2) + VIRTNET_RQ_STATS_LEN;
  3557				bitmap = ctx->bitmap_rx;
  3558			}
  3559	
  3560			for (i = 0; i < ARRAY_SIZE(virtio_net_stats_map); ++i) {
  3561				m = &virtio_net_stats_map[i];
  3562	
  3563				if (m->stat_type & bitmap)
  3564					offset += m->num;
  3565	
  3566				if (hdr->type != m->reply_type)
  3567					continue;
  3568	
  3569				for (j = 0; j < m->num; ++j) {
  3570					v = p + m->desc[j].offset;
> 3571					ctx->data[offset + j] = le64_to_cpu(*v);
  3572				}
  3573	
  3574				break;
  3575			}
  3576		}
  3577	
  3578		kfree(reply);
  3579		return 0;
  3580	}
  3581	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 5/6] virtio_net: add the total stats field
  2024-02-27 14:54   ` Jakub Kicinski
  2024-02-27 15:07     ` Jiri Pirko
@ 2024-03-07  9:36     ` Xuan Zhuo
  2024-03-07 16:03       ` Jakub Kicinski
  1 sibling, 1 reply; 24+ messages in thread
From: Xuan Zhuo @ 2024-03-07  9:36 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Paolo Abeni, virtualization

On Tue, 27 Feb 2024 06:54:24 -0800, Jakub Kicinski <kuba@kernel.org> wrote:
> On Tue, 27 Feb 2024 16:03:02 +0800 Xuan Zhuo wrote:
> > Now, we just show the stats of every queue.
> >
> > But for the user, the total values of every stat may be valuable.
>
> Please wait for this API to get merged:
> https://lore.kernel.org/all/20240226211015.1244807-1-kuba@kernel.org/
> A lot of the stats you're adding here can go into the new API.
> More drivers can report things like number of LSO / GRO packets.


In this patch set, I just see two stats for tx and three for rx.
And which stats do you want to put into this API?

And on the other hand, how should we judge whether a stat should go into this API
or into the ethtool -S interface?

Thanks.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 5/6] virtio_net: add the total stats field
  2024-03-07  9:36     ` Xuan Zhuo
@ 2024-03-07 16:03       ` Jakub Kicinski
  0 siblings, 0 replies; 24+ messages in thread
From: Jakub Kicinski @ 2024-03-07 16:03 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Paolo Abeni, virtualization

On Thu, 7 Mar 2024 17:36:35 +0800 Xuan Zhuo wrote:
> > Please wait for this API to get merged:
> > https://lore.kernel.org/all/20240226211015.1244807-1-kuba@kernel.org/
> > A lot of the stats you're adding here can go into the new API.
> > More drivers can report things like number of LSO / GRO packets.  
> 
> In this patch set, I just see two stats for tx and three for rx.
> And which stats do you want to put into this API?
> 
> And on the other hand, how should we judge whether a stat should go into this API
> or into the ethtool -S interface?

A bit of a judgment call indeed, let me reply on patch 3 and we 
can go over them one by one before you invest the time re-coding.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 3/6] virtio_net: support device stats
  2024-02-27  8:03 ` [PATCH net-next v3 3/6] virtio_net: support device stats Xuan Zhuo
                     ` (2 preceding siblings ...)
  2024-02-29 10:35   ` kernel test robot
@ 2024-03-07 16:50   ` Jakub Kicinski
  2024-03-11 10:48     ` Xuan Zhuo
  3 siblings, 1 reply; 24+ messages in thread
From: Jakub Kicinski @ 2024-03-07 16:50 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Paolo Abeni, virtualization, Willem de Bruijn,
	Tariq Toukan, Michael Chan, Jesse Brandeburg, Alexander Lobakin,
	Shannon Nelson

CC: Willem and some driver folks for more input, context: extending
https://lore.kernel.org/all/20240306195509.1502746-1-kuba@kernel.org/
to cover virtio stats.

On Tue, 27 Feb 2024 16:03:00 +0800 Xuan Zhuo wrote:
> +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> +	VIRTNET_STATS_DESC(rx, basic, packets),
> +	VIRTNET_STATS_DESC(rx, basic, bytes),

Covered.

> +	VIRTNET_STATS_DESC(rx, basic, notifications),
> +	VIRTNET_STATS_DESC(rx, basic, interrupts),

I haven't seen HW devices count interrupts coming from a specific
queue (there's usually a lot more queues than IRQs these days),
let's keep these in ethtool -S for now, unless someone has a HW use
case.

> +	VIRTNET_STATS_DESC(rx, basic, drops),
> +	VIRTNET_STATS_DESC(rx, basic, drop_overruns),

These are important, but we need to make sure we have a good definition
for vendors to follow...

drops I'd define as "sum of all packets which came into the device, but
never left it, including but not limited to: packets dropped due to
lack of buffer space, processing errors, explicitly set policies and
packet filters." 
Call it hw-rx-drops ?

overruns is a bit harder to precisely define. I was thinking of
something more broad, like: "packets dropped due to transient lack of
resources, such as buffer space, host descriptors etc."

For context why not just go with virtio spec definition of "no
descriptors" - for HW devices, what exact point in the pipeline drops
depends on how back pressure is configured/implemented, and fetching
descriptors is high latency, so differentiating between "PCIe is slow"
and "host didn't post descriptors" is hard in practice.
Call it hw-rx-drop-overruns ?

> +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> +	VIRTNET_STATS_DESC(tx, basic, packets),
> +	VIRTNET_STATS_DESC(tx, basic, bytes),
> +
> +	VIRTNET_STATS_DESC(tx, basic, notifications),
> +	VIRTNET_STATS_DESC(tx, basic, interrupts),
> +
> +	VIRTNET_STATS_DESC(tx, basic, drops),

These 5 same as rx.

> +	VIRTNET_STATS_DESC(tx, basic, drop_malformed),

These I'd call hw-tx-drop-errors, "packets dropped because they were
invalid or malformed"?

> +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> +	VIRTNET_STATS_DESC(rx, csum, csum_valid),

I think in kernel parlance that would translate to CHECKSUM_UNNECESSARY?
So let's call it rx-csum-unnecessary ?
I'd skip the hw- prefix for this one, it doesn't matter to the user if
the HW or SW counted it.

> +	VIRTNET_STATS_DESC(rx, csum, needs_csum),

Hm, I think this is a bit software/virt device specific, presumably
rx-csum-partial for the kernel, up to you whether to make it ethtool -S
or netlink.

> +	VIRTNET_STATS_DESC(rx, csum, csum_none),
> +	VIRTNET_STATS_DESC(rx, csum, csum_bad),

These two make sense as is in netlink, should be fairly commonly
reported by devices. Maybe add a note in "bad" that packets with
bad csum are not discarded, but still delivered to the stack.

> +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> +	VIRTNET_STATS_DESC(tx, csum, needs_csum),
> +	VIRTNET_STATS_DESC(tx, csum, csum_none),

tx- version of what names we pick for rx-, netlink seems appropriate.

> +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> +	VIRTNET_STATS_DESC(rx, gso, gso_packets),
> +	VIRTNET_STATS_DESC(rx, gso, gso_bytes),

I used the term "GSO" in conversations about Rx and it often confuses
people. Let's use "GRO", so hw-gro-packets, and hw-gro-bytes ?
Or maybe coalesce? "hw-rx-coalesce" ? That's quite a bit longer..

Ah, and please mention in the doc that these counters "do not cover LRO
i.e. any coalescing implementation which doesn't follow GRO rules".

> +	VIRTNET_STATS_DESC(rx, gso, gso_packets_coalesced),

hw-gro-wire-packets ?
No strong preference on the naming, but I find that saying -wire
makes it 100% clear to everyone what the meaning is.

> +	VIRTNET_STATS_DESC(rx, gso, gso_bytes_coalesced),

The documentation in the virtio spec seems to be identical 
to the one for gso_packets, which gotta be unintentional?
I'm guessing this is hw-gro-wire-bytes? I.e. headers counted
multiple times?

> +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> +	VIRTNET_STATS_DESC(tx, gso, gso_packets),
> +	VIRTNET_STATS_DESC(tx, gso, gso_bytes),
> +	VIRTNET_STATS_DESC(tx, gso, gso_segments),
> +	VIRTNET_STATS_DESC(tx, gso, gso_segments_bytes),

these 4 make sense as mirror of the Rx

> +	VIRTNET_STATS_DESC(tx, gso, gso_packets_noseg),
> +	VIRTNET_STATS_DESC(tx, gso, gso_bytes_noseg),

Not sure what these are :) unless someone knows what it is and that
HW devices report it, let's keep them in ethtool -S ?

> +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> +	VIRTNET_STATS_DESC(rx, speed, packets_allowance_exceeded),

hw-rx-drop-ratelimits ?
"Allowance exceeded" is a bit of a mouthful to me, perhaps others
disagree. The description from the virtio spec is quite good.

> +	VIRTNET_STATS_DESC(rx, speed, bytes_allowance_exceeded),

No strong preference whether to expose this as a standard stat or
ethtool -S, we don't generally keep byte counters for drops, so
this would be special.

> +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> +	VIRTNET_STATS_DESC(tx, speed, packets_allowance_exceeded),
> +	VIRTNET_STATS_DESC(tx, speed, packets_allowance_exceeded),

same as rx

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 3/6] virtio_net: support device stats
  2024-03-07 16:50   ` Jakub Kicinski
@ 2024-03-11 10:48     ` Xuan Zhuo
  2024-03-11 15:43       ` Jakub Kicinski
  0 siblings, 1 reply; 24+ messages in thread
From: Xuan Zhuo @ 2024-03-11 10:48 UTC (permalink / raw)
  To: Jakub Kicinski
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Paolo Abeni, virtualization, Willem de Bruijn,
	Tariq Toukan, Michael Chan, Jesse Brandeburg, Alexander Lobakin,
	Shannon Nelson

On Thu, 7 Mar 2024 08:50:21 -0800, Jakub Kicinski <kuba@kernel.org> wrote:
> CC: Willem and some driver folks for more input, context: extending
> https://lore.kernel.org/all/20240306195509.1502746-1-kuba@kernel.org/
> to cover virtio stats.
>
> On Tue, 27 Feb 2024 16:03:00 +0800 Xuan Zhuo wrote:
> > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > +	VIRTNET_STATS_DESC(rx, basic, packets),
> > +	VIRTNET_STATS_DESC(rx, basic, bytes),
>
> Covered.

Regarding "packets" and "bytes": here they come from the hw device.
Actually, the driver also counts "packets" and "bytes" in SW.
So there are HW and SW versions. Do we need to distinguish them?

>
> > +	VIRTNET_STATS_DESC(rx, basic, notifications),
> > +	VIRTNET_STATS_DESC(rx, basic, interrupts),
>
> I haven't seen HW devices count interrupts coming from a specific
> queue (there's usually a lot more queues than IRQs these days),
> let's keep these in ethtool -S for now, unless someone has a HW use
> case.

OK.

>
> > +	VIRTNET_STATS_DESC(rx, basic, drops),
> > +	VIRTNET_STATS_DESC(rx, basic, drop_overruns),
>
> These are important, but we need to make sure we have a good definition
> for vendors to follow...
>
> drops I'd define as "sum of all packets which came into the device, but
> never left it, including but not limited to: packets dropped due to
> lack of buffer space, processing errors, explicitly set policies and
> packet filters."
> Call it hw-rx-drops ?

I agree.

>
> overruns is a bit harder to precisely define. I was thinking of
> something more broad, like: "packets dropped due to transient lack of
> resources, such as buffer space, host descriptors etc."
>
> For context why not just go with virtio spec definition of "no
> descriptors" - for HW devices, what exact point in the pipeline drops
> depends on how back pressure is configured/implemented, and fetching
> descriptors is high latency, so differentiating between "PCIe is slow"
> and "host didn't post descriptors" is hard in practice.
> Call it hw-rx-drop-overruns ?

OK.

>
> > +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
> > +	VIRTNET_STATS_DESC(tx, basic, packets),
> > +	VIRTNET_STATS_DESC(tx, basic, bytes),
> > +
> > +	VIRTNET_STATS_DESC(tx, basic, notifications),
> > +	VIRTNET_STATS_DESC(tx, basic, interrupts),
> > +
> > +	VIRTNET_STATS_DESC(tx, basic, drops),
>
> These 5 same as rx.
>
> > +	VIRTNET_STATS_DESC(tx, basic, drop_malformed),
>
> These I'd call hw-tx-drop-errors, "packets dropped because they were
> invalid or malformed"?

OK.

>
> > +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
> > +	VIRTNET_STATS_DESC(rx, csum, csum_valid),
>
> I think in kernel parlance that would translate to CHECKSUM_UNNECESSARY?
> So let's call it rx-csum-unnecessary ?
> I'd skip the hw- prefix for this one, it doesn't matter to the user if
> the HW or SW counted it.

OK.

>
> > +	VIRTNET_STATS_DESC(rx, csum, needs_csum),
>
> Hm, I think this is a bit software/virt device specific, presumably
> rx-csum-partial for the kernel, up to you whether to make it ethtool -S
> or netlink.

YES. This is specific to the virt device.
I will make it ethtool -S, unless somebody has other advice.

>
> > +	VIRTNET_STATS_DESC(rx, csum, csum_none),
> > +	VIRTNET_STATS_DESC(rx, csum, csum_bad),
>
> These two make sense as is in netlink, should be fairly commonly
> reported by devices. Maybe add a note in "bad" that packets with
> bad csum are not discarded, but still delivered to the stack.

OK.


>
> > +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc[] = {
> > +	VIRTNET_STATS_DESC(tx, csum, needs_csum),
> > +	VIRTNET_STATS_DESC(tx, csum, csum_none),
>
> tx- version of what names we pick for rx-, netlink seems appropriate.
>
> > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > +	VIRTNET_STATS_DESC(rx, gso, gso_packets),
> > +	VIRTNET_STATS_DESC(rx, gso, gso_bytes),
>
> I used the term "GSO" in conversations about Rx and it often confuses
> people. Let's use "GRO", so hw-gro-packets, and hw-gro-bytes ?
> Or maybe coalesce? "hw-rx-coalesce" ? That's quite a bit longer..

GRO may also confuse people.

I like hw-rx-coalesce-packets, hw-rx-coalesce-bytes.

>
> Ah, and please mention in the doc that these counters "do not cover LRO
> i.e. any coalescing implementation which doesn't follow GRO rules".

OK.

>
> > +	VIRTNET_STATS_DESC(rx, gso, gso_packets_coalesced),
>
> hw-gro-wire-packets ?
> No strong preference on the naming, but I find that saying -wire
> makes it 100% clear to everyone what the meaning is.

ok.


>
> > +	VIRTNET_STATS_DESC(rx, gso, gso_bytes_coalesced),
>
> The documentation in the virtio spec seems to be identical
> to the one for gso_packets, which gotta be unintentional?

One is for the packet count, the other is for the bytes.


> I'm guessing this is hw-gro-wire-bytes? I.e. headers counted
> multiple times?

This is used to count the bytes of the small packets before coalescing.

> > +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
> > +	VIRTNET_STATS_DESC(tx, gso, gso_packets),
> > +	VIRTNET_STATS_DESC(tx, gso, gso_bytes),
> > +	VIRTNET_STATS_DESC(tx, gso, gso_segments),
> > +	VIRTNET_STATS_DESC(tx, gso, gso_segments_bytes),
>
> these 4 make sense as mirror of the Rx
>
> > +	VIRTNET_STATS_DESC(tx, gso, gso_packets_noseg),
> > +	VIRTNET_STATS_DESC(tx, gso, gso_bytes_noseg),
>
> Not sure what these are :) unless someone knows what it is and that
> HW devices report it, let's keep them in ethtool -S ?

These are just for virtio. Let's keep them in ethtool -S.

>
> > +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
> > +	VIRTNET_STATS_DESC(rx, speed, packets_allowance_exceeded),
>
> hw-rx-drop-ratelimits ?
> "Allowance exceeded" is a bit of a mouthful to me, perhaps others
> disagree. The description from the virtio spec is quite good.

OK.

>
> > +	VIRTNET_STATS_DESC(rx, speed, bytes_allowance_exceeded),
>
> No strong preference whether to expose this as a standard stat or
> ethtool -S, we don't generally keep byte counters for drops, so
> this would be special.

OK.
>
> > +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
> > +	VIRTNET_STATS_DESC(tx, speed, packets_allowance_exceeded),
> > +	VIRTNET_STATS_DESC(tx, speed, packets_allowance_exceeded),
>
> same as rx


Thanks.

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH net-next v3 3/6] virtio_net: support device stats
  2024-03-11 10:48     ` Xuan Zhuo
@ 2024-03-11 15:43       ` Jakub Kicinski
  0 siblings, 0 replies; 24+ messages in thread
From: Jakub Kicinski @ 2024-03-11 15:43 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: netdev, Michael S. Tsirkin, Jason Wang, David S. Miller,
	Eric Dumazet, Paolo Abeni, virtualization, Willem de Bruijn,
	Tariq Toukan, Michael Chan, Jesse Brandeburg, Alexander Lobakin,
	Shannon Nelson

On Mon, 11 Mar 2024 18:48:45 +0800 Xuan Zhuo wrote:
> On Thu, 7 Mar 2024 08:50:21 -0800, Jakub Kicinski <kuba@kernel.org> wrote:
> > CC: Willem and some driver folks for more input, context: extending
> > https://lore.kernel.org/all/20240306195509.1502746-1-kuba@kernel.org/
> > to cover virtio stats.
> >
> > On Tue, 27 Feb 2024 16:03:00 +0800 Xuan Zhuo wrote:  
> > > +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
> > > +	VIRTNET_STATS_DESC(rx, basic, packets),
> > > +	VIRTNET_STATS_DESC(rx, basic, bytes),  
> >
> > Covered.  
> 
> Regarding "packets" and "bytes": here they come from the hw device.
> Actually, the driver also counts "packets" and "bytes" in SW.
> So there are HW and SW versions. Do we need to distinguish them?

Yup, there are already separate counters defined for SW 
and HW packets / bytes. For the feature specific counters
I don't think we need to have both SW and HW flavors defined.
But for pure rx / tx packets / bytes users may want to see both.

> > > +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc[] = {
> > > +	VIRTNET_STATS_DESC(rx, gso, gso_packets),
> > > +	VIRTNET_STATS_DESC(rx, gso, gso_bytes),  
> >
> > I used the term "GSO" in conversations about Rx and it often confuses
> > people. Let's use "GRO", so hw-gro-packets, and hw-gro-bytes ?
> > Or maybe coalesce? "hw-rx-coalesce" ? That's quite a bit longer..  
> 
> GRO may also confuse people.
> 
> I like hw-rx-coalesce-packets, hw-rx-coalesce-bytes.

FWIW the HW offload feature in ethtool -k is called 'rx-gro-hw',
but we can use "hw-rx-coalesce-*" and mention the feature in the
documentation.

^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2024-03-11 15:43 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-27  8:02 [PATCH net-next v3 0/6] virtio-net: support device stats Xuan Zhuo
2024-02-27  8:02 ` [PATCH net-next v3 1/6] virtio_net: introduce device stats feature and structures Xuan Zhuo
2024-02-27 13:16   ` Jiri Pirko
2024-02-27  8:02 ` [PATCH net-next v3 2/6] virtio_net: virtnet_send_command supports command-specific-result Xuan Zhuo
2024-02-27 13:23   ` Jiri Pirko
2024-02-27  8:03 ` [PATCH net-next v3 3/6] virtio_net: support device stats Xuan Zhuo
2024-02-27 14:56   ` Jiri Pirko
2024-02-27 19:19   ` Simon Horman
2024-02-29 10:35   ` kernel test robot
2024-03-07 16:50   ` Jakub Kicinski
2024-03-11 10:48     ` Xuan Zhuo
2024-03-11 15:43       ` Jakub Kicinski
2024-02-27  8:03 ` [PATCH net-next v3 4/6] virtio_net: stats map include driver stats Xuan Zhuo
2024-02-27 15:05   ` Jiri Pirko
2024-02-27  8:03 ` [PATCH net-next v3 5/6] virtio_net: add the total stats field Xuan Zhuo
2024-02-27 14:54   ` Jakub Kicinski
2024-02-27 15:07     ` Jiri Pirko
2024-02-27 15:41       ` Jakub Kicinski
2024-02-27 15:59         ` Jiri Pirko
2024-03-07  9:36     ` Xuan Zhuo
2024-03-07 16:03       ` Jakub Kicinski
2024-02-27  8:03 ` [PATCH net-next v3 6/6] virtio_net: rename stat tx_timeout to timeout Xuan Zhuo
2024-02-27 15:08   ` Jiri Pirko
2024-02-27 13:14 ` [PATCH net-next v3 0/6] virtio-net: support device stats Jiri Pirko
