* [PATCH] vhost: add pmd xstats
@ 2016-08-19 12:16 Zhiyong Yang
From: Zhiyong Yang @ 2016-08-19 12:16 UTC
  To: dev; +Cc: Zhiyong Yang

This feature adds vhost PMD extended statistics on a per-queue basis
for applications such as OVS.

The statistics counters are based on RFC 2819 and 2863 as follows:

rx/tx_good_packets
rx/tx_total_bytes
rx/tx_dropped_pkts
rx/tx_broadcast_packets
rx/tx_multicast_packets
rx/tx_ucast_packets
rx_undersize_errors
rx/tx_size_64_packets
rx/tx_size_65_to_127_packets
rx/tx_size_128_to_255_packets
rx/tx_size_256_to_511_packets
rx/tx_size_512_to_1023_packets
rx/tx_size_1024_to_1522_packets
rx/tx_size_1523_to_max_packets
rx/tx_errors
rx_fragmented_errors
rx_jabber_errors
rx_unknown_protos_packets

No API is changed or added; the counters are exposed through the
existing ethdev xstats calls:
rte_eth_xstats_get_names() retrieves the names of the supported vhost
xstats,
rte_eth_xstats_get() retrieves the vhost extended statistics,
rte_eth_xstats_reset() resets the vhost extended statistics.
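
For illustration, here is a minimal sketch (not part of this patch) of
how an application could read these counters through the unchanged
ethdev API; it assumes port_id refers to an initialized vhost port and
trims error handling:

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
dump_vhost_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int n, i;

	/* A NULL array asks only for the number of available xstats. */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, values[i].value);
	}
	free(names);
	free(values);
}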

Since collecting the data in vhost_update_packet_xstats has some impact
on RX/TX performance, the compile-time switch
CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n is set by default in the
file config/common_base; change it to "y" there if xstats data are
needed.

The usage of vhost PMD xstats is the same as for virtio PMD xstats.
For example, when the testpmd application is running in interactive
mode, vhost PMD xstats support the following two commands:

show port xstats all|port_id will show vhost xstats
clear port xstats all|port_id will reset vhost xstats

The reported names are prefixed with the queue, for example
rx_q0_good_packets or tx_q0_size_64_packets.

Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
---
 config/common_base                |   1 +
 drivers/net/vhost/rte_eth_vhost.c | 295 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 295 insertions(+), 1 deletion(-)

diff --git a/config/common_base b/config/common_base
index 7830535..57fcb3f 100644
--- a/config/common_base
+++ b/config/common_base
@@ -561,6 +561,7 @@ CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
 # To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled.
 #
 CONFIG_RTE_LIBRTE_PMD_VHOST=n
+CONFIG_RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS=n
 
 #
 #Compile Xen domain0 support
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7539cd4..20b77ca 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -45,6 +45,9 @@
 #include <rte_kvargs.h>
 #include <rte_virtio_net.h>
 #include <rte_spinlock.h>
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+#include <rte_common.h>
+#endif
 
 #include "rte_eth_vhost.h"
 
@@ -72,6 +75,12 @@ static struct ether_addr base_eth_addr = {
 	}
 };
 
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
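+/* Per-queue counter array; indices map to the xstats tables below:
+ * [0] undersize, [1]..[7] size buckets (64, 65-127, ..., 1523-max),
+ * [8] broadcast, [9] multicast, [10] unicast, [11] errors,
+ * [12] fragmented, [13] jabber, [14] unknown protos ([15] unused).
+ */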
+struct vhost_xstats {
+	uint64_t stat[16];
+};
+#endif
+
 struct vhost_queue {
 	int vid;
 	rte_atomic32_t allow_queuing;
@@ -85,7 +94,10 @@ struct vhost_queue {
 	uint64_t missed_pkts;
 	uint64_t rx_bytes;
 	uint64_t tx_bytes;
-};
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+	struct vhost_xstats xstats;
+#endif
+};
 
 struct pmd_internal {
 	char *dev_name;
@@ -127,6 +139,274 @@ struct rte_vhost_vring_state {
 
 static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
 
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+enum rte_vhostqueue_rxtx {
+	RTE_VHOSTQUEUE_RX = 0,
+	RTE_VHOSTQUEUE_TX = 1
+};
+
+#define RTE_ETH_VHOST_XSTATS_NAME_SIZE 64
+
+struct rte_vhost_xstats_name_off {
+	char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
+	uint64_t offset;
+};
+
+/* rx_qX_ is prepended to the name string here */
+static const struct rte_vhost_xstats_name_off rte_vhost_rxq_stat_strings[] = {
+	{"good_packets",
+			offsetof(struct vhost_queue, rx_pkts)},
+	{"total_bytes",
+			offsetof(struct vhost_queue, rx_bytes)},
+	{"dropped_pkts",
+			offsetof(struct vhost_queue, missed_pkts)},
+	{"broadcast_packets",
+			offsetof(struct vhost_queue, xstats.stat[8])},
+	{"multicast_packets",
+			offsetof(struct vhost_queue, xstats.stat[9])},
+	{"ucast_packets",
+			offsetof(struct vhost_queue, xstats.stat[10])},
+	{"undersize_errors",
+			offsetof(struct vhost_queue, xstats.stat[0])},
+	{"size_64_packets",
+			offsetof(struct vhost_queue, xstats.stat[1])},
+	{"size_65_to_127_packets",
+			offsetof(struct vhost_queue, xstats.stat[2])},
+	{"size_128_to_255_packets",
+			offsetof(struct vhost_queue, xstats.stat[3])},
+	{"size_256_to_511_packets",
+			offsetof(struct vhost_queue, xstats.stat[4])},
+	{"size_512_to_1023_packets",
+			offsetof(struct vhost_queue, xstats.stat[5])},
+	{"size_1024_to_1522_packets",
+			offsetof(struct vhost_queue, xstats.stat[6])},
+	{"size_1523_to_max_packets",
+			offsetof(struct vhost_queue, xstats.stat[7])},
+	{"errors",
+			offsetof(struct vhost_queue, xstats.stat[11])},
+	{"fragmented_errors",
+			offsetof(struct vhost_queue, xstats.stat[12])},
+	{"jabber_errors",
+			offsetof(struct vhost_queue, xstats.stat[13])},
+	{"unknown_protos_packets",
+			offsetof(struct vhost_queue, xstats.stat[14])},
+};
+
+/* tx_qX_ is prepended to the name string here */
+static const struct rte_vhost_xstats_name_off rte_vhost_txq_stat_strings[] = {
+	{"good_packets",
+			offsetof(struct vhost_queue, tx_pkts)},
+	{"total_bytes",
+			offsetof(struct vhost_queue, tx_bytes)},
+	{"dropped_pkts",
+			offsetof(struct vhost_queue, missed_pkts)},
+	{"broadcast_packets",
+			offsetof(struct vhost_queue, xstats.stat[8])},
+	{"multicast_packets",
+			offsetof(struct vhost_queue, xstats.stat[9])},
+	{"ucast_packets",
+			offsetof(struct vhost_queue, xstats.stat[10])},
+	{"size_64_packets",
+			offsetof(struct vhost_queue, xstats.stat[1])},
+	{"size_65_to_127_packets",
+			offsetof(struct vhost_queue, xstats.stat[2])},
+	{"size_128_to_255_packets",
+			offsetof(struct vhost_queue, xstats.stat[3])},
+	{"size_256_to_511_packets",
+			offsetof(struct vhost_queue, xstats.stat[4])},
+	{"size_512_to_1023_packets",
+			offsetof(struct vhost_queue, xstats.stat[5])},
+	{"size_1024_to_1522_packets",
+			offsetof(struct vhost_queue, xstats.stat[6])},
+	{"size_1523_to_max_packets",
+			offsetof(struct vhost_queue, xstats.stat[7])},
+	{"errors",
+			offsetof(struct vhost_queue, xstats.stat[11])},
+};
+
+#define VHOST_NB_RXQ_XSTATS (sizeof(rte_vhost_rxq_stat_strings) / \
+			     sizeof(rte_vhost_rxq_stat_strings[0]))
+
+#define VHOST_NB_TXQ_XSTATS (sizeof(rte_vhost_txq_stat_strings) / \
+			     sizeof(rte_vhost_txq_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct vhost_queue *vqrx = NULL;
+	struct vhost_queue *vqtx = NULL;
+	unsigned int i = 0;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!dev->data->rx_queues[i])
+			continue;
+		vqrx = (struct vhost_queue *)dev->data->rx_queues[i];
+		vqrx->rx_pkts = 0;
+		vqrx->rx_bytes = 0;
+		vqrx->missed_pkts = 0;
+		memset(&vqrx->xstats, 0, sizeof(vqrx->xstats));
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		if (!dev->data->tx_queues[i])
+			continue;
+		vqtx = (struct vhost_queue *)dev->data->tx_queues[i];
+		vqtx->tx_pkts = 0;
+		vqtx->tx_bytes = 0;
+		vqtx->missed_pkts = 0;
+		memset(&vqtx->xstats, 0, sizeof(vqtx->xstats));
+	}
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
+			   struct rte_eth_xstat_name *xstats_names,
+			   __rte_unused unsigned int limit)
+{
+	unsigned int i = 0;
+	unsigned int t = 0;
+	int count = 0;
+	int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+			+ dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+	if (xstats_names) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct vhost_queue *rxvq = dev->data->rx_queues[i];
+
+			if (!rxvq)
+				continue;
+			for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
+				snprintf(xstats_names[count].name,
+					 sizeof(xstats_names[count].name),
+					 "rx_q%u_%s", i,
+					 rte_vhost_rxq_stat_strings[t].name);
+				count++;
+			}
+		}
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			struct vhost_queue *txvq = dev->data->tx_queues[i];
+
+			if (!txvq)
+				continue;
+			for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+				snprintf(xstats_names[count].name,
+					 sizeof(xstats_names[count].name),
+					 "tx_q%u_%s", i,
+					 rte_vhost_txq_stat_strings[t].name);
+				count++;
+			}
+		}
+		return count;
+	}
+	return nstats;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		     unsigned int n)
+{
+	unsigned int i;
+	unsigned int t;
+	unsigned int count = 0;
+
+	unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
+				+ dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
+
+	if (n < nxstats)
+		return nxstats;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		struct vhost_queue *rxvq =
+			(struct vhost_queue *)dev->data->rx_queues[i];
+
+		if (!rxvq)
+			continue;
+
+		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
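+			/* Each counter lives at a fixed byte offset
+			 * within struct vhost_queue, recorded in the
+			 * name/offset table above.
+			 */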
+			xstats[count].value = *(uint64_t *)(((char *)rxvq)
+				+ rte_vhost_rxq_stat_strings[t].offset);
+			count++;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		struct vhost_queue *txvq =
+			(struct vhost_queue *)dev->data->tx_queues[i];
+
+		if (!txvq)
+			continue;
+
+		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
+			xstats[count].value = *(uint64_t *)(((char *)txvq)
+				+ rte_vhost_txq_stat_strings[t].offset);
+			count++;
+		}
+	}
+
+	return count;
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+			   struct rte_mbuf **bufs,
+			   uint16_t nb_rxtx,
+			   uint16_t nb_bufs,
+			   enum rte_vhostqueue_rxtx vqueue_rxtx)
+{
+	uint32_t pkt_len = 0;
+	uint64_t i = 0;
+	uint64_t index;
+	struct ether_addr *ea = NULL;
+	struct vhost_xstats *xstats_update = &vq->xstats;
+
+	for (i = 0; i < nb_rxtx; i++) {
+		pkt_len = bufs[i]->pkt_len;
+		if (pkt_len == 64) {
+			xstats_update->stat[1]++;
+		} else if (pkt_len > 64 && pkt_len < 1024) {
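+			/* Map 65..1023 to stat[2]..stat[5]: the bucket
+			 * index is floor(log2(pkt_len)) - 4, computed
+			 * here via count-leading-zeros.
+			 */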
+			index = (sizeof(pkt_len) * 8)
+				- __builtin_clz(pkt_len) - 5;
+			xstats_update->stat[index]++;
+		} else {
+			if (pkt_len < 64)
+				xstats_update->stat[0]++;
+			else if (pkt_len <= 1522)
+				xstats_update->stat[6]++;
+			else if (pkt_len > 1522)
+				xstats_update->stat[7]++;
+		}
+
+		ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+		if (is_multicast_ether_addr(ea)) {
+			if (is_broadcast_ether_addr(ea))
+				/* broadcast++; */
+				xstats_update->stat[8]++;
+			else
+				/* multicast++; */
+				xstats_update->stat[9]++;
+		}
+	}
+	/* Unicast counts are derived as total packets (including those
+	 * discarded or not sent, per RFC 2863) minus broadcast and
+	 * multicast.
+	 */
+	if (vqueue_rxtx == RTE_VHOSTQUEUE_RX) {
+		xstats_update->stat[10] = vq->rx_pkts + vq->missed_pkts
+					  - (xstats_update->stat[8]
+					  + xstats_update->stat[9]);
+	} else {
+		/* Classify also the packets that could not be sent. */
+		for (i = nb_rxtx; i < nb_bufs; i++) {
+			ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
+			if (is_multicast_ether_addr(ea)) {
+				if (is_broadcast_ether_addr(ea))
+					xstats_update->stat[8]++;
+				else
+					xstats_update->stat[9]++;
+			}
+		}
+		xstats_update->stat[10] = vq->tx_pkts + vq->missed_pkts
+			- (xstats_update->stat[8] + xstats_update->stat[9]);
+	}
+}
+#endif
+
 static uint16_t
 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
@@ -152,6 +432,10 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		r->rx_bytes += bufs[i]->pkt_len;
 	}
 
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+	vhost_update_packet_xstats(r, bufs, nb_rx, nb_rx, RTE_VHOSTQUEUE_RX);
+#endif
+
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
 
@@ -182,6 +466,10 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	for (i = 0; likely(i < nb_tx); i++)
 		r->tx_bytes += bufs[i]->pkt_len;
 
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bufs, RTE_VHOSTQUEUE_TX);
+#endif
+
 	for (i = 0; likely(i < nb_tx); i++)
 		rte_pktmbuf_free(bufs[i]);
 out:
@@ -682,6 +970,11 @@ static const struct eth_dev_ops ops = {
 	.link_update = eth_link_update,
 	.stats_get = eth_stats_get,
 	.stats_reset = eth_stats_reset,
+#ifdef RTE_LIBRTE_PMD_VHOST_UPDATE_XSTATS
+	.xstats_reset = vhost_dev_xstats_reset,
+	.xstats_get = vhost_dev_xstats_get,
+	.xstats_get_names = vhost_dev_xstats_get_names,
+#endif
 };
 
 static int
-- 
2.5.5
