From: Gaoxiang Liu <gaoxiangliu0@163.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, liugaoxiang@huawei.com,
	Gaoxiang Liu <gaoxiangliu0@163.com>
Subject: [dpdk-dev] [PATCH v2] net/vhost: merge vhost stats loop in vhost Tx/Rx
Date: Mon, 27 Sep 2021 09:30:22 +0800	[thread overview]
Message-ID: <20210927013022.131-1-gaoxiangliu0@163.com> (raw)
In-Reply-To: <20210926125623.833-1-gaoxiangliu0@163.com>

To improve performance in vhost Tx/Rx, merge the vhost stats loops.
eth_vhost_tx iterates over the sent packets twice: once to accumulate
the byte count and once more inside vhost_update_packet_xstats().
The two passes can be merged into a single loop.
eth_vhost_rx has the same issue as Tx.
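
As a simplified sketch of the idea (not the exact driver code), the Tx
path previously walked the sent mbufs twice, once to sum bytes and once
more inside vhost_update_packet_xstats():

	for (i = 0; likely(i < nb_tx); i++)
		nb_bytes += bufs[i]->pkt_len;
	/* loops over bufs[0..nb_tx) a second time internally */
	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);

With this patch the burst is walked once, updating the per-packet
xstats in the same pass:

	for (i = 0; likely(i < nb_tx); i++) {
		nb_bytes += bufs[i]->pkt_len;
		vhost_update_single_packet_xstats(r, bufs[i]);
	}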

Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")

Signed-off-by: Gaoxiang Liu <gaoxiangliu0@163.com>
---

v2:
 * Fix coding style issues.
---
 drivers/net/vhost/rte_eth_vhost.c | 62 ++++++++++++++-----------------
 1 file changed, 28 insertions(+), 34 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a202931e9a..1d554caf9e 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -336,38 +336,29 @@ vhost_count_xcast_packets(struct vhost_queue *vq,
 }
 
 static void
-vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
-			   uint16_t count, uint64_t nb_bytes,
-			   uint64_t nb_missed)
+vhost_update_single_packet_xstats(struct vhost_queue *vq, struct rte_mbuf *buf)
 {
 	uint32_t pkt_len = 0;
-	uint64_t i = 0;
 	uint64_t index;
 	struct vhost_stats *pstats = &vq->stats;
 
-	pstats->xstats[VHOST_BYTE] += nb_bytes;
-	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
-	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
-
-	for (i = 0; i < count ; i++) {
-		pstats->xstats[VHOST_PKT]++;
-		pkt_len = bufs[i]->pkt_len;
-		if (pkt_len == 64) {
-			pstats->xstats[VHOST_64_PKT]++;
-		} else if (pkt_len > 64 && pkt_len < 1024) {
-			index = (sizeof(pkt_len) * 8)
-				- __builtin_clz(pkt_len) - 5;
-			pstats->xstats[index]++;
-		} else {
-			if (pkt_len < 64)
-				pstats->xstats[VHOST_UNDERSIZE_PKT]++;
-			else if (pkt_len <= 1522)
-				pstats->xstats[VHOST_1024_TO_1522_PKT]++;
-			else if (pkt_len > 1522)
-				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
-		}
-		vhost_count_xcast_packets(vq, bufs[i]);
+	pstats->xstats[VHOST_PKT]++;
+	pkt_len = buf->pkt_len;
+	if (pkt_len == 64) {
+		pstats->xstats[VHOST_64_PKT]++;
+	} else if (pkt_len > 64 && pkt_len < 1024) {
+		index = (sizeof(pkt_len) * 8)
+			- __builtin_clz(pkt_len) - 5;
+		pstats->xstats[index]++;
+	} else {
+		if (pkt_len < 64)
+			pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+		else if (pkt_len <= 1522)
+			pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+		else if (pkt_len > 1522)
+			pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
 	}
+	vhost_count_xcast_packets(vq, buf);
 }
 
 static uint16_t
@@ -376,7 +367,6 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 	uint16_t nb_receive = nb_bufs;
-	uint64_t nb_bytes = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -411,11 +401,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 		if (r->internal->vlan_strip)
 			rte_vlan_strip(bufs[i]);
 
-		nb_bytes += bufs[i]->pkt_len;
-	}
+		r->stats.bytes += bufs[i]->pkt_len;
+		r->stats.xstats[VHOST_BYTE] += bufs[i]->pkt_len;
 
-	r->stats.bytes += nb_bytes;
-	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
+		vhost_update_single_packet_xstats(r, bufs[i]);
+	}
 
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
@@ -471,16 +461,20 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 			break;
 	}
 
-	for (i = 0; likely(i < nb_tx); i++)
+	for (i = 0; likely(i < nb_tx); i++) {
 		nb_bytes += bufs[i]->pkt_len;
+		vhost_update_single_packet_xstats(r, bufs[i]);
+	}
 
 	nb_missed = nb_bufs - nb_tx;
 
 	r->stats.pkts += nb_tx;
 	r->stats.bytes += nb_bytes;
-	r->stats.missed_pkts += nb_bufs - nb_tx;
+	r->stats.missed_pkts += nb_missed;
 
-	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
+	r->stats.xstats[VHOST_BYTE] += nb_bytes;
+	r->stats.xstats[VHOST_MISSED_PKT] += nb_missed;
+	r->stats.xstats[VHOST_UNICAST_PKT] += nb_missed;
 
 	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
 	 * ifHCOutBroadcastPkts counters are increased when packets are not
-- 
2.32.0



Thread overview: 11+ messages
2021-09-26 12:56 [dpdk-dev] [PATCH] net/vhost: merge vhost stats loop in vhost Tx/Rx Gaoxiang Liu
2021-09-27  1:30 ` Gaoxiang Liu [this message]
2021-09-28  1:43   ` [dpdk-dev] [PATCH v2] " Gaoxiang Liu
2021-10-06 10:42     ` Gaoxiang Liu
2021-10-15 12:16     ` Maxime Coquelin
2021-10-16  8:59       ` Gaoxiang Liu
2021-10-17 23:19     ` [dpdk-dev] [PATCH v3] " Gaoxiang Liu
2021-10-21 10:04       ` Maxime Coquelin
2021-10-21 12:34       ` Maxime Coquelin
2021-10-21 18:56       ` Ferruh Yigit
2021-10-22  1:35         ` Gaoxiang Liu
