From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: David Christensen <drc@linux.vnet.ibm.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	 "maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>,
	"Wang, Zhihong" <zhihong.wang@intel.com>
Cc: "stable@dpdk.org" <stable@dpdk.org>,
	"Yang, Zhiyong" <zhiyong.yang@intel.com>
Subject: Re: [dpdk-dev] [PATCH v2] net/vhost: fix xstats wrong after clearing stats
Date: Fri, 9 Oct 2020 03:13:37 +0000	[thread overview]
Message-ID: <MN2PR11MB40635CBAF4F5DAD9040A2B7E9C080@MN2PR11MB4063.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20201006212316.409587-1-drc@linux.vnet.ibm.com>

Hi David,

> -----Original Message-----
> From: David Christensen <drc@linux.vnet.ibm.com>
> Sent: Wednesday, October 7, 2020 5:23 AM
> To: dev@dpdk.org; maxime.coquelin@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>
> Cc: stable@dpdk.org; David Christensen <drc@linux.vnet.ibm.com>; Yang,
> Zhiyong <zhiyong.yang@intel.com>
> Subject: [PATCH v2] net/vhost: fix xstats wrong after clearing stats
> 
> The PMD API allows stats and xstats values to be cleared separately.
> This is a problem for the vhost PMD since some of the xstats values are
> derived from existing stats values.  For example:
> 
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 17562959
> ...
> testpmd> clear port stats all
> ...
> testpmd> show port xstats all
> ...
> tx_unicast_packets: 18446744073709551615
> ...
> 
> Modify the driver so that stats and xstats values are stored, updated,
> and cleared separately.
> 
> Fixes: 4d6cf2ac93dc ("net/vhost: add extended statistics")
> Cc: zhiyong.yang@intel.com

Better to replace this Cc with 'Cc: stable@dpdk.org', as other fix patches
do. You can still Cc Zhiyong directly via the git send-email command.
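
For reference, the wrap-around value in the commit message
(18446744073709551615 = 2^64 - 1) is a uint64_t underflow: before this
patch, tx_unicast_packets was derived at read time as stats.pkts +
stats.missed_pkts - (broadcast + multicast), so clearing the basic stats
alone drives the subtraction negative. A minimal standalone sketch of the
failure mode (counter values here are illustrative, not from the driver):

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t pkts = 17562959;           /* basic stat              */
          uint64_t bcast = 100, mcast = 200;  /* xstats, survive a clear */

          pkts = 0;  /* "clear port stats all" zeroes only the basic stats */

          /* old read-time derivation: wraps to a value near 2^64 */
          uint64_t unicast = pkts - (bcast + mcast);
          printf("tx_unicast_packets: %" PRIu64 "\n", unicast);
          return 0;
  }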

> 
> Signed-off-by: David Christensen <drc@linux.vnet.ibm.com>
> ---
> v2:
> * Removed newly unused vq loops
> * Added "fixes" message
> * Renamed vhost_count_multicast_broadcast to vhost_count_xcast_packets
> 
>  drivers/net/vhost/rte_eth_vhost.c | 70 +++++++++++++++----------------
>  1 file changed, 35 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index e55278af6..163cf9409 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
>  	VHOST_BROADCAST_PKT,
>  	VHOST_MULTICAST_PKT,
>  	VHOST_UNICAST_PKT,
> +	VHOST_PKT,
> +	VHOST_BYTE,
> +	VHOST_MISSED_PKT,
>  	VHOST_ERRORS_PKT,
>  	VHOST_ERRORS_FRAGMENTED,
>  	VHOST_ERRORS_JABBER,
> @@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
>  /* [rx]_is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>  /* [tx]_ is prepended to the name string here */
>  static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
>  	{"good_packets",
> -	 offsetof(struct vhost_queue, stats.pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>  	{"total_bytes",
> -	 offsetof(struct vhost_queue, stats.bytes)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>  	{"missed_pkts",
> -	 offsetof(struct vhost_queue, stats.missed_pkts)},
> +	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>  	{"broadcast_packets",
>  	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>  	{"multicast_packets",
> @@ -287,23 +290,6 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
>  	if (n < nxstats)
>  		return nxstats;
> 
> -	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		vq = dev->data->rx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
> -	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		vq = dev->data->tx_queues[i];
> -		if (!vq)
> -			continue;
> -		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
> -				+ vq->stats.missed_pkts
> -				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
> -				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
> -	}
>  	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
>  		xstats[count].value = 0;
>  		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> @@ -334,7 +320,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
>  }
> 
>  static inline void
> -vhost_count_multicast_broadcast(struct vhost_queue *vq,
> +vhost_count_xcast_packets(struct vhost_queue *vq,
>  				struct rte_mbuf *mbuf)
>  {
>  	struct rte_ether_addr *ea = NULL;
> @@ -346,20 +332,27 @@ vhost_count_multicast_broadcast(struct vhost_queue *vq,
>  			pstats->xstats[VHOST_BROADCAST_PKT]++;
>  		else
>  			pstats->xstats[VHOST_MULTICAST_PKT]++;
> +	} else {
> +		pstats->xstats[VHOST_UNICAST_PKT]++;
>  	}
>  }
> 
>  static void
> -vhost_update_packet_xstats(struct vhost_queue *vq,
> -			   struct rte_mbuf **bufs,
> -			   uint16_t count)
> +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
> +			   uint16_t count, uint64_t nb_bytes,
> +			   uint64_t nb_missed)
>  {
>  	uint32_t pkt_len = 0;
>  	uint64_t i = 0;
>  	uint64_t index;
>  	struct vhost_stats *pstats = &vq->stats;
> 
> +	pstats->xstats[VHOST_BYTE] += nb_bytes;
> +	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
> +	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
> +
>  	for (i = 0; i < count ; i++) {
> +		pstats->xstats[VHOST_PKT]++;
>  		pkt_len = bufs[i]->pkt_len;
>  		if (pkt_len == 64) {
>  			pstats->xstats[VHOST_64_PKT]++;
> @@ -375,7 +368,7 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
>  			else if (pkt_len > 1522)
>  				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
>  		}
> -		vhost_count_multicast_broadcast(vq, bufs[i]);
> +		vhost_count_xcast_packets(vq, bufs[i]);
>  	}
>  }
> 
> @@ -385,6 +378,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_rx = 0;
>  	uint16_t nb_receive = nb_bufs;
> +	uint64_t nb_bytes = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -419,10 +413,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  		if (r->internal->vlan_strip)
>  			rte_vlan_strip(bufs[i]);
> 
> -		r->stats.bytes += bufs[i]->pkt_len;
> +		nb_bytes += bufs[i]->pkt_len;
>  	}
> 
> -	vhost_update_packet_xstats(r, bufs, nb_rx);
> +	r->stats.bytes += nb_bytes;
> +	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
> 
>  out:
>  	rte_atomic32_set(&r->while_queuing, 0);
> @@ -436,6 +431,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  	struct vhost_queue *r = q;
>  	uint16_t i, nb_tx = 0;
>  	uint16_t nb_send = 0;
> +	uint64_t nb_bytes = 0;
> +	uint64_t nb_missed = 0;
> 
>  	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>  		return 0;
> @@ -476,20 +473,23 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
>  			break;
>  	}
> 
> +	for (i = 0; likely(i < nb_tx); i++)
> +		nb_bytes += bufs[i]->pkt_len;
> +
> +	nb_missed = nb_bufs - nb_tx;
> +
>  	r->stats.pkts += nb_tx;
> +	r->stats.bytes += nb_bytes;
>  	r->stats.missed_pkts += nb_bufs - nb_tx;
> 
> -	for (i = 0; likely(i < nb_tx); i++)
> -		r->stats.bytes += bufs[i]->pkt_len;
> -
> -	vhost_update_packet_xstats(r, bufs, nb_tx);
> +	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
> 
>  	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
>  	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
>  	 * are increased when packets are not transmitted successfully.
>  	 */

I think the above comment should be updated, because the function below
now counts unicast packets as well (based on RFC2863).
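Something along these lines, perhaps (illustrative wording only):

  	/* According to RFC2863, sections ifHCOutMulticastPkts and
  	 * ifHCOutBroadcastPkts, the "multicast", "broadcast" and
  	 * "unicast" counters are all increased when packets are
  	 * not transmitted successfully.
  	 */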

Thanks!
Chenbo

>  	for (i = nb_tx; i < nb_bufs; i++)
> -		vhost_count_multicast_broadcast(r, bufs[i]);
> +		vhost_count_xcast_packets(r, bufs[i]);
> 
>  	for (i = 0; likely(i < nb_tx); i++)
>  		rte_pktmbuf_free(bufs[i]);
> --
> 2.18.4
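
With the patch applied, the sequence from the commit message should no
longer wrap, and xstats can be cleared on their own; the counter values
below are illustrative:

  testpmd> show port xstats all
  ...
  tx_unicast_packets: 17562959
  ...
  testpmd> clear port stats all
  testpmd> show port xstats all
  ...
  tx_unicast_packets: 17562959
  ...
  testpmd> clear port xstats all
  testpmd> show port xstats all
  ...
  tx_unicast_packets: 0
  ...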


Thread overview: 11+ messages
2020-09-02 17:03 [dpdk-dev] [PATCH] net/vhost: fix xstats wrong after clearing stats David Christensen
2020-09-11  7:44 ` Xia, Chenbo
2020-09-23  8:07   ` Maxime Coquelin
2020-10-05 17:43     ` David Christensen
2020-10-06  7:32       ` Maxime Coquelin
2020-10-06 21:23 ` [dpdk-dev] [PATCH v2] " David Christensen
2020-10-09  3:13   ` Xia, Chenbo [this message]
2020-10-15 17:49   ` [dpdk-dev] [PATCH v3] " David Christensen
2020-10-16  1:38     ` Xia, Chenbo
2020-10-23 10:54     ` Maxime Coquelin
2020-10-23 11:22     ` Maxime Coquelin
