From: "Guo, Junfeng" <junfeng.guo@intel.com>
To: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>,
	"Zhang, Qi Z" <qi.z.zhang@intel.com>,
	"Wu, Jingjing" <jingjing.wu@intel.com>,
	"ferruh.yigit@amd.com" <ferruh.yigit@amd.com>,
	"Xing, Beilei" <beilei.xing@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
	"Li, Xiaoyun" <xiaoyun.li@intel.com>,
	"Zhang, Helin" <helin.zhang@intel.com>,
	Rushil Gupta <rushilg@google.com>,
	Jordan Kimbrough <jrkim@google.com>,
	Jeroen de Borst <jeroendb@google.com>, nd <nd@arm.com>,
	nd <nd@arm.com>
Subject: RE: [RFC v3 06/10] net/gve: support basic Rx data path for DQO
Date: Thu, 23 Feb 2023 05:32:55 +0000
Message-ID: <DM6PR11MB3723AF11C7BAAFDC480C607FE7AB9@DM6PR11MB3723.namprd11.prod.outlook.com>
In-Reply-To: <DBAPR08MB581417E8808AC435828AAE5A98A19@DBAPR08MB5814.eurprd08.prod.outlook.com>



> -----Original Message-----
> From: Honnappa Nagarahalli <Honnappa.Nagarahalli@arm.com>
> Sent: Friday, February 17, 2023 23:18
> To: Guo, Junfeng <junfeng.guo@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> ferruh.yigit@amd.com; Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Li, Xiaoyun <xiaoyun.li@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; Rushil Gupta <rushilg@google.com>; Jordan
> Kimbrough <jrkim@google.com>; Jeroen de Borst
> <jeroendb@google.com>; nd <nd@arm.com>; Honnappa Nagarahalli
> <Honnappa.Nagarahalli@arm.com>; nd <nd@arm.com>
> Subject: RE: [RFC v3 06/10] net/gve: support basic Rx data path for DQO
> 
> 
> 
> > -----Original Message-----
> > From: Junfeng Guo <junfeng.guo@intel.com>
> > Sent: Friday, February 17, 2023 1:32 AM
> > To: qi.z.zhang@intel.com; jingjing.wu@intel.com; ferruh.yigit@amd.com;
> > beilei.xing@intel.com
> > Cc: dev@dpdk.org; xiaoyun.li@intel.com; helin.zhang@intel.com;
> > Junfeng Guo <junfeng.guo@intel.com>; Rushil Gupta <rushilg@google.com>;
> > Jordan Kimbrough <jrkim@google.com>; Jeroen de Borst <jeroendb@google.com>
> > Subject: [RFC v3 06/10] net/gve: support basic Rx data path for DQO
> >
> > Add basic Rx data path support for DQO.
> >
> > Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> > Signed-off-by: Rushil Gupta <rushilg@google.com>
> > Signed-off-by: Jordan Kimbrough <jrkim@google.com>
> > Signed-off-by: Jeroen de Borst <jeroendb@google.com>
> > ---
> >  drivers/net/gve/gve_ethdev.c |   1 +
> >  drivers/net/gve/gve_ethdev.h |   3 +
> >  drivers/net/gve/gve_rx_dqo.c | 128 +++++++++++++++++++++++++++++++++++
> >  3 files changed, 132 insertions(+)
> >
> > diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
> > index 1197194e41..1c9d272c2b 100644
> > --- a/drivers/net/gve/gve_ethdev.c
> > +++ b/drivers/net/gve/gve_ethdev.c
> > @@ -766,6 +766,7 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
> >  		eth_dev->tx_pkt_burst = gve_tx_burst;
> >  	} else {
> >  		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
> > +		eth_dev->rx_pkt_burst = gve_rx_burst_dqo;
> >  		eth_dev->tx_pkt_burst = gve_tx_burst_dqo;
> >  	}
> >
> > diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
> > index f39a0884f2..a8e0dd5f3d 100644
> > --- a/drivers/net/gve/gve_ethdev.h
> > +++ b/drivers/net/gve/gve_ethdev.h
> > @@ -377,6 +377,9 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
> >  void
> >  gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
> >
> > +uint16_t
> > +gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
> > +
> >  uint16_t
> >  gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
> >
> > diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
> > index 8236cd7b50..a281b237a4 100644
> > --- a/drivers/net/gve/gve_rx_dqo.c
> > +++ b/drivers/net/gve/gve_rx_dqo.c
> > @@ -5,6 +5,134 @@
> >  #include "gve_ethdev.h"
> >  #include "base/gve_adminq.h"
> >
> > +static inline void
> > +gve_rx_refill_dqo(struct gve_rx_queue *rxq)
> > +{
> > +	volatile struct gve_rx_desc_dqo *rx_buf_ring;
> > +	volatile struct gve_rx_desc_dqo *rx_buf_desc;
> > +	struct rte_mbuf *nmb[rxq->free_thresh];
> > +	uint16_t nb_refill = rxq->free_thresh;
> > +	uint16_t nb_desc = rxq->nb_rx_desc;
> > +	uint16_t next_avail = rxq->bufq_tail;
> > +	struct rte_eth_dev *dev;
> > +	uint64_t dma_addr;
> > +	uint16_t delta;
> > +	int i;
> > +
> > +	if (rxq->nb_rx_hold < rxq->free_thresh)
> > +		return;
> > +
> > +	rx_buf_ring = rxq->rx_ring;
> > +	delta = nb_desc - next_avail;
> > +	if (unlikely(delta < nb_refill)) {
> > +		if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, delta) == 0)) {
> > +			for (i = 0; i < delta; i++) {
> > +				rx_buf_desc = &rx_buf_ring[next_avail + i];
> > +				rxq->sw_ring[next_avail + i] = nmb[i];
> > +				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
> > +				rx_buf_desc->header_buf_addr = 0;
> > +				rx_buf_desc->buf_addr = dma_addr;
> > +			}
> > +			nb_refill -= delta;
> > +			next_avail = 0;
> > +			rxq->nb_rx_hold -= delta;
> > +		} else {
> > +			dev = &rte_eth_devices[rxq->port_id];
> > +			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
> > +			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
> > +				    rxq->port_id, rxq->queue_id);
> > +			return;
> > +		}
> > +	}
> > +
> > +	if (nb_desc - next_avail >= nb_refill) {
> > +		if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill) == 0)) {
> > +			for (i = 0; i < nb_refill; i++) {
> > +				rx_buf_desc = &rx_buf_ring[next_avail + i];
> > +				rxq->sw_ring[next_avail + i] = nmb[i];
> > +				dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
> > +				rx_buf_desc->header_buf_addr = 0;
> > +				rx_buf_desc->buf_addr = dma_addr;
> > +			}
> > +			next_avail += nb_refill;
> > +			rxq->nb_rx_hold -= nb_refill;
> > +		} else {
> > +			dev = &rte_eth_devices[rxq->port_id];
> > +			dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
> > +			PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
> > +				    rxq->port_id, rxq->queue_id);
> > +		}
> > +	}
> > +
> > +	rte_write32(next_avail, rxq->qrx_tail);
> > +
> > +	rxq->bufq_tail = next_avail;
> > +}
> > +
> > +uint16_t
> > +gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> > +{
> > +	volatile struct gve_rx_compl_desc_dqo *rx_compl_ring;
> > +	volatile struct gve_rx_compl_desc_dqo *rx_desc;
> > +	struct gve_rx_queue *rxq;
> > +	struct rte_mbuf *rxm;
> > +	uint16_t rx_id_bufq;
> > +	uint16_t pkt_len;
> > +	uint16_t rx_id;
> > +	uint16_t nb_rx;
> > +
> > +	nb_rx = 0;
> > +	rxq = rx_queue;
> > +	rx_id = rxq->rx_tail;
> > +	rx_id_bufq = rxq->next_avail;
> > +	rx_compl_ring = rxq->compl_ring;
> > +
> > +	while (nb_rx < nb_pkts) {
> > +		rx_desc = &rx_compl_ring[rx_id];
> > +
> > +		/* check status */
> > +		if (rx_desc->generation != rxq->cur_gen_bit)
> > +			break;
> From my experience with other PMDs, I think an IO read barrier is
> needed here to ensure the other descriptor fields are not loaded
> before rx_desc->generation is checked.

Yes, a read barrier is needed here to prevent the loads of the other
descriptor fields from being reordered before the generation-bit load,
whether by the compiler or by the CPU on weakly ordered architectures.
We will refine this in the coming versions. Thanks a lot!
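
For reference, something along these lines is what we have in mind
(an untested sketch, assuming rte_io_rmb() from rte_io.h is the right
barrier for the completion ring in host memory):

@@ gve_rx_burst_dqo @@
 		/* check status */
 		if (rx_desc->generation != rxq->cur_gen_bit)
 			break;
 
+		/*
+		 * Read barrier: make sure the loads of the remaining
+		 * descriptor fields (rx_error, packet_len, hash, ...)
+		 * cannot be hoisted above the generation-bit check.
+		 */
+		rte_io_rmb();
+
 		if (unlikely(rx_desc->rx_error))
 			continue;

On x86 rte_io_rmb() compiles down to a compiler barrier, so the extra
cost should only show up on weakly ordered platforms.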

> 
> > +
> > +		if (unlikely(rx_desc->rx_error))
> > +			continue;
> > +
> > +		pkt_len = rx_desc->packet_len;
> > +
> > +		rx_id++;
> > +		if (rx_id == rxq->nb_rx_desc) {
> > +			rx_id = 0;
> > +			rxq->cur_gen_bit ^= 1;
> > +		}
> > +
> > +		rxm = rxq->sw_ring[rx_id_bufq];
> > +		rx_id_bufq++;
> > +		if (rx_id_bufq == rxq->nb_rx_desc)
> > +			rx_id_bufq = 0;
> > +		rxq->nb_rx_hold++;
> > +
> > +		rxm->pkt_len = pkt_len;
> > +		rxm->data_len = pkt_len;
> > +		rxm->port = rxq->port_id;
> > +		rxm->ol_flags = 0;
> > +
> > +		rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
> > +		rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash);
> > +
> > +		rx_pkts[nb_rx++] = rxm;
> > +	}
> > +
> > +	if (nb_rx > 0) {
> > +		rxq->rx_tail = rx_id;
> > +		if (rx_id_bufq != rxq->next_avail)
> > +			rxq->next_avail = rx_id_bufq;
> > +
> > +		gve_rx_refill_dqo(rxq);
> > +	}
> > +
> > +	return nb_rx;
> > +}
> > +
> >  static inline void
> >  gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
> >  {
> > --
> > 2.34.1

