DPDK-dev Archive on lore.kernel.org
 help / color / Atom feed
From: Ye Xiaolong <xiaolong.ye@intel.com>
To: Xiao Zhang <xiao.zhang@intel.com>
Cc: dev@dpdk.org, qiming.yang@intel.com, qi.z.zhang@intel.com,
	stable@dpdk.org
Subject: Re: [dpdk-dev] [v3] net/ice: enable multi-process support
Date: Thu, 25 Jul 2019 18:10:47 +0800
Message-ID: <20190725101047.GC18332@intel.com> (raw)
In-Reply-To: <1564053515-38009-1-git-send-email-xiao.zhang@intel.com>

On 07/25, Xiao Zhang wrote:
>Add multiple processes support for ice, secondary processes will share
>memory and configuration with primary process, do not need further
>initialization for secondary processes.
>
>Cc: stable@dpdk.org

This is a new feature, so there's no need to cc stable@dpdk.org.

Thanks,
Xiaolong

>
>Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
>---
>v3 Update the title and commit message.
>v2 Remove limitation for secondary processes control path configuration.
>---
> drivers/net/ice/ice_ethdev.c | 12 +++++++
> drivers/net/ice/ice_ethdev.h |  2 ++
> drivers/net/ice/ice_rxtx.c   | 74 ++++++++++++++++++++++++++++----------------
> 3 files changed, 62 insertions(+), 26 deletions(-)
>
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 9ce730c..532f4db 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -1408,6 +1408,15 @@ ice_dev_init(struct rte_eth_dev *dev)
> 	dev->tx_pkt_burst = ice_xmit_pkts;
> 	dev->tx_pkt_prepare = ice_prep_pkts;
> 
>+	/* for secondary processes, we don't initialise any further as primary
>+	 * has already done this work.
>+	 */
>+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
>+		ice_set_rx_function(dev);
>+		ice_set_tx_function(dev);
>+		return 0;
>+	}
>+
> 	ice_set_default_ptype_table(dev);
> 	pci_dev = RTE_DEV_TO_PCI(dev->device);
> 	intr_handle = &pci_dev->intr_handle;
>@@ -1638,6 +1647,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> 	struct rte_flow *p_flow;
> 
>+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
>+		return 0;
>+
> 	ice_dev_close(dev);
> 
> 	dev->dev_ops = NULL;
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index 8a52239..a083616 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -282,6 +282,8 @@ struct ice_adapter {
> 	struct rte_eth_dev *eth_dev;
> 	struct ice_pf pf;
> 	bool rx_bulk_alloc_allowed;
>+	bool rx_vec_allowed;
>+	bool tx_vec_allowed;
> 	bool tx_simple_allowed;
> 	/* ptype mapping table */
> 	uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
>diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
>index 035ed84..d67de8f 100644
>--- a/drivers/net/ice/ice_rxtx.c
>+++ b/drivers/net/ice/ice_rxtx.c
>@@ -2332,35 +2332,46 @@ ice_set_rx_function(struct rte_eth_dev *dev)
> 	int i;
> 	bool use_avx2 = false;
> 
>-	if (!ice_rx_vec_dev_check(dev)) {
>-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
>-			rxq = dev->data->rx_queues[i];
>-			(void)ice_rxq_vec_setup(rxq);
>-		}
>+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
>+		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
>+			ad->rx_vec_allowed = true;
>+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
>+				rxq = dev->data->rx_queues[i];
>+				if (rxq && ice_rxq_vec_setup(rxq)) {
>+					ad->rx_vec_allowed = false;
>+					break;
>+				}
>+			}
>+
>+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>+			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>+				use_avx2 = true;
> 
>-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>-			use_avx2 = true;
>+		} else {
>+			ad->rx_vec_allowed = false;
>+		}
>+	}
> 
>+	if (ad->rx_vec_allowed) {
> 		if (dev->data->scattered_rx) {
> 			PMD_DRV_LOG(DEBUG,
>-				    "Using %sVector Scattered Rx (port %d).",
>-				    use_avx2 ? "avx2 " : "",
>-				    dev->data->port_id);
>+					"Using %sVector Scattered Rx (port %d).",
>+					use_avx2 ? "avx2 " : "",
>+					dev->data->port_id);
> 			dev->rx_pkt_burst = use_avx2 ?
>-					    ice_recv_scattered_pkts_vec_avx2 :
>-					    ice_recv_scattered_pkts_vec;
>+					ice_recv_scattered_pkts_vec_avx2 :
>+					ice_recv_scattered_pkts_vec;
> 		} else {
> 			PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
>-				    use_avx2 ? "avx2 " : "",
>-				    dev->data->port_id);
>+					use_avx2 ? "avx2 " : "",
>+					dev->data->port_id);
> 			dev->rx_pkt_burst = use_avx2 ?
>-					    ice_recv_pkts_vec_avx2 :
>-					    ice_recv_pkts_vec;
>+						ice_recv_pkts_vec_avx2 :
>+						ice_recv_pkts_vec;
> 		}
>-
> 		return;
> 	}
>+
> #endif
> 
> 	if (dev->data->scattered_rx) {
>@@ -2464,16 +2475,27 @@ ice_set_tx_function(struct rte_eth_dev *dev)
> 	int i;
> 	bool use_avx2 = false;
> 
>-	if (!ice_tx_vec_dev_check(dev)) {
>-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
>-			txq = dev->data->tx_queues[i];
>-			(void)ice_txq_vec_setup(txq);
>-		}
>+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
>+		if (!ice_tx_vec_dev_check(dev)) {
>+			ad->tx_vec_allowed = true;
>+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
>+				txq = dev->data->tx_queues[i];
>+				if (txq && ice_txq_vec_setup(txq)) {
>+					ad->tx_vec_allowed = false;
>+					break;
>+				}
>+			}
> 
>-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>-			use_avx2 = true;
>+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>+			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>+				use_avx2 = true;
>+
>+		} else {
>+			ad->tx_vec_allowed = false;
>+		}
>+	}
> 
>+	if (ad->tx_vec_allowed) {
> 		PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
> 			    use_avx2 ? "avx2 " : "",
> 			    dev->data->port_id);
>-- 
>2.7.4
>

      parent reply index

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-07-19 16:13 [dpdk-dev] [DPDK] net/ice: CVL " Xiao Zhang
2019-07-19 17:53 ` [dpdk-dev] " Xiao Zhang
2019-07-24 16:56   ` [dpdk-dev] [v2] " Xiao Zhang
2019-07-25 11:18     ` [dpdk-dev] [v3] net/ice: enable " Xiao Zhang
2019-07-25  4:56       ` Zhang, Qi Z
2019-07-25 10:10       ` Ye Xiaolong [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190725101047.GC18332@intel.com \
    --to=xiaolong.ye@intel.com \
    --cc=dev@dpdk.org \
    --cc=qi.z.zhang@intel.com \
    --cc=qiming.yang@intel.com \
    --cc=stable@dpdk.org \
    --cc=xiao.zhang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

DPDK-dev Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/dpdk-dev/0 dpdk-dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dpdk-dev dpdk-dev/ https://lore.kernel.org/dpdk-dev \
		dev@dpdk.org
	public-inbox-index dpdk-dev

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git