* [dpdk-dev] [DPDK] net/ice: CVL multi-process support
@ 2019-07-19 16:13 Xiao Zhang
  2019-07-19 17:53 ` [dpdk-dev] " Xiao Zhang
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Zhang @ 2019-07-19 16:13 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, haiyue.wang, Xiao Zhang, stable

Add multi-process support for CVL. Secondary processes share memory
with the primary process, so no separate allocation is needed for them.
Restrict configuration ops to the primary process: secondary processes
must not perform configuration themselves and instead rely on the
configuration shared by the primary process.
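
For illustration, a minimal sketch of the guard that is added to each
configuration op (the function name example_configure_op is hypothetical;
rte_eal_process_type() and E_RTE_SECONDARY are the real EAL interfaces the
patch relies on):

	#include <rte_eal.h>     /* rte_eal_process_type(), RTE_PROC_PRIMARY */
	#include <rte_errno.h>   /* E_RTE_SECONDARY */

	/* Hypothetical configuration op: secondary processes return early
	 * with -E_RTE_SECONDARY so only the primary changes device state.
	 */
	static int
	example_configure_op(void)
	{
		if (rte_eal_process_type() != RTE_PROC_PRIMARY)
			return -E_RTE_SECONDARY;

		/* ... configuration work performed by the primary ... */
		return 0;
	}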

Cc: stable@dpdk.org

Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 85 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.c   | 24 +++++++++++++
 2 files changed, 109 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 9ce730c..b2ef21f 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1408,6 +1408,12 @@ ice_dev_init(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = ice_xmit_pkts;
 	dev->tx_pkt_prepare = ice_prep_pkts;
 
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_set_default_ptype_table(dev);
 	pci_dev = RTE_DEV_TO_PCI(dev->device);
 	intr_handle = &pci_dev->intr_handle;
@@ -1574,6 +1580,9 @@ ice_dev_stop(struct rte_eth_dev *dev)
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint16_t i;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* avoid stopping again */
 	if (pf->adapter_stopped)
 		return;
@@ -1610,6 +1619,9 @@ ice_dev_close(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* Since stop will make link down, then the link event will be
 	 * triggered, disable the irq firstly to avoid the port_infoe etc
 	 * resources deallocation causing the interrupt service thread
@@ -1638,6 +1650,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct rte_flow *p_flow;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_dev_close(dev);
 
 	dev->dev_ops = NULL;
@@ -1670,6 +1685,9 @@ ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
 	 * bulk allocation or vector Rx preconditions we will reset it.
 	 */
@@ -1948,6 +1966,9 @@ ice_dev_start(struct rte_eth_dev *dev)
 	uint16_t nb_txq, i;
 	int mask, ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* program Tx queues' context in hardware */
 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
 		ret = ice_tx_queue_start(dev, nb_txq);
@@ -2031,6 +2052,9 @@ ice_dev_reset(struct rte_eth_dev *dev)
 {
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (dev->data->sriov.active)
 		return -ENOTSUP;
 
@@ -2211,6 +2235,9 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	unsigned int rep_cnt = MAX_REPEAT_TIME;
 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	memset(&link, 0, sizeof(link));
 	memset(&old, 0, sizeof(old));
 	memset(&link_status, 0, sizeof(link_status));
@@ -2350,6 +2377,8 @@ ice_dev_set_link_up(struct rte_eth_dev *dev)
 {
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
 	return ice_force_phys_link_state(hw, true);
 }
 
@@ -2358,6 +2387,8 @@ ice_dev_set_link_down(struct rte_eth_dev *dev)
 {
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
 	return ice_force_phys_link_state(hw, false);
 }
 
@@ -2368,6 +2399,9 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct rte_eth_dev_data *dev_data = pf->dev_data;
 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* check if mtu is within the allowed range */
 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
 		return -EINVAL;
@@ -2402,6 +2436,9 @@ static int ice_macaddr_set(struct rte_eth_dev *dev,
 	uint8_t flags = 0;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
 		return -EINVAL;
@@ -2448,6 +2485,9 @@ ice_macaddr_add(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi = pf->main_vsi;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	ret = ice_add_mac_filter(vsi, mac_addr);
 	if (ret != ICE_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
@@ -2467,6 +2507,9 @@ ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 	struct rte_ether_addr *macaddr;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	macaddr = &data->mac_addrs[index];
 	ret = ice_remove_mac_filter(vsi, macaddr);
 	if (ret) {
@@ -2484,6 +2527,9 @@ ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (on) {
 		ret = ice_add_vlan_filter(vsi, vlan_id);
 		if (ret < 0) {
@@ -2602,6 +2648,9 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct ice_vsi *vsi = pf->main_vsi;
 	struct rte_eth_rxmode *rxmode;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
@@ -2639,6 +2688,9 @@ ice_vlan_tpid_set(struct rte_eth_dev *dev,
 	int qinq = dev->data->dev_conf.rxmode.offloads &
 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	switch (vlan_type) {
 	case ETH_VLAN_TYPE_OUTER:
 		if (qinq)
@@ -2749,6 +2801,9 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 	uint8_t *lut;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
@@ -2891,6 +2946,9 @@ ice_rss_hash_update(struct rte_eth_dev *dev,
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_vsi *vsi = pf->main_vsi;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* set hash key */
 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
 	if (status)
@@ -2924,6 +2982,9 @@ ice_promisc_enable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
@@ -2943,6 +3004,9 @@ ice_promisc_disable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
@@ -2960,6 +3024,9 @@ ice_allmulti_enable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
@@ -2976,6 +3043,9 @@ ice_allmulti_disable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	if (dev->data->promiscuous == 1)
 		return; /* must remain in all_multicast mode */
 
@@ -2995,6 +3065,9 @@ static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
 	uint32_t val;
 	uint16_t msix_intr;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	msix_intr = intr_handle->intr_vec[queue_id];
 
 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@@ -3015,6 +3088,9 @@ static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t msix_intr;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	msix_intr = intr_handle->intr_vec[queue_id];
 
 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
@@ -3059,6 +3135,9 @@ ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
 	uint8_t vlan_flags = 0;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (!vsi || !info) {
 		PMD_DRV_LOG(ERR, "invalid parameters");
 		return -EINVAL;
@@ -3113,6 +3192,9 @@ ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 	struct ice_vsi_vlan_pvid_info info;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	memset(&info, 0, sizeof(info));
 	info.on = on;
 	if (info.on) {
@@ -3555,6 +3637,9 @@ ice_stats_reset(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* Mark PF and VSI stats to update the offset, aka "reset" */
 	pf->offset_loaded = false;
 	if (pf->main_vsi)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 035ed84..2a8b888 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -337,6 +337,9 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
 		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
 			    rx_queue_id, dev->data->nb_rx_queues);
@@ -391,6 +394,9 @@ ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	int err;
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (rx_queue_id < dev->data->nb_rx_queues) {
 		rxq = dev->data->rx_queues[rx_queue_id];
 
@@ -421,6 +427,9 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
 			    tx_queue_id, dev->data->nb_tx_queues);
@@ -548,6 +557,9 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	uint32_t q_teids[1];
 	uint16_t q_handle = tx_queue_id;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
 			    tx_queue_id, dev->data->nb_tx_queues);
@@ -597,6 +609,9 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	int use_def_burst_func = 1;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
 	    nb_desc < ICE_MIN_RING_DESC) {
@@ -714,6 +729,9 @@ ice_rx_queue_release(void *rxq)
 {
 	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
 	if (!q) {
 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
 		return;
@@ -739,6 +757,9 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint64_t offloads;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
@@ -910,6 +931,9 @@ ice_tx_queue_release(void *txq)
 {
 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
 	if (!q) {
 		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
 		return;
-- 
2.7.4



* [dpdk-dev] net/ice: CVL multi-process support
  2019-07-19 16:13 [dpdk-dev] [DPDK] net/ice: CVL multi-process support Xiao Zhang
@ 2019-07-19 17:53 ` Xiao Zhang
  2019-07-24 16:56   ` [dpdk-dev] [v2] " Xiao Zhang
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Zhang @ 2019-07-19 17:53 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, haiyue.wang, Xiao Zhang, stable

Add multi-process support for CVL. Secondary processes share memory
with the primary process, so no separate allocation is needed for them.
Restrict configuration ops to the primary process: secondary processes
must not perform configuration themselves and instead rely on the
configuration shared by the primary process.

Cc: stable@dpdk.org

Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 85 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.c   | 24 +++++++++++++
 2 files changed, 109 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 9ce730c..b2ef21f 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1408,6 +1408,12 @@ ice_dev_init(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = ice_xmit_pkts;
 	dev->tx_pkt_prepare = ice_prep_pkts;
 
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_set_default_ptype_table(dev);
 	pci_dev = RTE_DEV_TO_PCI(dev->device);
 	intr_handle = &pci_dev->intr_handle;
@@ -1574,6 +1580,9 @@ ice_dev_stop(struct rte_eth_dev *dev)
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint16_t i;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* avoid stopping again */
 	if (pf->adapter_stopped)
 		return;
@@ -1610,6 +1619,9 @@ ice_dev_close(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* Since stop will make link down, then the link event will be
 	 * triggered, disable the irq firstly to avoid the port_infoe etc
 	 * resources deallocation causing the interrupt service thread
@@ -1638,6 +1650,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct rte_flow *p_flow;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_dev_close(dev);
 
 	dev->dev_ops = NULL;
@@ -1670,6 +1685,9 @@ ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* Initialize to TRUE. If any of Rx queues doesn't meet the
 	 * bulk allocation or vector Rx preconditions we will reset it.
 	 */
@@ -1948,6 +1966,9 @@ ice_dev_start(struct rte_eth_dev *dev)
 	uint16_t nb_txq, i;
 	int mask, ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* program Tx queues' context in hardware */
 	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
 		ret = ice_tx_queue_start(dev, nb_txq);
@@ -2031,6 +2052,9 @@ ice_dev_reset(struct rte_eth_dev *dev)
 {
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (dev->data->sriov.active)
 		return -ENOTSUP;
 
@@ -2211,6 +2235,9 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	unsigned int rep_cnt = MAX_REPEAT_TIME;
 	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	memset(&link, 0, sizeof(link));
 	memset(&old, 0, sizeof(old));
 	memset(&link_status, 0, sizeof(link_status));
@@ -2350,6 +2377,8 @@ ice_dev_set_link_up(struct rte_eth_dev *dev)
 {
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
 	return ice_force_phys_link_state(hw, true);
 }
 
@@ -2358,6 +2387,8 @@ ice_dev_set_link_down(struct rte_eth_dev *dev)
 {
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
 	return ice_force_phys_link_state(hw, false);
 }
 
@@ -2368,6 +2399,9 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	struct rte_eth_dev_data *dev_data = pf->dev_data;
 	uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* check if mtu is within the allowed range */
 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
 		return -EINVAL;
@@ -2402,6 +2436,9 @@ static int ice_macaddr_set(struct rte_eth_dev *dev,
 	uint8_t flags = 0;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
 		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
 		return -EINVAL;
@@ -2448,6 +2485,9 @@ ice_macaddr_add(struct rte_eth_dev *dev,
 	struct ice_vsi *vsi = pf->main_vsi;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	ret = ice_add_mac_filter(vsi, mac_addr);
 	if (ret != ICE_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
@@ -2467,6 +2507,9 @@ ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 	struct rte_ether_addr *macaddr;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	macaddr = &data->mac_addrs[index];
 	ret = ice_remove_mac_filter(vsi, macaddr);
 	if (ret) {
@@ -2484,6 +2527,9 @@ ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (on) {
 		ret = ice_add_vlan_filter(vsi, vlan_id);
 		if (ret < 0) {
@@ -2602,6 +2648,9 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	struct ice_vsi *vsi = pf->main_vsi;
 	struct rte_eth_rxmode *rxmode;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	rxmode = &dev->data->dev_conf.rxmode;
 	if (mask & ETH_VLAN_FILTER_MASK) {
 		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
@@ -2639,6 +2688,9 @@ ice_vlan_tpid_set(struct rte_eth_dev *dev,
 	int qinq = dev->data->dev_conf.rxmode.offloads &
 		   DEV_RX_OFFLOAD_VLAN_EXTEND;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	switch (vlan_type) {
 	case ETH_VLAN_TYPE_OUTER:
 		if (qinq)
@@ -2749,6 +2801,9 @@ ice_rss_reta_update(struct rte_eth_dev *dev,
 	uint8_t *lut;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
 	    reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
@@ -2891,6 +2946,9 @@ ice_rss_hash_update(struct rte_eth_dev *dev,
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_vsi *vsi = pf->main_vsi;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	/* set hash key */
 	status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
 	if (status)
@@ -2924,6 +2982,9 @@ ice_promisc_enable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
@@ -2943,6 +3004,9 @@ ice_promisc_disable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
 		ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
@@ -2960,6 +3024,9 @@ ice_allmulti_enable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
 
 	status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
@@ -2976,6 +3043,9 @@ ice_allmulti_disable(struct rte_eth_dev *dev)
 	enum ice_status status;
 	uint8_t pmask;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	if (dev->data->promiscuous == 1)
 		return; /* must remain in all_multicast mode */
 
@@ -2995,6 +3065,9 @@ static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
 	uint32_t val;
 	uint16_t msix_intr;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	msix_intr = intr_handle->intr_vec[queue_id];
 
 	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@@ -3015,6 +3088,9 @@ static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint16_t msix_intr;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	msix_intr = intr_handle->intr_vec[queue_id];
 
 	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
@@ -3059,6 +3135,9 @@ ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
 	uint8_t vlan_flags = 0;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (!vsi || !info) {
 		PMD_DRV_LOG(ERR, "invalid parameters");
 		return -EINVAL;
@@ -3113,6 +3192,9 @@ ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 	struct ice_vsi_vlan_pvid_info info;
 	int ret;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	memset(&info, 0, sizeof(info));
 	info.on = on;
 	if (info.on) {
@@ -3555,6 +3637,9 @@ ice_stats_reset(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
 	/* Mark PF and VSI stats to update the offset, aka "reset" */
 	pf->offset_loaded = false;
 	if (pf->main_vsi)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 035ed84..2a8b888 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -337,6 +337,9 @@ ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
 		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
 			    rx_queue_id, dev->data->nb_rx_queues);
@@ -391,6 +394,9 @@ ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	int err;
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (rx_queue_id < dev->data->nb_rx_queues) {
 		rxq = dev->data->rx_queues[rx_queue_id];
 
@@ -421,6 +427,9 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
 			    tx_queue_id, dev->data->nb_tx_queues);
@@ -548,6 +557,9 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	uint32_t q_teids[1];
 	uint16_t q_handle = tx_queue_id;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
 			    tx_queue_id, dev->data->nb_tx_queues);
@@ -597,6 +609,9 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len;
 	int use_def_burst_func = 1;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
 	    nb_desc < ICE_MIN_RING_DESC) {
@@ -714,6 +729,9 @@ ice_rx_queue_release(void *rxq)
 {
 	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
 	if (!q) {
 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
 		return;
@@ -739,6 +757,9 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t tx_rs_thresh, tx_free_thresh;
 	uint64_t offloads;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -E_RTE_SECONDARY;
+
 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
@@ -910,6 +931,9 @@ ice_tx_queue_release(void *txq)
 {
 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return;
+
 	if (!q) {
 		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
 		return;
-- 
2.7.4



* [dpdk-dev] [v2] net/ice: CVL multi-process support
  2019-07-19 17:53 ` [dpdk-dev] " Xiao Zhang
@ 2019-07-24 16:56   ` Xiao Zhang
  2019-07-25 11:18     ` [dpdk-dev] [v3] net/ice: enable " Xiao Zhang
  0 siblings, 1 reply; 6+ messages in thread
From: Xiao Zhang @ 2019-07-24 16:56 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, qi.z.zhang, Xiao Zhang, stable

Add multi-process support for CVL. Secondary processes share memory
and configuration with the primary process, so no further
initialization is needed for them.
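
As a rough application-side sketch (hypothetical code, not part of the
patch), a process launched with the EAL option --proc-type=secondary
attaches to the shared configuration and only runs the data path. It
assumes the primary process has already configured and started port 0 and
that queue 0 of that port is polled by no other process:

	#include <rte_common.h>
	#include <rte_eal.h>
	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	int
	main(int argc, char **argv)
	{
		struct rte_mbuf *pkts[32];
		const uint16_t port_id = 0;  /* assumed: set up by the primary */
		uint16_t nb, i;

		if (rte_eal_init(argc, argv) < 0)
			return -1;

		/* no rte_eth_dev_configure()/start() here: the primary owns them */
		for (;;) {
			nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
			for (i = 0; i < nb; i++)
				rte_pktmbuf_free(pkts[i]);  /* real work would go here */
		}
		return 0;
	}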

Cc: stable@dpdk.org

Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
---
v2 Remove limitation for secondary processes control path configuration.
---
 drivers/net/ice/ice_ethdev.c | 12 +++++++
 drivers/net/ice/ice_ethdev.h |  2 ++
 drivers/net/ice/ice_rxtx.c   | 74 ++++++++++++++++++++++++++++----------------
 3 files changed, 62 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 9ce730c..532f4db 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1408,6 +1408,15 @@ ice_dev_init(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = ice_xmit_pkts;
 	dev->tx_pkt_prepare = ice_prep_pkts;
 
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		ice_set_rx_function(dev);
+		ice_set_tx_function(dev);
+		return 0;
+	}
+
 	ice_set_default_ptype_table(dev);
 	pci_dev = RTE_DEV_TO_PCI(dev->device);
 	intr_handle = &pci_dev->intr_handle;
@@ -1638,6 +1647,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct rte_flow *p_flow;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_dev_close(dev);
 
 	dev->dev_ops = NULL;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 8a52239..a083616 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -282,6 +282,8 @@ struct ice_adapter {
 	struct rte_eth_dev *eth_dev;
 	struct ice_pf pf;
 	bool rx_bulk_alloc_allowed;
+	bool rx_vec_allowed;
+	bool tx_vec_allowed;
 	bool tx_simple_allowed;
 	/* ptype mapping table */
 	uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 035ed84..d67de8f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2332,35 +2332,46 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 	int i;
 	bool use_avx2 = false;
 
-	if (!ice_rx_vec_dev_check(dev)) {
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			(void)ice_rxq_vec_setup(rxq);
-		}
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
+			ad->rx_vec_allowed = true;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				if (rxq && ice_rxq_vec_setup(rxq)) {
+					ad->rx_vec_allowed = false;
+					break;
+				}
+			}
+
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+				use_avx2 = true;
 
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
-			use_avx2 = true;
+		} else {
+			ad->rx_vec_allowed = false;
+		}
+	}
 
+	if (ad->rx_vec_allowed) {
 		if (dev->data->scattered_rx) {
 			PMD_DRV_LOG(DEBUG,
-				    "Using %sVector Scattered Rx (port %d).",
-				    use_avx2 ? "avx2 " : "",
-				    dev->data->port_id);
+					"Using %sVector Scattered Rx (port %d).",
+					use_avx2 ? "avx2 " : "",
+					dev->data->port_id);
 			dev->rx_pkt_burst = use_avx2 ?
-					    ice_recv_scattered_pkts_vec_avx2 :
-					    ice_recv_scattered_pkts_vec;
+					ice_recv_scattered_pkts_vec_avx2 :
+					ice_recv_scattered_pkts_vec;
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
-				    use_avx2 ? "avx2 " : "",
-				    dev->data->port_id);
+					use_avx2 ? "avx2 " : "",
+					dev->data->port_id);
 			dev->rx_pkt_burst = use_avx2 ?
-					    ice_recv_pkts_vec_avx2 :
-					    ice_recv_pkts_vec;
+						ice_recv_pkts_vec_avx2 :
+						ice_recv_pkts_vec;
 		}
-
 		return;
 	}
+
 #endif
 
 	if (dev->data->scattered_rx) {
@@ -2464,16 +2475,27 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	int i;
 	bool use_avx2 = false;
 
-	if (!ice_tx_vec_dev_check(dev)) {
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			txq = dev->data->tx_queues[i];
-			(void)ice_txq_vec_setup(txq);
-		}
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (!ice_tx_vec_dev_check(dev)) {
+			ad->tx_vec_allowed = true;
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				txq = dev->data->tx_queues[i];
+				if (txq && ice_txq_vec_setup(txq)) {
+					ad->tx_vec_allowed = false;
+					break;
+				}
+			}
 
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
-			use_avx2 = true;
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+				use_avx2 = true;
+
+		} else {
+			ad->tx_vec_allowed = false;
+		}
+	}
 
+	if (ad->tx_vec_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
 			    use_avx2 ? "avx2 " : "",
 			    dev->data->port_id);
-- 
2.7.4



* Re: [dpdk-dev] [v3] net/ice: enable multi-process support
  2019-07-25 11:18     ` [dpdk-dev] [v3] net/ice: enable " Xiao Zhang
@ 2019-07-25  4:56       ` Zhang, Qi Z
  2019-07-25 10:10       ` Ye Xiaolong
  1 sibling, 0 replies; 6+ messages in thread
From: Zhang, Qi Z @ 2019-07-25  4:56 UTC (permalink / raw)
  To: Zhang, Xiao, dev; +Cc: Yang, Qiming, stable



> -----Original Message-----
> From: Zhang, Xiao
> Sent: Thursday, July 25, 2019 7:19 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Zhang, Xiao <xiao.zhang@intel.com>;
> stable@dpdk.org
> Subject: [v3] net/ice: enable multi-process support
> 
> Add multi-process support for ice. Secondary processes share memory and
> configuration with the primary process, so no further initialization is
> needed for them.
> 
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
> ---

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel with the Cc to stable removed

Thanks
Qi


* Re: [dpdk-dev] [v3] net/ice: enable multi-process support
  2019-07-25 11:18     ` [dpdk-dev] [v3] net/ice: enable " Xiao Zhang
  2019-07-25  4:56       ` Zhang, Qi Z
@ 2019-07-25 10:10       ` Ye Xiaolong
  1 sibling, 0 replies; 6+ messages in thread
From: Ye Xiaolong @ 2019-07-25 10:10 UTC (permalink / raw)
  To: Xiao Zhang; +Cc: dev, qiming.yang, qi.z.zhang, stable

On 07/25, Xiao Zhang wrote:
>Add multi-process support for ice. Secondary processes share memory
>and configuration with the primary process, so no further
>initialization is needed for them.
>
>Cc: stable@dpdk.org

This is a new feature, so there is no need to cc stable@dpdk.org.

Thanks,
Xiaolong



* [dpdk-dev] [v3] net/ice: enable multi-process support
  2019-07-24 16:56   ` [dpdk-dev] [v2] " Xiao Zhang
@ 2019-07-25 11:18     ` Xiao Zhang
  2019-07-25  4:56       ` Zhang, Qi Z
  2019-07-25 10:10       ` Ye Xiaolong
  0 siblings, 2 replies; 6+ messages in thread
From: Xiao Zhang @ 2019-07-25 11:18 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, qi.z.zhang, Xiao Zhang, stable

Add multi-process support for ice. Secondary processes share memory
and configuration with the primary process, so no further
initialization is needed for them.
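
One design note: the selection flags (rx_vec_allowed/tx_vec_allowed) live
in the shared adapter structure, but the burst function pointers are
per-process state, which is why the secondary still calls the rx/tx
function-selection helpers during init. A simplified model with
hypothetical stand-in types (shared_adapter and local_eth_dev are not the
real driver types):

	#include <stdbool.h>
	#include <stdio.h>

	struct shared_adapter {
		bool rx_vec_allowed;        /* written once by the primary */
	};

	struct local_eth_dev {
		void (*rx_pkt_burst)(void); /* per-process function pointer */
	};

	static void rx_burst_vec(void)    { puts("vector rx path"); }
	static void rx_burst_scalar(void) { puts("scalar rx path"); }

	static void
	set_rx_function(struct local_eth_dev *dev, const struct shared_adapter *ad)
	{
		/* the primary evaluated the vector-path preconditions and stored
		 * the result in the shared adapter; every process reads that flag
		 * and installs its own burst pointer.
		 */
		dev->rx_pkt_burst = ad->rx_vec_allowed ? rx_burst_vec : rx_burst_scalar;
	}

	int
	main(void)
	{
		struct shared_adapter ad = { .rx_vec_allowed = true };
		struct local_eth_dev dev;

		set_rx_function(&dev, &ad);
		dev.rx_pkt_burst();
		return 0;
	}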

Cc: stable@dpdk.org

Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
---
v3 Update the title and commit message.
v2 Remove limitation for secondary processes control path configuration.
---
 drivers/net/ice/ice_ethdev.c | 12 +++++++
 drivers/net/ice/ice_ethdev.h |  2 ++
 drivers/net/ice/ice_rxtx.c   | 74 ++++++++++++++++++++++++++++----------------
 3 files changed, 62 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 9ce730c..532f4db 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1408,6 +1408,15 @@ ice_dev_init(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = ice_xmit_pkts;
 	dev->tx_pkt_prepare = ice_prep_pkts;
 
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		ice_set_rx_function(dev);
+		ice_set_tx_function(dev);
+		return 0;
+	}
+
 	ice_set_default_ptype_table(dev);
 	pci_dev = RTE_DEV_TO_PCI(dev->device);
 	intr_handle = &pci_dev->intr_handle;
@@ -1638,6 +1647,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct rte_flow *p_flow;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	ice_dev_close(dev);
 
 	dev->dev_ops = NULL;
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 8a52239..a083616 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -282,6 +282,8 @@ struct ice_adapter {
 	struct rte_eth_dev *eth_dev;
 	struct ice_pf pf;
 	bool rx_bulk_alloc_allowed;
+	bool rx_vec_allowed;
+	bool tx_vec_allowed;
 	bool tx_simple_allowed;
 	/* ptype mapping table */
 	uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 035ed84..d67de8f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2332,35 +2332,46 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 	int i;
 	bool use_avx2 = false;
 
-	if (!ice_rx_vec_dev_check(dev)) {
-		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			rxq = dev->data->rx_queues[i];
-			(void)ice_rxq_vec_setup(rxq);
-		}
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
+			ad->rx_vec_allowed = true;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				rxq = dev->data->rx_queues[i];
+				if (rxq && ice_rxq_vec_setup(rxq)) {
+					ad->rx_vec_allowed = false;
+					break;
+				}
+			}
+
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+				use_avx2 = true;
 
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
-			use_avx2 = true;
+		} else {
+			ad->rx_vec_allowed = false;
+		}
+	}
 
+	if (ad->rx_vec_allowed) {
 		if (dev->data->scattered_rx) {
 			PMD_DRV_LOG(DEBUG,
-				    "Using %sVector Scattered Rx (port %d).",
-				    use_avx2 ? "avx2 " : "",
-				    dev->data->port_id);
+					"Using %sVector Scattered Rx (port %d).",
+					use_avx2 ? "avx2 " : "",
+					dev->data->port_id);
 			dev->rx_pkt_burst = use_avx2 ?
-					    ice_recv_scattered_pkts_vec_avx2 :
-					    ice_recv_scattered_pkts_vec;
+					ice_recv_scattered_pkts_vec_avx2 :
+					ice_recv_scattered_pkts_vec;
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
-				    use_avx2 ? "avx2 " : "",
-				    dev->data->port_id);
+					use_avx2 ? "avx2 " : "",
+					dev->data->port_id);
 			dev->rx_pkt_burst = use_avx2 ?
-					    ice_recv_pkts_vec_avx2 :
-					    ice_recv_pkts_vec;
+						ice_recv_pkts_vec_avx2 :
+						ice_recv_pkts_vec;
 		}
-
 		return;
 	}
+
 #endif
 
 	if (dev->data->scattered_rx) {
@@ -2464,16 +2475,27 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	int i;
 	bool use_avx2 = false;
 
-	if (!ice_tx_vec_dev_check(dev)) {
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			txq = dev->data->tx_queues[i];
-			(void)ice_txq_vec_setup(txq);
-		}
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (!ice_tx_vec_dev_check(dev)) {
+			ad->tx_vec_allowed = true;
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				txq = dev->data->tx_queues[i];
+				if (txq && ice_txq_vec_setup(txq)) {
+					ad->tx_vec_allowed = false;
+					break;
+				}
+			}
 
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
-			use_avx2 = true;
+			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+				use_avx2 = true;
+
+		} else {
+			ad->tx_vec_allowed = false;
+		}
+	}
 
+	if (ad->tx_vec_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
 			    use_avx2 ? "avx2 " : "",
 			    dev->data->port_id);
-- 
2.7.4


