From: dapengx.yu@intel.com
To: Jingjing Wu <jingjing.wu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>,
	Bruce Richardson <bruce.richardson@intel.com>,
	Konstantin Ananyev <konstantin.ananyev@intel.com>
Cc: dev@dpdk.org, qi.z.zhang@intel.com, ferruh.yigit@intel.com,
	Dapeng Yu <dapengx.yu@intel.com>,
	stable@dpdk.org
Subject: [dpdk-dev] [PATCH v2] net/iavf: fix multi-process shared data
Date: Sat,  9 Oct 2021 11:25:19 +0800	[thread overview]
Message-ID: <20211009032519.335016-1-dapengx.yu@intel.com> (raw)
In-Reply-To: <20210928033753.1955674-1-dapengx.yu@intel.com>

From: Dapeng Yu <dapengx.yu@intel.com>

When the iavf_adapter instance is not completely initialized in the
primary process and a secondary process accesses its "rte_eth_dev"
member, the secondary process crashes.

This patch replaces the eth_dev pointer in iavf_adapter with a pointer
to the shared eth_dev_data, which is valid in every process.
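
Background sketch (not part of the patch): in DPDK's multi-process
model, struct rte_eth_dev lives in the per-process rte_eth_devices[]
array and carries per-process function pointers, while struct
rte_eth_dev_data is allocated in shared memory and is readable in both
primary and secondary processes. The "example_adapter" type and helper
below are hypothetical, for illustration only:

    #include <stdint.h>
    #include <rte_ethdev.h>

    struct example_adapter {
            /* Per-process pointer: unsafe to dereference from a
             * secondary process.
             */
            struct rte_eth_dev *eth_dev;
            /* Shared memory: safe to read in every process. */
            struct rte_eth_dev_data *dev_data;
    };

    static uint16_t
    example_nb_rx_queues(struct example_adapter *ad)
    {
            /* Read device configuration through the shared data,
             * never through the per-process rte_eth_dev pointer.
             */
            return ad->dev_data->nb_rx_queues;
    }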

Fixes: f978c1c9b3b5 ("net/iavf: add RSS hash parsing in AVX path")
Fixes: 9c9aa0040344 ("net/iavf: add offload path for Rx AVX512 flex descriptor")
Fixes: 63660ea3ee0b ("net/iavf: add RSS hash parsing in SSE path")
Cc: stable@dpdk.org

Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
V2:
* Remove access to rte_eth_devices
---
 drivers/net/iavf/iavf.h                 |  9 +++---
 drivers/net/iavf/iavf_ethdev.c          | 12 ++++----
 drivers/net/iavf/iavf_fdir.c            |  4 +--
 drivers/net/iavf/iavf_hash.c            |  2 +-
 drivers/net/iavf/iavf_rxtx.h            |  4 +--
 drivers/net/iavf/iavf_rxtx_vec_avx2.c   | 13 +++++----
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 12 ++++----
 drivers/net/iavf/iavf_rxtx_vec_sse.c    |  7 +++--
 drivers/net/iavf/iavf_vchnl.c           | 37 ++++++++++++-------------
 9 files changed, 53 insertions(+), 47 deletions(-)
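
Note for reviewers (background, not part of the patch): a PMD tells
the two process roles apart with rte_eal_process_type(). Only the
primary process initializes the shared dev_private; a secondary
process attaches and sets up per-process state only. A minimal sketch
under that assumption, with a hypothetical "example_dev_init":

    #include <rte_common.h>
    #include <rte_eal.h>
    #include <rte_ethdev.h>

    static int
    example_dev_init(struct rte_eth_dev *eth_dev)
    {
            RTE_SET_USED(eth_dev);

            if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                    /* Shared data (eth_dev->data, dev_private) was
                     * already initialized by the primary process;
                     * set up only per-process state here, e.g. the
                     * rx/tx burst function pointers.
                     */
                    return 0;
            }

            /* Primary process: full initialization of shared data. */
            return 0;
    }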

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 940d4f79ec..5de43cf9af 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -228,6 +228,8 @@ struct iavf_info {
 	struct virtchnl_qos_cap_list *qos_cap;
 	struct iavf_qtc_map *qtc_map;
 	struct iavf_tm_conf tm_conf;
+
+	struct rte_eth_dev *eth_dev;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -256,7 +258,7 @@ struct iavf_devargs {
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
-	struct rte_eth_dev *eth_dev;
+	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
 
 	bool rx_bulk_alloc_allowed;
@@ -282,8 +284,6 @@ struct iavf_adapter {
 	(&(((struct iavf_vsi *)vsi)->adapter->hw))
 #define IAVF_VSI_TO_VF(vsi) \
 	(&(((struct iavf_vsi *)vsi)->adapter->vf))
-#define IAVF_VSI_TO_ETH_DEV(vsi) \
-	(((struct iavf_vsi *)vsi)->adapter->eth_dev)
 
 static inline void
 iavf_init_adminq_parameter(struct iavf_hw *hw)
@@ -397,7 +397,8 @@ int iavf_rss_hash_set(struct iavf_adapter *ad, uint64_t rss_hf, bool add);
 int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num, bool add);
-int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
+int iavf_request_queues(struct rte_eth_dev *dev, struct iavf_adapter *adapter,
+			uint16_t num);
 int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
 int iavf_get_qos_cap(struct iavf_adapter *adapter);
 int iavf_set_q_tc_map(struct rte_eth_dev *dev,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 5a5a7f59e1..717fa9a8e9 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -383,8 +383,8 @@ iavf_init_rss(struct iavf_adapter *adapter)
 	uint16_t i, j, nb_q;
 	int ret;
 
-	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
-	nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+	rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
 		       vf->max_rss_qregion);
 
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
@@ -438,7 +438,7 @@ iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(ad);
 	int ret;
 
-	ret = iavf_request_queues(ad, num);
+	ret = iavf_request_queues(dev, ad, num);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "request queues from PF failed");
 		return ret;
@@ -1388,7 +1388,7 @@ iavf_dev_rss_hash_update(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	int ret;
 
-	adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
+	adapter->dev_data->dev_conf.rx_adv_conf.rss_conf = *rss_conf;
 
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
 		return -ENOTSUP;
@@ -2087,6 +2087,8 @@ iavf_init_vf(struct rte_eth_dev *dev)
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
+	vf->eth_dev = dev;
+
 	err = iavf_parse_devargs(dev);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Failed to parse devargs");
@@ -2352,7 +2354,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	hw->bus.func = pci_dev->addr.function;
 	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 	hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
-	adapter->eth_dev = eth_dev;
+	adapter->dev_data = eth_dev->data;
 	adapter->stopped = 1;
 
 	if (iavf_init_vf(eth_dev) != 0) {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index ea2b692712..2e5d68f9f9 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -431,7 +431,7 @@ iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
 		}
 	}
 
-	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+	if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION, act,
 				"Invalid queue region indexes.");
@@ -511,7 +511,7 @@ iavf_fdir_parse_action(struct iavf_adapter *ad,
 			filter_action->act_conf.queue.index = act_q->index;
 
 			if (filter_action->act_conf.queue.index >=
-				ad->eth_dev->data->nb_rx_queues) {
+				ad->dev_data->nb_rx_queues) {
 				rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION,
 					actions, "Invalid queue for FDIR.");
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index e84f58d6f4..1f2d3772d1 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -1365,7 +1365,7 @@ iavf_hash_uninit(struct iavf_adapter *ad)
 	if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF))
 		return;
 
-	rss_conf = &ad->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+	rss_conf = &ad->dev_data->dev_conf.rx_adv_conf.rss_conf;
 	if (iavf_rss_hash_set(ad, rss_conf->rss_hf, false))
 		PMD_DRV_LOG(ERR, "fail to delete default RSS");
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..25d93a3561 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -577,8 +577,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
 	int i; \
-	for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
-		struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+	for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
+		struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
 		if (!rxq) \
 			continue; \
 		rxq->fdir_enabled = on; \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 96c05d9319..72a4fcab04 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -524,7 +524,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 {
 #define IAVF_DESCS_PER_LOOP_AVX 8
 
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	const uint32_t *type_table = adapter->ptype_tbl;
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
 			0, rxq->mbuf_initializer);
@@ -903,9 +906,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH ||
-				rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+		if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
+		    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh7 =
 				_mm_load_si128
@@ -956,8 +958,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					(_mm256_castsi128_si256(raw_desc_bh0),
 					raw_desc_bh1, 1);
 
-			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-					DEV_RX_OFFLOAD_RSS_HASH) {
+			if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 				/**
 				 * to shift the 32b RSS hash value to the
 				 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index cb0b057b0f..12375d3d80 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -710,8 +710,12 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 					uint8_t *split_packet,
 					bool offload)
 {
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+
 #ifdef IAVF_RX_PTYPE_OFFLOAD
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	const uint32_t *type_table = adapter->ptype_tbl;
 #endif
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
@@ -1137,8 +1141,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 			 * needs to load 2nd 16B of each desc for RSS hash parsing,
 			 * will cause performance drop to get into this context.
 			 */
-			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-			    DEV_RX_OFFLOAD_RSS_HASH ||
+			if (offloads & DEV_RX_OFFLOAD_RSS_HASH ||
 			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
@@ -1190,8 +1193,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 						(_mm256_castsi128_si256(raw_desc_bh0),
 						 raw_desc_bh1, 1);
 
-				if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-						DEV_RX_OFFLOAD_RSS_HASH) {
+				if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 					/**
 					 * to shift the 32b RSS hash value to the
 					 * highest 32b of each 128b before mask
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..edb54991e2 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -644,7 +644,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 	uint16_t nb_pkts_recd;
 	int pos;
 	uint64_t var;
-	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *adapter = rxq->vsi->adapter;
+	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	const uint32_t *ptype_tbl = adapter->ptype_tbl;
 	__m128i crc_adjust = _mm_set_epi16
 				(0, 0, 0,       /* ignore non-length fields */
 				 -rxq->crc_len, /* sub crc on data_len */
@@ -817,8 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
 		 * needs to load 2nd 16B of each desc for RSS hash parsing,
 		 * will cause performance drop to get into this context.
 		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
+		if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
 			/* load bottom half of every 32B desc */
 			const __m128i raw_desc_bh3 =
 				_mm_load_si128
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 3275687927..f4fbfdc957 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -72,7 +72,6 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
-	struct rte_eth_dev *dev = adapter->eth_dev;
 	struct iavf_arq_event_info event;
 	enum iavf_aq_result result = IAVF_MSG_NON;
 	enum virtchnl_ops opcode;
@@ -114,7 +113,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 				speed = vpe->event_data.link_event.link_speed;
 				vf->link_speed = iavf_convert_link_speed(speed);
 			}
-			iavf_dev_link_update(dev, 0);
+			iavf_dev_link_update(vf->eth_dev, 0);
 			PMD_DRV_LOG(INFO, "Link status update:%s",
 					vf->link_up ? "up" : "down");
 			break;
@@ -690,8 +689,8 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 
-	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
-	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
 
 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
 	args.in_args = (u8 *)&queue_select;
@@ -718,8 +717,8 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	memset(&queue_select, 0, sizeof(queue_select));
 	queue_select.vsi_id = vf->vsi_res->vsi_id;
 
-	queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
-	queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+	queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+	queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
 
 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
 	args.in_args = (u8 *)&queue_select;
@@ -789,12 +788,12 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
-		adapter->eth_dev->data->nb_tx_queues;
+		adapter->dev_data->nb_tx_queues;
 
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
-		adapter->eth_dev->data->nb_rx_queues;
+		adapter->dev_data->nb_rx_queues;
 
 	args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
 	args.in_args = (u8 *)queue_select;
@@ -833,12 +832,12 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
-		adapter->eth_dev->data->nb_tx_queues;
+		adapter->dev_data->nb_tx_queues;
 
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
 	queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
-		adapter->eth_dev->data->nb_rx_queues;
+		adapter->dev_data->nb_rx_queues;
 
 	args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
 	args.in_args = (u8 *)queue_select;
@@ -969,9 +968,9 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		uint16_t num_queue_pairs, uint16_t index)
 {
 	struct iavf_rx_queue **rxq =
-		(struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
+		(struct iavf_rx_queue **)adapter->dev_data->rx_queues;
 	struct iavf_tx_queue **txq =
-		(struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
+		(struct iavf_tx_queue **)adapter->dev_data->tx_queues;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct virtchnl_vsi_queue_config_info *vc_config;
 	struct virtchnl_queue_pair_info *vc_qp;
@@ -995,7 +994,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->txq.queue_id = i;
 
 		/* Virtchnnl configure tx queues by pairs */
-		if (i < adapter->eth_dev->data->nb_tx_queues) {
+		if (i < adapter->dev_data->nb_tx_queues) {
 			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
 			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
 		}
@@ -1004,7 +1003,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->rxq.queue_id = i;
 		vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
 
-		if (i >= adapter->eth_dev->data->nb_rx_queues)
+		if (i >= adapter->dev_data->nb_rx_queues)
 			continue;
 
 		/* Virtchnnl configure rx queues by pairs */
@@ -1073,7 +1072,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 		return -ENOMEM;
 
 	map_info->num_vectors = vf->nb_msix;
-	for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+	for (i = 0; i < adapter->dev_data->nb_rx_queues; i++) {
 		vecmap =
 		    &map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
 		vecmap->vsi_id = vf->vsi_res->vsi_id;
@@ -1152,7 +1151,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		j = 0;
 		len = sizeof(struct virtchnl_ether_addr_list);
 		for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) {
-			addr = &adapter->eth_dev->data->mac_addrs[i];
+			addr = &adapter->dev_data->mac_addrs[i];
 			if (rte_is_zero_ether_addr(addr))
 				continue;
 			len += sizeof(struct virtchnl_ether_addr);
@@ -1169,7 +1168,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		}
 
 		for (i = begin; i < next_begin; i++) {
-			addr = &adapter->eth_dev->data->mac_addrs[i];
+			addr = &adapter->dev_data->mac_addrs[i];
 			if (rte_is_zero_ether_addr(addr))
 				continue;
 			rte_memcpy(list->list[j].addr, addr->addr_bytes,
@@ -1653,9 +1652,9 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 }
 
 int
-iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+iavf_request_queues(struct rte_eth_dev *dev, struct iavf_adapter *adapter,
+		    uint16_t num)
 {
-	struct rte_eth_dev *dev = adapter->eth_dev;
 	struct iavf_info *vf =  IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct virtchnl_vf_res_request vfres;
-- 
2.27.0


Thread overview: 10+ messages
2021-09-28  3:37 [dpdk-dev] [PATCH] net/iavf: fix multi-process shared data dapengx.yu
2021-09-28 11:12 ` Zhang, Qi Z
2021-09-29 16:28 ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
2021-09-30  9:11   ` Yu, DapengX
2021-09-30 10:57     ` Ferruh Yigit
2021-10-07  4:50       ` Zhang, Qi Z
2021-10-09  3:25 ` dapengx.yu [this message]
2021-10-09  9:40   ` [dpdk-dev] [PATCH v2] " Zhang, Qi Z
2021-10-11  2:01   ` [dpdk-dev] [PATCH v3] " dapengx.yu
2021-10-11  2:57     ` Zhang, Qi Z
