From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com,
	Beilei Xing <beilei.xing@intel.com>,
	Wenjun Wu <wenjun1.wu@intel.com>
Subject: [PATCH v5 02/15] common/idpf: add vport structure
Date: Thu,  2 Feb 2023 09:53:44 +0000
Message-ID: <20230202095357.37929-3-beilei.xing@intel.com>
In-Reply-To: <20230202095357.37929-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Move the idpf_vport structure to the common module and drop its ethdev
dependency: dev_data becomes an opaque pointer, the per-queue virtchnl
helpers idpf_vc_config_rxq/idpf_vc_config_txq take the queue pointer
directly, and idpf_vc_config_irq_map_unmap takes the Rx queue count from
the caller. Also remove the unused batch helpers idpf_vc_config_rxqs and
idpf_vc_config_txqs.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_device.h |  59 ++++++
 drivers/net/idpf/idpf_ethdev.c           |  10 +-
 drivers/net/idpf/idpf_ethdev.h           |  66 +-----
 drivers/net/idpf/idpf_rxtx.c             |   4 +-
 drivers/net/idpf/idpf_rxtx.h             |   3 +
 drivers/net/idpf/idpf_vchnl.c            | 252 +++--------------------
 6 files changed, 96 insertions(+), 298 deletions(-)
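
Note: the sketch below (notes only, ignored by git am) illustrates the
reworked calling convention from the ethdev layer. It is a minimal
example under stated assumptions: the wrapper example_rx_queue_up() is
hypothetical, while idpf_vc_config_rxq() and
idpf_vc_config_irq_map_unmap() follow the new signatures introduced in
this diff.

/*
 * Hypothetical ethdev-layer helper; assumes rte_ethdev.h plus the idpf
 * driver headers. The common module no longer dereferences
 * rte_eth_dev_data through vport->dev_data, so the caller hands over
 * the queue pointer and the Rx queue count explicitly.
 */
static int
example_rx_queue_up(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct idpf_vport *vport = dev->data->dev_private;
	struct idpf_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
	int err;

	/* Per-queue config now takes the queue pointer, not an index. */
	err = idpf_vc_config_rxq(vport, rxq);
	if (err != 0)
		return err;

	/* The Rx queue count comes from the caller rather than from the
	 * (now opaque) vport->dev_data.
	 */
	return idpf_vc_config_irq_map_unmap(vport, dev->data->nb_rx_queues,
					    true);
}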

diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 4f548a7185..b7fff84b25 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -17,4 +17,63 @@ struct idpf_adapter {
 	uint8_t *mbx_resp; /* buffer to store the mailbox response from cp */
 };
 
+struct idpf_chunks_info {
+	uint32_t tx_start_qid;
+	uint32_t rx_start_qid;
+	/* Valid only if split queue model */
+	uint32_t tx_compl_start_qid;
+	uint32_t rx_buf_start_qid;
+
+	uint64_t tx_qtail_start;
+	uint32_t tx_qtail_spacing;
+	uint64_t rx_qtail_start;
+	uint32_t rx_qtail_spacing;
+	uint64_t tx_compl_qtail_start;
+	uint32_t tx_compl_qtail_spacing;
+	uint64_t rx_buf_qtail_start;
+	uint32_t rx_buf_qtail_spacing;
+};
+
+struct idpf_vport {
+	struct idpf_adapter *adapter; /* Backreference to associated adapter */
+	struct virtchnl2_create_vport *vport_info; /* virtchnl response info handling */
+	uint16_t sw_idx; /* SW index in adapter->vports[]*/
+	uint16_t vport_id;
+	uint32_t txq_model;
+	uint32_t rxq_model;
+	uint16_t num_tx_q;
+	/* valid only if txq_model is split Q */
+	uint16_t num_tx_complq;
+	uint16_t num_rx_q;
+	/* valid only if rxq_model is split Q */
+	uint16_t num_rx_bufq;
+
+	uint16_t max_mtu;
+	uint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+
+	enum virtchnl_rss_algorithm rss_algorithm;
+	uint16_t rss_key_size;
+	uint16_t rss_lut_size;
+
+	void *dev_data; /* Pointer to the device data */
+	uint16_t max_pkt_len; /* Maximum packet length */
+
+	/* RSS info */
+	uint32_t *rss_lut;
+	uint8_t *rss_key;
+	uint64_t rss_hf;
+
+	/* MSIX info*/
+	struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
+	uint16_t max_vectors;
+	struct virtchnl2_alloc_vectors *recv_vectors;
+
+	/* Chunk info */
+	struct idpf_chunks_info chunks_info;
+
+	uint16_t devarg_id;
+
+	bool stopped;
+};
+
 #endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 1b13d081a7..72a5c9f39b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -275,11 +275,13 @@ static int
 idpf_init_rss(struct idpf_vport *vport)
 {
 	struct rte_eth_rss_conf *rss_conf;
+	struct rte_eth_dev_data *dev_data;
 	uint16_t i, nb_q, lut_size;
 	int ret = 0;
 
-	rss_conf = &vport->dev_data->dev_conf.rx_adv_conf.rss_conf;
-	nb_q = vport->dev_data->nb_rx_queues;
+	dev_data = vport->dev_data;
+	rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = dev_data->nb_rx_queues;
 
 	vport->rss_key = rte_zmalloc("rss_key",
 				     vport->rss_key_size, 0);
@@ -466,7 +468,7 @@ idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
 	}
 	vport->qv_map = qv_map;
 
-	if (idpf_vc_config_irq_map_unmap(vport, true) != 0) {
+	if (idpf_vc_config_irq_map_unmap(vport, dev->data->nb_rx_queues, true) != 0) {
 		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
 		goto config_irq_map_err;
 	}
@@ -582,7 +584,7 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 
 	idpf_stop_queues(dev);
 
-	idpf_vc_config_irq_map_unmap(vport, false);
+	idpf_vc_config_irq_map_unmap(vport, dev->data->nb_rx_queues, false);
 
 	if (vport->recv_vectors != NULL)
 		idpf_vc_dealloc_vectors(vport);
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index e956fa989c..8c29019667 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -74,71 +74,12 @@ enum idpf_vc_result {
 	IDPF_MSG_CMD,      /* Read async command result */
 };
 
-struct idpf_chunks_info {
-	uint32_t tx_start_qid;
-	uint32_t rx_start_qid;
-	/* Valid only if split queue model */
-	uint32_t tx_compl_start_qid;
-	uint32_t rx_buf_start_qid;
-
-	uint64_t tx_qtail_start;
-	uint32_t tx_qtail_spacing;
-	uint64_t rx_qtail_start;
-	uint32_t rx_qtail_spacing;
-	uint64_t tx_compl_qtail_start;
-	uint32_t tx_compl_qtail_spacing;
-	uint64_t rx_buf_qtail_start;
-	uint32_t rx_buf_qtail_spacing;
-};
-
 struct idpf_vport_param {
 	struct idpf_adapter_ext *adapter;
 	uint16_t devarg_id; /* arg id from user */
 	uint16_t idx;       /* index in adapter->vports[]*/
 };
 
-struct idpf_vport {
-	struct idpf_adapter *adapter; /* Backreference to associated adapter */
-	struct virtchnl2_create_vport *vport_info; /* virtchnl response info handling */
-	uint16_t sw_idx; /* SW index in adapter->vports[]*/
-	uint16_t vport_id;
-	uint32_t txq_model;
-	uint32_t rxq_model;
-	uint16_t num_tx_q;
-	/* valid only if txq_model is split Q */
-	uint16_t num_tx_complq;
-	uint16_t num_rx_q;
-	/* valid only if rxq_model is split Q */
-	uint16_t num_rx_bufq;
-
-	uint16_t max_mtu;
-	uint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
-
-	enum virtchnl_rss_algorithm rss_algorithm;
-	uint16_t rss_key_size;
-	uint16_t rss_lut_size;
-
-	struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
-	uint16_t max_pkt_len; /* Maximum packet length */
-
-	/* RSS info */
-	uint32_t *rss_lut;
-	uint8_t *rss_key;
-	uint64_t rss_hf;
-
-	/* MSIX info*/
-	struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
-	uint16_t max_vectors;
-	struct virtchnl2_alloc_vectors *recv_vectors;
-
-	/* Chunk info */
-	struct idpf_chunks_info chunks_info;
-
-	uint16_t devarg_id;
-
-	bool stopped;
-};
-
 /* Struct used when parse driver specific devargs */
 struct idpf_devargs {
 	uint16_t req_vports[IDPF_MAX_VPORT_NUM];
@@ -242,15 +183,12 @@ int idpf_vc_destroy_vport(struct idpf_vport *vport);
 int idpf_vc_set_rss_key(struct idpf_vport *vport);
 int idpf_vc_set_rss_lut(struct idpf_vport *vport);
 int idpf_vc_set_rss_hash(struct idpf_vport *vport);
-int idpf_vc_config_rxqs(struct idpf_vport *vport);
-int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
-int idpf_vc_config_txqs(struct idpf_vport *vport);
-int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);
 int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 		      bool rx, bool on);
 int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
-int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, bool map);
+int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
+				 uint16_t nb_rxq, bool map);
 int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
 int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
 int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 4845f2ea0a..918d156e03 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1066,7 +1066,7 @@ idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		dev->data->rx_queues[rx_queue_id];
 	int err = 0;
 
-	err = idpf_vc_config_rxq(vport, rx_queue_id);
+	err = idpf_vc_config_rxq(vport, rxq);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Fail to configure Rx queue %u", rx_queue_id);
 		return err;
@@ -1117,7 +1117,7 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		dev->data->tx_queues[tx_queue_id];
 	int err = 0;
 
-	err = idpf_vc_config_txq(vport, tx_queue_id);
+	err = idpf_vc_config_txq(vport, txq);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
 		return err;
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 047fc03614..9417651b3f 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -243,6 +243,9 @@ void idpf_stop_queues(struct rte_eth_dev *dev);
 void idpf_set_rx_function(struct rte_eth_dev *dev);
 void idpf_set_tx_function(struct rte_eth_dev *dev);
 
+int idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
+int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
+
 #define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
 /* Helper function to convert a 32b nanoseconds timestamp to 64b. */
 static inline uint64_t
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index ca481bb915..633d3295d3 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -742,121 +742,9 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
 
 #define IDPF_RX_BUF_STRIDE		64
 int
-idpf_vc_config_rxqs(struct idpf_vport *vport)
-{
-	struct idpf_adapter *base = vport->adapter;
-	struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(base);
-	struct idpf_rx_queue **rxq =
-		(struct idpf_rx_queue **)vport->dev_data->rx_queues;
-	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
-	struct virtchnl2_rxq_info *rxq_info;
-	struct idpf_cmd_info args;
-	uint16_t total_qs, num_qs;
-	int size, i, j;
-	int err = 0;
-	int k = 0;
-
-	total_qs = vport->num_rx_q + vport->num_rx_bufq;
-	while (total_qs) {
-		if (total_qs > adapter->max_rxq_per_msg) {
-			num_qs = adapter->max_rxq_per_msg;
-			total_qs -= adapter->max_rxq_per_msg;
-		} else {
-			num_qs = total_qs;
-			total_qs = 0;
-		}
-
-		size = sizeof(*vc_rxqs) + (num_qs - 1) *
-			sizeof(struct virtchnl2_rxq_info);
-		vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
-		if (vc_rxqs == NULL) {
-			PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
-			err = -ENOMEM;
-			break;
-		}
-		vc_rxqs->vport_id = vport->vport_id;
-		vc_rxqs->num_qinfo = num_qs;
-		if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
-			for (i = 0; i < num_qs; i++, k++) {
-				rxq_info = &vc_rxqs->qinfo[i];
-				rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
-				rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-				rxq_info->queue_id = rxq[k]->queue_id;
-				rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
-				rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
-				rxq_info->max_pkt_size = vport->max_pkt_len;
-
-				rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
-				rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
-
-				rxq_info->ring_len = rxq[k]->nb_rx_desc;
-			}
-		} else {
-			for (i = 0; i < num_qs / 3; i++, k++) {
-				/* Rx queue */
-				rxq_info = &vc_rxqs->qinfo[i * 3];
-				rxq_info->dma_ring_addr =
-					rxq[k]->rx_ring_phys_addr;
-				rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-				rxq_info->queue_id = rxq[k]->queue_id;
-				rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-				rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
-				rxq_info->max_pkt_size = vport->max_pkt_len;
-
-				rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
-				rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
-
-				rxq_info->ring_len = rxq[k]->nb_rx_desc;
-				rxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;
-				rxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;
-				rxq_info->rx_buffer_low_watermark = 64;
-
-				/* Buffer queue */
-				for (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {
-					struct idpf_rx_queue *bufq = j == 1 ?
-						rxq[k]->bufq1 : rxq[k]->bufq2;
-					rxq_info = &vc_rxqs->qinfo[i * 3 + j];
-					rxq_info->dma_ring_addr =
-						bufq->rx_ring_phys_addr;
-					rxq_info->type =
-						VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
-					rxq_info->queue_id = bufq->queue_id;
-					rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-					rxq_info->data_buffer_size = bufq->rx_buf_len;
-					rxq_info->desc_ids =
-						VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
-					rxq_info->ring_len = bufq->nb_rx_desc;
-
-					rxq_info->buffer_notif_stride =
-						IDPF_RX_BUF_STRIDE;
-					rxq_info->rx_buffer_low_watermark = 64;
-				}
-			}
-		}
-		memset(&args, 0, sizeof(args));
-		args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
-		args.in_args = (uint8_t *)vc_rxqs;
-		args.in_args_size = size;
-		args.out_buffer = base->mbx_resp;
-		args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-
-		err = idpf_execute_vc_cmd(base, &args);
-		rte_free(vc_rxqs);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
-			break;
-		}
-	}
-
-	return err;
-}
-
-int
-idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
+idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
 {
 	struct idpf_adapter *adapter = vport->adapter;
-	struct idpf_rx_queue **rxq =
-		(struct idpf_rx_queue **)vport->dev_data->rx_queues;
 	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
 	struct virtchnl2_rxq_info *rxq_info;
 	struct idpf_cmd_info args;
@@ -880,39 +768,38 @@ idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
 	vc_rxqs->num_qinfo = num_qs;
 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
 		rxq_info = &vc_rxqs->qinfo[0];
-		rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
+		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
 		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-		rxq_info->queue_id = rxq[rxq_id]->queue_id;
+		rxq_info->queue_id = rxq->queue_id;
 		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
-		rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
+		rxq_info->data_buffer_size = rxq->rx_buf_len;
 		rxq_info->max_pkt_size = vport->max_pkt_len;
 
 		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
 		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
 
-		rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
+		rxq_info->ring_len = rxq->nb_rx_desc;
 	}  else {
 		/* Rx queue */
 		rxq_info = &vc_rxqs->qinfo[0];
-		rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
+		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
 		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-		rxq_info->queue_id = rxq[rxq_id]->queue_id;
+		rxq_info->queue_id = rxq->queue_id;
 		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-		rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
+		rxq_info->data_buffer_size = rxq->rx_buf_len;
 		rxq_info->max_pkt_size = vport->max_pkt_len;
 
 		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
 		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
 
-		rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
-		rxq_info->rx_bufq1_id = rxq[rxq_id]->bufq1->queue_id;
-		rxq_info->rx_bufq2_id = rxq[rxq_id]->bufq2->queue_id;
+		rxq_info->ring_len = rxq->nb_rx_desc;
+		rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
+		rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
 		rxq_info->rx_buffer_low_watermark = 64;
 
 		/* Buffer queue */
 		for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
-			struct idpf_rx_queue *bufq =
-				i == 1 ? rxq[rxq_id]->bufq1 : rxq[rxq_id]->bufq2;
+			struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;
 			rxq_info = &vc_rxqs->qinfo[i];
 			rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
 			rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
@@ -943,99 +830,9 @@ idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
 }
 
 int
-idpf_vc_config_txqs(struct idpf_vport *vport)
-{
-	struct idpf_adapter *base = vport->adapter;
-	struct idpf_adapter_ext *adapter = IDPF_ADAPTER_TO_EXT(base);
-	struct idpf_tx_queue **txq =
-		(struct idpf_tx_queue **)vport->dev_data->tx_queues;
-	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
-	struct virtchnl2_txq_info *txq_info;
-	struct idpf_cmd_info args;
-	uint16_t total_qs, num_qs;
-	int size, i;
-	int err = 0;
-	int k = 0;
-
-	total_qs = vport->num_tx_q + vport->num_tx_complq;
-	while (total_qs) {
-		if (total_qs > adapter->max_txq_per_msg) {
-			num_qs = adapter->max_txq_per_msg;
-			total_qs -= adapter->max_txq_per_msg;
-		} else {
-			num_qs = total_qs;
-			total_qs = 0;
-		}
-		size = sizeof(*vc_txqs) + (num_qs - 1) *
-			sizeof(struct virtchnl2_txq_info);
-		vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
-		if (vc_txqs == NULL) {
-			PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
-			err = -ENOMEM;
-			break;
-		}
-		vc_txqs->vport_id = vport->vport_id;
-		vc_txqs->num_qinfo = num_qs;
-		if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
-			for (i = 0; i < num_qs; i++, k++) {
-				txq_info = &vc_txqs->qinfo[i];
-				txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
-				txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-				txq_info->queue_id = txq[k]->queue_id;
-				txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
-				txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
-				txq_info->ring_len = txq[k]->nb_tx_desc;
-			}
-		} else {
-			for (i = 0; i < num_qs / 2; i++, k++) {
-				/* txq info */
-				txq_info = &vc_txqs->qinfo[2 * i];
-				txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
-				txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-				txq_info->queue_id = txq[k]->queue_id;
-				txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-				txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-				txq_info->ring_len = txq[k]->nb_tx_desc;
-				txq_info->tx_compl_queue_id =
-					txq[k]->complq->queue_id;
-				txq_info->relative_queue_id = txq_info->queue_id;
-
-				/* tx completion queue info */
-				txq_info = &vc_txqs->qinfo[2 * i + 1];
-				txq_info->dma_ring_addr =
-					txq[k]->complq->tx_ring_phys_addr;
-				txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
-				txq_info->queue_id = txq[k]->complq->queue_id;
-				txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-				txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-				txq_info->ring_len = txq[k]->complq->nb_tx_desc;
-			}
-		}
-
-		memset(&args, 0, sizeof(args));
-		args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
-		args.in_args = (uint8_t *)vc_txqs;
-		args.in_args_size = size;
-		args.out_buffer = base->mbx_resp;
-		args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-
-		err = idpf_execute_vc_cmd(base, &args);
-		rte_free(vc_txqs);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
-			break;
-		}
-	}
-
-	return err;
-}
-
-int
-idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
+idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
 {
 	struct idpf_adapter *adapter = vport->adapter;
-	struct idpf_tx_queue **txq =
-		(struct idpf_tx_queue **)vport->dev_data->tx_queues;
 	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
 	struct virtchnl2_txq_info *txq_info;
 	struct idpf_cmd_info args;
@@ -1060,32 +857,32 @@ idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
 
 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
 		txq_info = &vc_txqs->qinfo[0];
-		txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
+		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-		txq_info->queue_id = txq[txq_id]->queue_id;
+		txq_info->queue_id = txq->queue_id;
 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
-		txq_info->ring_len = txq[txq_id]->nb_tx_desc;
+		txq_info->ring_len = txq->nb_tx_desc;
 	} else {
 		/* txq info */
 		txq_info = &vc_txqs->qinfo[0];
-		txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
+		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-		txq_info->queue_id = txq[txq_id]->queue_id;
+		txq_info->queue_id = txq->queue_id;
 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-		txq_info->ring_len = txq[txq_id]->nb_tx_desc;
-		txq_info->tx_compl_queue_id = txq[txq_id]->complq->queue_id;
+		txq_info->ring_len = txq->nb_tx_desc;
+		txq_info->tx_compl_queue_id = txq->complq->queue_id;
 		txq_info->relative_queue_id = txq_info->queue_id;
 
 		/* tx completion queue info */
 		txq_info = &vc_txqs->qinfo[1];
-		txq_info->dma_ring_addr = txq[txq_id]->complq->tx_ring_phys_addr;
+		txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
 		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
-		txq_info->queue_id = txq[txq_id]->complq->queue_id;
+		txq_info->queue_id = txq->complq->queue_id;
 		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
 		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-		txq_info->ring_len = txq[txq_id]->complq->nb_tx_desc;
+		txq_info->ring_len = txq->complq->nb_tx_desc;
 	}
 
 	memset(&args, 0, sizeof(args));
@@ -1104,12 +901,11 @@ idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
 }
 
 int
-idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, bool map)
+idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
 {
 	struct idpf_adapter *adapter = vport->adapter;
 	struct virtchnl2_queue_vector_maps *map_info;
 	struct virtchnl2_queue_vector *vecmap;
-	uint16_t nb_rxq = vport->dev_data->nb_rx_queues;
 	struct idpf_cmd_info args;
 	int len, i, err = 0;
 
-- 
2.26.2

