From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, qi.z.zhang@intel.com, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH v4 11/15] common/idpf: add rxq and txq struct
Date: Tue, 17 Jan 2023 08:06:18 +0000
Message-ID: <20230117080622.105657-12-beilei.xing@intel.com>
In-Reply-To: <20230117080622.105657-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

Add the idpf_rx_queue and idpf_tx_queue structures to the common module.
Move the idpf_vc_config_rxq and idpf_vc_config_txq functions to the
common module as well.
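
For reference, a caller in the net/idpf driver is expected to use the
relocated helpers roughly as in the sketch below. This is a minimal,
illustrative example only and is not part of this patch; the function
name example_config_queues and the error-handling flow are assumptions,
while the idpf_vc_config_rxq/idpf_vc_config_txq signatures match the
declarations added in idpf_common_virtchnl.h.

  #include <idpf_common_rxtx.h>
  #include <idpf_common_virtchnl.h>

  /* Configure one Rx queue and one Tx queue over virtchnl2 using the
   * helpers exported by the common module.  The rxq/txq structures are
   * assumed to be fully initialized by the driver beforehand (ring DMA
   * addresses, descriptor counts, queue IDs, buffer sizes, ...).
   */
  static int
  example_config_queues(struct idpf_vport *vport,
                        struct idpf_rx_queue *rxq,
                        struct idpf_tx_queue *txq)
  {
          int err;

          err = idpf_vc_config_rxq(vport, rxq);
          if (err != 0)
                  return err;

          return idpf_vc_config_txq(vport, txq);
  }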

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/common/idpf/idpf_common_device.h   |   2 +
 drivers/common/idpf/idpf_common_rxtx.h     | 112 +++++++++++++
 drivers/common/idpf/idpf_common_virtchnl.c | 160 ++++++++++++++++++
 drivers/common/idpf/idpf_common_virtchnl.h |  10 +-
 drivers/common/idpf/version.map            |   2 +
 drivers/net/idpf/idpf_ethdev.h             |   2 -
 drivers/net/idpf/idpf_rxtx.h               |  97 +----------
 drivers/net/idpf/idpf_vchnl.c              | 184 ---------------------
 drivers/net/idpf/meson.build               |   1 -
 9 files changed, 284 insertions(+), 286 deletions(-)
 create mode 100644 drivers/common/idpf/idpf_common_rxtx.h
 delete mode 100644 drivers/net/idpf/idpf_vchnl.c

diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 61c47ba5f4..4895f5f360 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -18,8 +18,10 @@
 
 #define IDPF_DEFAULT_RXQ_NUM	16
 #define IDPF_RX_BUFQ_PER_GRP	2
+#define IDPF_RXQ_PER_GRP	1
 #define IDPF_DEFAULT_TXQ_NUM	16
 #define IDPF_TX_COMPLQ_PER_GRP	1
+#define IDPF_TXQ_PER_GRP	1
 
 #define IDPF_MAX_PKT_TYPE	1024
 
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
new file mode 100644
index 0000000000..a9ed31c08a
--- /dev/null
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _IDPF_COMMON_RXTX_H_
+#define _IDPF_COMMON_RXTX_H_
+
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf_core.h>
+
+#include "idpf_common_device.h"
+
+struct idpf_rx_stats {
+	uint64_t mbuf_alloc_failed;
+};
+
+struct idpf_rx_queue {
+	struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
+	struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
+	const struct rte_memzone *mz;   /* memzone for Rx ring */
+	volatile void *rx_ring;
+	struct rte_mbuf **sw_ring;      /* address of SW ring */
+	uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */
+
+	uint16_t nb_rx_desc;            /* ring length */
+	uint16_t rx_tail;               /* current value of tail */
+	volatile uint8_t *qrx_tail;     /* register address of tail */
+	uint16_t rx_free_thresh;        /* max free RX desc to hold */
+	uint16_t nb_rx_hold;            /* number of held free RX desc */
+	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+
+	/* used for VPMD */
+	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
+	uint16_t rxrearm_start;    /* the idx we start the re-arming from */
+	uint64_t mbuf_initializer; /* value to init mbufs */
+
+	uint16_t rx_nb_avail;
+	uint16_t rx_next_avail;
+
+	uint16_t port_id;       /* device port ID */
+	uint16_t queue_id;      /* Rx queue index */
+	uint16_t rx_buf_len;    /* The packet buffer size */
+	uint16_t rx_hdr_len;    /* The header buffer size */
+	uint16_t max_pkt_len;   /* Maximum packet length */
+	uint8_t rxdid;
+
+	bool q_set;             /* if rx queue has been configured */
+	bool q_started;         /* if rx queue has been started */
+	bool rx_deferred_start; /* don't start this queue in dev start */
+	const struct idpf_rxq_ops *ops;
+
+	struct idpf_rx_stats rx_stats;
+
+	/* only valid for split queue mode */
+	uint8_t expected_gen_id;
+	struct idpf_rx_queue *bufq1;
+	struct idpf_rx_queue *bufq2;
+
+	uint64_t offloads;
+	uint32_t hw_register_set;
+};
+
+struct idpf_tx_entry {
+	struct rte_mbuf *mbuf;
+	uint16_t next_id;
+	uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct idpf_tx_queue {
+	const struct rte_memzone *mz;		/* memzone for Tx ring */
+	volatile struct idpf_flex_tx_desc *tx_ring;	/* Tx ring virtual address */
+	volatile union {
+		struct idpf_flex_tx_sched_desc *desc_ring;
+		struct idpf_splitq_tx_compl_desc *compl_ring;
+	};
+	uint64_t tx_ring_phys_addr;		/* Tx ring DMA address */
+	struct idpf_tx_entry *sw_ring;		/* address array of SW ring */
+
+	uint16_t nb_tx_desc;		/* ring length */
+	uint16_t tx_tail;		/* current value of tail */
+	volatile uint8_t *qtx_tail;	/* register address of tail */
+	/* number of used desc since RS bit set */
+	uint16_t nb_used;
+	uint16_t nb_free;
+	uint16_t last_desc_cleaned;	/* last desc have been cleaned*/
+	uint16_t free_thresh;
+	uint16_t rs_thresh;
+
+	uint16_t port_id;
+	uint16_t queue_id;
+	uint64_t offloads;
+	uint16_t next_dd;	/* next to set RS, for VPMD */
+	uint16_t next_rs;	/* next to check DD,  for VPMD */
+
+	bool q_set;		/* if tx queue has been configured */
+	bool q_started;		/* if tx queue has been started */
+	bool tx_deferred_start; /* don't start this queue in dev start */
+	const struct idpf_txq_ops *ops;
+
+	/* only valid for split queue mode */
+	uint16_t sw_nb_desc;
+	uint16_t sw_tail;
+	void **txqs;
+	uint32_t tx_start_qid;
+	uint8_t expected_gen_id;
+	struct idpf_tx_queue *complq;
+};
+
+#endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index f670d2cc17..188d0131a4 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -805,3 +805,163 @@ idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
 	rte_free(ptype_info);
 	return err;
 }
+
+#define IDPF_RX_BUF_STRIDE		64
+int
+idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+	struct virtchnl2_rxq_info *rxq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err, i;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+		num_qs = IDPF_RXQ_PER_GRP;
+	else
+		num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
+
+	size = sizeof(*vc_rxqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_rxq_info);
+	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+	if (vc_rxqs == NULL) {
+		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_rxqs->vport_id = vport->vport_id;
+	vc_rxqs->num_qinfo = num_qs;
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		rxq_info = &vc_rxqs->qinfo[0];
+		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+		rxq_info->queue_id = rxq->queue_id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		rxq_info->data_buffer_size = rxq->rx_buf_len;
+		rxq_info->max_pkt_size = vport->max_pkt_len;
+
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+		rxq_info->ring_len = rxq->nb_rx_desc;
+	}  else {
+		/* Rx queue */
+		rxq_info = &vc_rxqs->qinfo[0];
+		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
+		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+		rxq_info->queue_id = rxq->queue_id;
+		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+		rxq_info->data_buffer_size = rxq->rx_buf_len;
+		rxq_info->max_pkt_size = vport->max_pkt_len;
+
+		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+		rxq_info->ring_len = rxq->nb_rx_desc;
+		rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
+		rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
+		rxq_info->rx_buffer_low_watermark = 64;
+
+		/* Buffer queue */
+		for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
+			struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;
+			rxq_info = &vc_rxqs->qinfo[i];
+			rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
+			rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+			rxq_info->queue_id = bufq->queue_id;
+			rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+			rxq_info->data_buffer_size = bufq->rx_buf_len;
+			rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+			rxq_info->ring_len = bufq->nb_rx_desc;
+
+			rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+			rxq_info->rx_buffer_low_watermark = 64;
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+	args.in_args = (uint8_t *)vc_rxqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	rte_free(vc_rxqs);
+	if (err != 0)
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+	return err;
+}
+
+int
+idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+	struct virtchnl2_txq_info *txq_info;
+	struct idpf_cmd_info args;
+	uint16_t num_qs;
+	int size, err;
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+		num_qs = IDPF_TXQ_PER_GRP;
+	else
+		num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
+
+	size = sizeof(*vc_txqs) + (num_qs - 1) *
+		sizeof(struct virtchnl2_txq_info);
+	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+	if (vc_txqs == NULL) {
+		DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+		err = -ENOMEM;
+		return err;
+	}
+	vc_txqs->vport_id = vport->vport_id;
+	vc_txqs->num_qinfo = num_qs;
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		txq_info = &vc_txqs->qinfo[0];
+		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+		txq_info->queue_id = txq->queue_id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+		txq_info->ring_len = txq->nb_tx_desc;
+	} else {
+		/* txq info */
+		txq_info = &vc_txqs->qinfo[0];
+		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+		txq_info->queue_id = txq->queue_id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+		txq_info->ring_len = txq->nb_tx_desc;
+		txq_info->tx_compl_queue_id = txq->complq->queue_id;
+		txq_info->relative_queue_id = txq_info->queue_id;
+
+		/* tx completion queue info */
+		txq_info = &vc_txqs->qinfo[1];
+		txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
+		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+		txq_info->queue_id = txq->complq->queue_id;
+		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+		txq_info->ring_len = txq->complq->nb_tx_desc;
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+	args.in_args = (uint8_t *)vc_txqs;
+	args.in_args_size = size;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	rte_free(vc_txqs);
+	if (err != 0)
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+	return err;
+}
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 11dbc089cb..b8045ba63b 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -6,6 +6,7 @@
 #define _IDPF_COMMON_VIRTCHNL_H_
 
 #include <idpf_common_device.h>
+#include <idpf_common_rxtx.h>
 
 __rte_internal
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
@@ -31,6 +32,9 @@ __rte_internal
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
 		      uint16_t buf_len, uint8_t *buf);
 __rte_internal
+int idpf_execute_vc_cmd(struct idpf_adapter *adapter,
+			struct idpf_cmd_info *args);
+__rte_internal
 int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 		      bool rx, bool on);
 __rte_internal
@@ -42,7 +46,7 @@ int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
 __rte_internal
 int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
 __rte_internal
-int idpf_execute_vc_cmd(struct idpf_adapter *adapter,
-			struct idpf_cmd_info *args);
-
+int idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
+__rte_internal
+int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
 #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index b153647ee1..19de5c8122 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -19,6 +19,8 @@ INTERNAL {
 	idpf_vc_alloc_vectors;
 	idpf_vc_check_api_version;
 	idpf_vc_config_irq_map_unmap;
+	idpf_vc_config_rxq;
+	idpf_vc_config_txq;
 	idpf_vc_create_vport;
 	idpf_vc_dealloc_vectors;
 	idpf_vc_destroy_vport;
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index bef6199622..9b40aa4e56 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -23,8 +23,6 @@
 #define IDPF_MAX_VPORT_NUM	8
 
 #define IDPF_INVALID_VPORT_IDX	0xffff
-#define IDPF_TXQ_PER_GRP	1
-#define IDPF_RXQ_PER_GRP	1
 
 #define IDPF_DFLT_Q_VEC_NUM	1
 
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index cac6040943..b8325f9b96 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -5,6 +5,7 @@
 #ifndef _IDPF_RXTX_H_
 #define _IDPF_RXTX_H_
 
+#include <idpf_common_rxtx.h>
 #include "idpf_ethdev.h"
 
 /* MTS */
@@ -84,103 +85,10 @@
 
 extern uint64_t idpf_timestamp_dynflag;
 
-struct idpf_rx_queue {
-	struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
-	struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
-	const struct rte_memzone *mz;   /* memzone for Rx ring */
-	volatile void *rx_ring;
-	struct rte_mbuf **sw_ring;      /* address of SW ring */
-	uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */
-
-	uint16_t nb_rx_desc;            /* ring length */
-	uint16_t rx_tail;               /* current value of tail */
-	volatile uint8_t *qrx_tail;     /* register address of tail */
-	uint16_t rx_free_thresh;        /* max free RX desc to hold */
-	uint16_t nb_rx_hold;            /* number of held free RX desc */
-	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
-	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
-	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
-
-	/* used for VPMD */
-	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
-	uint16_t rxrearm_start;    /* the idx we start the re-arming from */
-	uint64_t mbuf_initializer; /* value to init mbufs */
-
-	uint16_t rx_nb_avail;
-	uint16_t rx_next_avail;
-
-	uint16_t port_id;       /* device port ID */
-	uint16_t queue_id;      /* Rx queue index */
-	uint16_t rx_buf_len;    /* The packet buffer size */
-	uint16_t rx_hdr_len;    /* The header buffer size */
-	uint16_t max_pkt_len;   /* Maximum packet length */
-	uint8_t rxdid;
-
-	bool q_set;             /* if rx queue has been configured */
-	bool q_started;         /* if rx queue has been started */
-	bool rx_deferred_start; /* don't start this queue in dev start */
-	const struct idpf_rxq_ops *ops;
-
-	/* only valid for split queue mode */
-	uint8_t expected_gen_id;
-	struct idpf_rx_queue *bufq1;
-	struct idpf_rx_queue *bufq2;
-
-	uint64_t offloads;
-	uint32_t hw_register_set;
-};
-
-struct idpf_tx_entry {
-	struct rte_mbuf *mbuf;
-	uint16_t next_id;
-	uint16_t last_id;
-};
-
 struct idpf_tx_vec_entry {
 	struct rte_mbuf *mbuf;
 };
 
-/* Structure associated with each TX queue. */
-struct idpf_tx_queue {
-	const struct rte_memzone *mz;		/* memzone for Tx ring */
-	volatile struct idpf_flex_tx_desc *tx_ring;	/* Tx ring virtual address */
-	volatile union {
-		struct idpf_flex_tx_sched_desc *desc_ring;
-		struct idpf_splitq_tx_compl_desc *compl_ring;
-	};
-	uint64_t tx_ring_phys_addr;		/* Tx ring DMA address */
-	struct idpf_tx_entry *sw_ring;		/* address array of SW ring */
-
-	uint16_t nb_tx_desc;		/* ring length */
-	uint16_t tx_tail;		/* current value of tail */
-	volatile uint8_t *qtx_tail;	/* register address of tail */
-	/* number of used desc since RS bit set */
-	uint16_t nb_used;
-	uint16_t nb_free;
-	uint16_t last_desc_cleaned;	/* last desc have been cleaned*/
-	uint16_t free_thresh;
-	uint16_t rs_thresh;
-
-	uint16_t port_id;
-	uint16_t queue_id;
-	uint64_t offloads;
-	uint16_t next_dd;	/* next to set RS, for VPMD */
-	uint16_t next_rs;	/* next to check DD,  for VPMD */
-
-	bool q_set;		/* if tx queue has been configured */
-	bool q_started;		/* if tx queue has been started */
-	bool tx_deferred_start; /* don't start this queue in dev start */
-	const struct idpf_txq_ops *ops;
-
-	/* only valid for split queue mode */
-	uint16_t sw_nb_desc;
-	uint16_t sw_tail;
-	void **txqs;
-	uint32_t tx_start_qid;
-	uint8_t expected_gen_id;
-	struct idpf_tx_queue *complq;
-};
-
 /* Offload features */
 union idpf_tx_offload {
 	uint64_t data;
@@ -239,9 +147,6 @@ void idpf_stop_queues(struct rte_eth_dev *dev);
 void idpf_set_rx_function(struct rte_eth_dev *dev);
 void idpf_set_tx_function(struct rte_eth_dev *dev);
 
-int idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
-int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
-
 #define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
 /* Helper function to convert a 32b nanoseconds timestamp to 64b. */
 static inline uint64_t
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
deleted file mode 100644
index 45d05ed108..0000000000
--- a/drivers/net/idpf/idpf_vchnl.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <rte_byteorder.h>
-#include <rte_common.h>
-
-#include <rte_debug.h>
-#include <rte_atomic.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
-#include <ethdev_driver.h>
-#include <ethdev_pci.h>
-#include <rte_dev.h>
-
-#include "idpf_ethdev.h"
-#include "idpf_rxtx.h"
-
-#define IDPF_RX_BUF_STRIDE		64
-int
-idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
-{
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
-	struct virtchnl2_rxq_info *rxq_info;
-	struct idpf_cmd_info args;
-	uint16_t num_qs;
-	int size, err, i;
-
-	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
-		num_qs = IDPF_RXQ_PER_GRP;
-	else
-		num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
-
-	size = sizeof(*vc_rxqs) + (num_qs - 1) *
-		sizeof(struct virtchnl2_rxq_info);
-	vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
-	if (vc_rxqs == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
-		err = -ENOMEM;
-		return err;
-	}
-	vc_rxqs->vport_id = vport->vport_id;
-	vc_rxqs->num_qinfo = num_qs;
-	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
-		rxq_info = &vc_rxqs->qinfo[0];
-		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
-		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-		rxq_info->queue_id = rxq->queue_id;
-		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
-		rxq_info->data_buffer_size = rxq->rx_buf_len;
-		rxq_info->max_pkt_size = vport->max_pkt_len;
-
-		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
-		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
-
-		rxq_info->ring_len = rxq->nb_rx_desc;
-	}  else {
-		/* Rx queue */
-		rxq_info = &vc_rxqs->qinfo[0];
-		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
-		rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-		rxq_info->queue_id = rxq->queue_id;
-		rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-		rxq_info->data_buffer_size = rxq->rx_buf_len;
-		rxq_info->max_pkt_size = vport->max_pkt_len;
-
-		rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
-		rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
-
-		rxq_info->ring_len = rxq->nb_rx_desc;
-		rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
-		rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
-		rxq_info->rx_buffer_low_watermark = 64;
-
-		/* Buffer queue */
-		for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
-			struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;
-			rxq_info = &vc_rxqs->qinfo[i];
-			rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
-			rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
-			rxq_info->queue_id = bufq->queue_id;
-			rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-			rxq_info->data_buffer_size = bufq->rx_buf_len;
-			rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
-			rxq_info->ring_len = bufq->nb_rx_desc;
-
-			rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
-			rxq_info->rx_buffer_low_watermark = 64;
-		}
-	}
-
-	memset(&args, 0, sizeof(args));
-	args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
-	args.in_args = (uint8_t *)vc_rxqs;
-	args.in_args_size = size;
-	args.out_buffer = adapter->mbx_resp;
-	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-
-	err = idpf_execute_vc_cmd(adapter, &args);
-	rte_free(vc_rxqs);
-	if (err != 0)
-		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
-
-	return err;
-}
-
-int
-idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
-{
-	struct idpf_adapter *adapter = vport->adapter;
-	struct virtchnl2_config_tx_queues *vc_txqs = NULL;
-	struct virtchnl2_txq_info *txq_info;
-	struct idpf_cmd_info args;
-	uint16_t num_qs;
-	int size, err;
-
-	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
-		num_qs = IDPF_TXQ_PER_GRP;
-	else
-		num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
-
-	size = sizeof(*vc_txqs) + (num_qs - 1) *
-		sizeof(struct virtchnl2_txq_info);
-	vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
-	if (vc_txqs == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
-		err = -ENOMEM;
-		return err;
-	}
-	vc_txqs->vport_id = vport->vport_id;
-	vc_txqs->num_qinfo = num_qs;
-
-	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
-		txq_info = &vc_txqs->qinfo[0];
-		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
-		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-		txq_info->queue_id = txq->queue_id;
-		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
-		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
-		txq_info->ring_len = txq->nb_tx_desc;
-	} else {
-		/* txq info */
-		txq_info = &vc_txqs->qinfo[0];
-		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
-		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-		txq_info->queue_id = txq->queue_id;
-		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-		txq_info->ring_len = txq->nb_tx_desc;
-		txq_info->tx_compl_queue_id = txq->complq->queue_id;
-		txq_info->relative_queue_id = txq_info->queue_id;
-
-		/* tx completion queue info */
-		txq_info = &vc_txqs->qinfo[1];
-		txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
-		txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
-		txq_info->queue_id = txq->complq->queue_id;
-		txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-		txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-		txq_info->ring_len = txq->complq->nb_tx_desc;
-	}
-
-	memset(&args, 0, sizeof(args));
-	args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
-	args.in_args = (uint8_t *)vc_txqs;
-	args.in_args_size = size;
-	args.out_buffer = adapter->mbx_resp;
-	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-
-	err = idpf_execute_vc_cmd(adapter, &args);
-	rte_free(vc_txqs);
-	if (err != 0)
-		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
-
-	return err;
-}
diff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build
index 650dade0b9..378925166f 100644
--- a/drivers/net/idpf/meson.build
+++ b/drivers/net/idpf/meson.build
@@ -18,7 +18,6 @@ deps += ['common_idpf']
 sources = files(
         'idpf_ethdev.c',
         'idpf_rxtx.c',
-        'idpf_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.26.2

