From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Subject: [PATCHv5 3/8] ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
Date: Thu,  1 Oct 2015 20:54:48 +0100
Message-ID: <1443729293-20753-4-git-send-email-konstantin.ananyev@intel.com>
In-Reply-To: <1443729293-20753-1-git-send-email-konstantin.ananyev@intel.com>

From: "Ananyev, Konstantin" <konstantin.ananyev@intel.com>

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
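Note (annotation only, not for the commit log): assuming the usual
16-byte ixgbe advanced descriptors, both new alignment macros evaluate
to 128 / 16 = 8, so the nb_desc passed to RX/TX queue setup must be a
multiple of 8 within [IXGBE_MIN_RING_DESC, IXGBE_MAX_RING_DESC],
i.e. [32, 4096].
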
 drivers/net/ixgbe/ixgbe_ethdev.c | 23 ++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 ++++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 68 +++++++++++++++++++++++++---------------
 drivers/net/ixgbe/ixgbe_rxtx.h   | 21 +++++++++++++
 4 files changed, 93 insertions(+), 25 deletions(-)
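
A minimal usage sketch (illustration only, not part of the patch),
assuming the rte_eth_rx_queue_info_get() wrapper introduced by patch
1/8 of this series; the port and queue ids below are placeholders:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    dump_rxq(uint8_t port_id, uint16_t queue_id)
    {
    	struct rte_eth_rxq_info qinfo;

    	/* Dispatches to ixgbe_rxq_info_get() through the rxq_info_get
    	 * dev_ops entry added by this patch. */
    	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
    		return;

    	printf("port %u rxq %u: nb_desc=%u scattered_rx=%d free_thresh=%u\n",
    		(unsigned)port_id, (unsigned)queue_id, (unsigned)qinfo.nb_desc,
    		(int)qinfo.scattered_rx, (unsigned)qinfo.conf.rx_free_thresh);
    }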

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2918c..4769bb0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -386,6 +386,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {
 
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = IXGBE_MAX_RING_DESC,
+	.nb_min = IXGBE_MIN_RING_DESC,
+	.nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = IXGBE_MAX_RING_DESC,
+	.nb_min = IXGBE_MIN_RING_DESC,
+	.nb_align = IXGBE_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.dev_configure        = ixgbe_dev_configure,
 	.dev_start            = ixgbe_dev_start,
@@ -456,6 +468,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
 	.filter_ctrl          = ixgbe_dev_filter_ctrl,
 	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+	.rxq_info_get         = ixgbe_rxq_info_get,
+	.txq_info_get         = ixgbe_txq_info_get,
 	.timesync_enable      = ixgbe_timesync_enable,
 	.timesync_disable     = ixgbe_timesync_disable,
 	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -494,6 +508,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.mac_addr_add         = ixgbevf_add_mac_addr,
 	.mac_addr_remove      = ixgbevf_remove_mac_addr,
 	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+	.rxq_info_get         = ixgbe_rxq_info_get,
+	.txq_info_get         = ixgbe_txq_info_get,
 	.mac_addr_set         = ixgbevf_set_default_mac_addr,
 	.get_reg_length       = ixgbevf_get_reg_length,
 	.get_reg              = ixgbevf_get_regs,
@@ -2396,6 +2412,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 				ETH_TXQ_FLAGS_NOOFFLOADS,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
+
 	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
 	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
@@ -2449,6 +2469,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 				ETH_TXQ_FLAGS_NOOFFLOADS,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..d16f476 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -351,6 +351,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
 
 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..ba08588 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1821,25 +1821,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  **********************************************************************/
 
 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
-/*
  * Create memzone for HW rings. malloc can't be used as the physical address is
  * needed. If the memzone is already created, then this function returns a ptr
  * to the old one.
@@ -2007,9 +1988,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
-	    (nb_desc > IXGBE_MAX_RING_DESC) ||
-	    (nb_desc < IXGBE_MIN_RING_DESC)) {
+	if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+			(nb_desc > IXGBE_MAX_RING_DESC) ||
+			(nb_desc < IXGBE_MIN_RING_DESC)) {
 		return -EINVAL;
 	}
 
@@ -2374,9 +2355,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
-	    (nb_desc > IXGBE_MAX_RING_DESC) ||
-	    (nb_desc < IXGBE_MIN_RING_DESC)) {
+	if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+			(nb_desc > IXGBE_MAX_RING_DESC) ||
+			(nb_desc < IXGBE_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
 
@@ -4649,6 +4630,43 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct ixgbe_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct ixgbe_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index b9eca67..475a800 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -34,6 +34,27 @@
 #ifndef _IXGBE_RXTX_H_
 #define _IXGBE_RXTX_H_
 
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define	IXGBE_ALIGN	128
+
+#define IXGBE_RXD_ALIGN	(IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
+#define IXGBE_TXD_ALIGN	(IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define	IXGBE_MIN_RING_DESC	32
+#define	IXGBE_MAX_RING_DESC	4096
 
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
-- 
1.8.3.1

Thread overview: 43+ messages
2015-10-01 19:54 [PATCHv5 0/8] ethdev: add new API to retrieve RX/TX queue information Konstantin Ananyev
2015-10-01 19:54 ` [PATCHv5 1/8] " Konstantin Ananyev
2015-10-14 11:39   ` [dpdk-dev, PATCHv5, " Amine Kherbouche
2015-10-14 11:49     ` Ananyev, Konstantin
2015-10-14 12:21       ` Amine Kherbouche
2015-10-14 12:42         ` Ananyev, Konstantin
2015-10-14 12:47           ` Amine Kherbouche
2015-10-14 12:48             ` Ananyev, Konstantin
2015-10-20  7:53       ` Qiu, Michael
2015-10-20  8:09         ` Vincent JARDIN
2015-10-20  8:32           ` Qiu, Michael
2015-10-14 12:44   ` [PATCHv5 " Remy Horton
2015-10-14 16:09   ` Stephen Hemminger
2015-10-14 18:44     ` Ananyev, Konstantin
2015-10-16 13:16       ` Bruce Richardson
2015-10-19 22:06   ` [dpdk-dev,PATCHv6 0/6] Enhance queue information API Amine Kherbouche
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 1/6] ethdev: enhance rte_eth_(tx|rx)q_info struct Amine Kherbouche
2015-10-19 22:44       ` Stephen Hemminger
2015-10-20  9:52         ` Ananyev, Konstantin
2015-10-20 14:55           ` Amine Kherbouche
2015-10-20  9:36       ` Ananyev, Konstantin
2015-10-20 15:16         ` Thomas Monjalon
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 2/6] testpmd: enhance the command to display RX/TX queue information Amine Kherbouche
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 3/6] virtio: add support for eth_(rxq|txq)_info_get Amine Kherbouche
2015-10-20  7:31       ` Tan, Jianfeng
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 4/6] e1000: enhance eth_(rxq|txq)_info_get to retrieve more queue information Amine Kherbouche
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 5/6] i40e: " Amine Kherbouche
2015-10-19 22:43       ` Stephen Hemminger
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 6/6] ixgbe: " Amine Kherbouche
2015-10-01 19:54 ` [PATCHv5 2/8] i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim Konstantin Ananyev
2015-10-14 12:46   ` Remy Horton
2015-10-01 19:54 ` Konstantin Ananyev [this message]
2015-10-14 12:47   ` [PATCHv5 3/8] ixgbe: " Remy Horton
2015-10-01 19:54 ` [PATCHv5 4/8] e1000: " Konstantin Ananyev
2015-10-14 12:48   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 5/8] fm10k: add HW specific desc_lim data into dev_info Konstantin Ananyev
2015-10-14 12:48   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 6/8] cxgbe: " Konstantin Ananyev
2015-10-14 12:49   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 7/8] vmxnet3: " Konstantin Ananyev
2015-10-14 12:49   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 8/8] testpmd: add new command to display RX/TX queue information Konstantin Ananyev
2015-10-14 12:49   ` Remy Horton
