From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Subject: [PATCHv5 4/8] e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
Date: Thu,  1 Oct 2015 20:54:49 +0100
Message-ID: <1443729293-20753-5-git-send-email-konstantin.ananyev@intel.com>
In-Reply-To: <1443729293-20753-1-git-send-email-konstantin.ananyev@intel.com>

From: "Ananyev, Konstantin" <konstantin.ananyev@intel.com>

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 drivers/net/e1000/e1000_ethdev.h | 36 ++++++++++++++++++++
 drivers/net/e1000/em_ethdev.c    | 14 ++++++++
 drivers/net/e1000/em_rxtx.c      | 71 +++++++++++++++++++++++-----------------
 drivers/net/e1000/igb_ethdev.c   | 22 +++++++++++++
 drivers/net/e1000/igb_rxtx.c     | 66 ++++++++++++++++++++++++-------------
 5 files changed, 156 insertions(+), 53 deletions(-)
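
Note (not part of the commit message): the new E1000_MIN/MAX_RING_DESC and
EM_*/IGB_*_ALIGN macros below express the old byte-based check,
(nb_desc * sizeof(desc)) % 128 == 0, as a descriptor-count alignment. The
legacy and advanced descriptor formats used here are each 16 bytes, so every
*_ALIGN macro works out to 128 / 16 = 8 descriptors. A minimal sketch of how
an application could use the rte_eth_desc_lim values filled in by this patch
to pick a valid ring size; adjust_ring_size() is a hypothetical helper, not
part of this series:

#include <rte_common.h>
#include <rte_ethdev.h>

/* Hypothetical helper (illustration only): clamp and align a requested ring
 * size to the limits the PMD reports in rte_eth_dev_info.
 */
static uint16_t
adjust_ring_size(uint16_t requested, const struct rte_eth_desc_lim *lim)
{
	uint16_t nb;

	nb = RTE_MAX(requested, lim->nb_min);
	nb = RTE_MIN(nb, lim->nb_max);
	/* Round up to the next multiple of nb_align (8 for these drivers). */
	nb = ((nb + lim->nb_align - 1) / lim->nb_align) * lim->nb_align;
	if (nb > lim->nb_max)
		nb = lim->nb_max - (lim->nb_max % lim->nb_align);
	return nb;
}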

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..3c6f613 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -108,6 +108,30 @@
 	ETH_RSS_IPV6_TCP_EX | \
 	ETH_RSS_IPV6_UDP_EX)
 
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define	E1000_MIN_RING_DESC	32
+#define	E1000_MAX_RING_DESC	4096
+
+/*
+ * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
+ * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary.
+ * This also helps with cache line size effects.
+ * H/W supports cache line sizes up to 128 bytes.
+ */
+#define	E1000_ALIGN	128
+
+#define	IGB_RXD_ALIGN	(E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define	IGB_TXD_ALIGN	(E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define	EM_RXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define	EM_TXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_data_desc))
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
 	uint32_t flags;
@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);
 
 #endif /* _E1000_ETHDEV_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 912f5dd..0cbc228 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
 	.mac_addr_add         = eth_em_rar_set,
 	.mac_addr_remove      = eth_em_rar_clear,
 	.set_mc_addr_list     = eth_em_set_mc_addr_list,
+	.rxq_info_get         = em_rxq_info_get,
+	.txq_info_get         = em_txq_info_get,
 };
 
 /**
@@ -933,6 +935,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = 1;
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = E1000_MAX_RING_DESC,
+		.nb_min = E1000_MIN_RING_DESC,
+		.nb_align = EM_RXD_ALIGN,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = E1000_MAX_RING_DESC,
+		.nb_min = E1000_MIN_RING_DESC,
+		.nb_align = EM_TXD_ALIGN,
+	};
 }
 
 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..03e1bc2 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return (nb_rx);
 }
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
 #define	EM_MAX_BUF_SIZE     16384
 #define EM_RCTL_FLXBUF_STEP 1024
 
@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of EM_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
-			(nb_desc > EM_MAX_RING_DESC) ||
-			(nb_desc < EM_MIN_RING_DESC)) {
+	if (nb_desc % EM_TXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return -(EINVAL);
 	}
 
@@ -1272,7 +1252,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
+	tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
 	if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
 			socket_id)) == NULL)
 		return (-ENOMEM);
@@ -1375,11 +1355,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of EM_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
-			(nb_desc > EM_MAX_RING_DESC) ||
-			(nb_desc < EM_MIN_RING_DESC)) {
+	if (nb_desc % EM_RXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
 
@@ -1399,7 +1379,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	/* Allocate RX ring for max possible mumber of hardware descriptors. */
-	rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
+	rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
 	if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
 			socket_id)) == NULL)
 		return (-ENOMEM);
@@ -1881,3 +1861,34 @@ eth_em_tx_init(struct rte_eth_dev *dev)
 	/* This write will effectively turn on the transmit unit. */
 	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
 }
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct em_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct em_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 848ef6e..73c067e 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -281,6 +281,18 @@ static const struct rte_pci_id pci_id_igbvf_map[] = {
 {0},
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = E1000_MAX_RING_DESC,
+	.nb_min = E1000_MIN_RING_DESC,
+	.nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = E1000_MAX_RING_DESC,
+	.nb_min = E1000_MIN_RING_DESC,
+	.nb_align = IGB_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops eth_igb_ops = {
 	.dev_configure        = eth_igb_configure,
 	.dev_start            = eth_igb_start,
@@ -319,6 +331,8 @@ static const struct eth_dev_ops eth_igb_ops = {
 	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
 	.filter_ctrl          = eth_igb_filter_ctrl,
 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
+	.rxq_info_get         = igb_rxq_info_get,
+	.txq_info_get         = igb_txq_info_get,
 	.timesync_enable      = igb_timesync_enable,
 	.timesync_disable     = igb_timesync_disable,
 	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
@@ -349,6 +363,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
 	.tx_queue_setup       = eth_igb_tx_queue_setup,
 	.tx_queue_release     = eth_igb_tx_queue_release,
 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
+	.rxq_info_get         = igb_rxq_info_get,
+	.txq_info_get         = igb_txq_info_get,
 	.mac_addr_set         = igbvf_default_mac_addr_set,
 	.get_reg_length       = igbvf_get_reg_length,
 	.get_reg              = igbvf_get_regs,
@@ -1570,6 +1586,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.txq_flags = 0,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 static void
@@ -1621,6 +1640,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.txq_flags = 0,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 19905fd..cca3300 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1148,25 +1148,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define IGB_ALIGN 128
-
-/*
  * Maximum number of Ring Descriptors.
  *
  * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
  * desscriptors should meet the following condition:
  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
  */
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -1183,10 +1170,10 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 
 #ifdef RTE_LIBRTE_XEN_DOM0
 	return rte_memzone_reserve_bounded(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
+			socket_id, 0, E1000_ALIGN, RTE_PGSIZE_2M);
 #else
 	return rte_memzone_reserve_aligned(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN);
+			socket_id, 0, E1000_ALIGN);
 #endif
 }
 
@@ -1282,10 +1269,11 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
-	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+	if (nb_desc % IGB_TXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return -EINVAL;
 	}
 
@@ -1321,7 +1309,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
+	size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
 	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
 					size, socket_id);
 	if (tz == NULL) {
@@ -1430,10 +1418,11 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
-	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+	if (nb_desc % IGB_RXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
 
@@ -1469,7 +1458,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	 *  handle the maximum ring size is allocated in order to allow for
 	 *  resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
+	size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
 	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
 	if (rz == NULL) {
 		igb_rx_queue_release(rxq);
@@ -2482,3 +2471,34 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 	}
 
 }
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct igb_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct igb_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}
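
What follows is not part of the patch: a minimal application-side sketch
showing how the new callbacks are reached through the generic helpers added
in patch 1/8 of this series. It assumes the 2.x-era ethdev prototypes
(uint8_t port ids, rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get()
returning 0 on success), so treat the exact signatures as assumptions rather
than a definitive reference.

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustration only: dump descriptor limits and queue 0 info for one port. */
static void
show_queue_info(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	rte_eth_dev_info_get(port_id, &dev_info);
	printf("rx ring limits: min=%u max=%u align=%u\n",
	       dev_info.rx_desc_lim.nb_min,
	       dev_info.rx_desc_lim.nb_max,
	       dev_info.rx_desc_lim.nb_align);

	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_qinfo) == 0)
		printf("rxq0: nb_desc=%u scattered_rx=%u\n",
		       rx_qinfo.nb_desc, rx_qinfo.scattered_rx);

	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_qinfo) == 0)
		printf("txq0: nb_desc=%u tx_free_thresh=%u\n",
		       tx_qinfo.nb_desc, tx_qinfo.conf.tx_free_thresh);
}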
-- 
1.8.3.1

Thread overview: 43+ messages
2015-10-01 19:54 [PATCHv5 0/8] ethdev: add new API to retrieve RX/TX queue information Konstantin Ananyev
2015-10-01 19:54 ` [PATCHv5 1/8] " Konstantin Ananyev
2015-10-14 11:39   ` [dpdk-dev, PATCHv5, " Amine Kherbouche
2015-10-14 11:49     ` Ananyev, Konstantin
2015-10-14 12:21       ` Amine Kherbouche
2015-10-14 12:42         ` Ananyev, Konstantin
2015-10-14 12:47           ` Amine Kherbouche
2015-10-14 12:48             ` Ananyev, Konstantin
2015-10-20  7:53       ` Qiu, Michael
2015-10-20  8:09         ` Vincent JARDIN
2015-10-20  8:32           ` Qiu, Michael
2015-10-14 12:44   ` [PATCHv5 " Remy Horton
2015-10-14 16:09   ` Stephen Hemminger
2015-10-14 18:44     ` Ananyev, Konstantin
2015-10-16 13:16       ` Bruce Richardson
2015-10-19 22:06   ` [dpdk-dev,PATCHv6 0/6] Enhance queue information API Amine Kherbouche
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 1/6] ethdev: enhance rte_eth_(tx|rx)q_info struct Amine Kherbouche
2015-10-19 22:44       ` Stephen Hemminger
2015-10-20  9:52         ` Ananyev, Konstantin
2015-10-20 14:55           ` Amine Kherbouche
2015-10-20  9:36       ` Ananyev, Konstantin
2015-10-20 15:16         ` Thomas Monjalon
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 2/6] testpmd: enhance the command to display RX/TX queue information Amine Kherbouche
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 3/6] virtio: add support for eth_(rxq|txq)_info_get Amine Kherbouche
2015-10-20  7:31       ` Tan, Jianfeng
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 4/6] e1000: enhance eth_(rxq|txq)_info_get to retrieve more queue information Amine Kherbouche
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 5/6] i40e: " Amine Kherbouche
2015-10-19 22:43       ` Stephen Hemminger
2015-10-19 22:06     ` [dpdk-dev, PATCHv6 6/6] ixgbe: " Amine Kherbouche
2015-10-01 19:54 ` [PATCHv5 2/8] i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim Konstantin Ananyev
2015-10-14 12:46   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 3/8] ixgbe: " Konstantin Ananyev
2015-10-14 12:47   ` Remy Horton
2015-10-01 19:54 ` Konstantin Ananyev [this message]
2015-10-14 12:48   ` [PATCHv5 4/8] e1000: " Remy Horton
2015-10-01 19:54 ` [PATCHv5 5/8] fm10k: add HW specific desc_lim data into dev_info Konstantin Ananyev
2015-10-14 12:48   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 6/8] cxgbe: " Konstantin Ananyev
2015-10-14 12:49   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 7/8] vmxnet3: " Konstantin Ananyev
2015-10-14 12:49   ` Remy Horton
2015-10-01 19:54 ` [PATCHv5 8/8] testpmd: add new command to display RX/TX queue information Konstantin Ananyev
2015-10-14 12:49   ` Remy Horton
