From: Ravi Kumar <Ravi1.kumar@amd.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com
Subject: [PATCH v4 09/17] net/axgbe: add DMA programming and dev start and stop apis
Date: Thu,  5 Apr 2018 02:39:41 -0400	[thread overview]
Message-ID: <1522910389-35530-9-git-send-email-Ravi1.kumar@amd.com> (raw)
In-Reply-To: <1522910389-35530-1-git-send-email-Ravi1.kumar@amd.com>

This patch adds support for programming the DMA engine and implements
the DPDK device start and stop APIs.

Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
 drivers/net/axgbe/axgbe_dev.c    | 844 +++++++++++++++++++++++++++++++++++++++
 drivers/net/axgbe/axgbe_ethdev.c |  96 +++++
 2 files changed, 940 insertions(+)
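
For context, here is a minimal sketch of how an application would exercise
the new dev ops through the generic ethdev API (assuming port 0 is an axgbe
device and "mb_pool" is an already-created mbuf pool; the function name and
descriptor counts are illustrative only):

	#include <rte_ethdev.h>
	#include <rte_lcore.h>

	static int bring_up_port(struct rte_mempool *mb_pool)
	{
		uint16_t port_id = 0;
		struct rte_eth_conf conf = {
			.rxmode = { .mq_mode = ETH_MQ_RX_NONE },
		};
		int ret;

		/* dispatches to axgbe_dev_configure() */
		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
		if (ret < 0)
			return ret;
		ret = rte_eth_rx_queue_setup(port_id, 0, 512,
					     rte_socket_id(), NULL, mb_pool);
		if (ret < 0)
			return ret;
		ret = rte_eth_tx_queue_setup(port_id, 0, 512,
					     rte_socket_id(), NULL);
		if (ret < 0)
			return ret;
		/* dispatches to axgbe_dev_start() */
		return rte_eth_dev_start(port_id);
	}
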

diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
index 82fe7bf..d05f9c8 100644
--- a/drivers/net/axgbe/axgbe_dev.c
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -6,6 +6,13 @@
 #include "axgbe_ethdev.h"
 #include "axgbe_common.h"
 #include "axgbe_phy.h"
+#include "axgbe_rxtx.h"
+
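+/* Maximum frame length: MTU plus Ethernet header, CRC and one VLAN tag */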
+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
+{
+	return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
+		ETHER_CRC_LEN + VLAN_HLEN;
+}
 
 /* query busy bit */
 static int mdio_complete(struct axgbe_port *pdata)
@@ -212,6 +219,191 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
 	return 0;
 }
 
+static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
+{
+	unsigned int max_q_count, q_count;
+	unsigned int reg, reg_val;
+	unsigned int i;
+
+	/* Clear MTL flow control */
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+	/* Clear MAC flow control */
+	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+	q_count = RTE_MIN(pdata->tx_q_count,
+			max_q_count);
+	reg = MAC_Q0TFCR;
+	for (i = 0; i < q_count; i++) {
+		reg_val = AXGMAC_IOREAD(pdata, reg);
+		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+		AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+		reg += MAC_QTFCR_INC;
+	}
+
+	return 0;
+}
+
+static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
+{
+	unsigned int max_q_count, q_count;
+	unsigned int reg, reg_val;
+	unsigned int i;
+
+	/* Set MTL flow control */
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		unsigned int ehfc = 0;
+
+		/* Enable hardware flow control only if thresholds were
+		 * established
+		 */
+		if (pdata->rx_rfd[i])
+			ehfc = 1;
+
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+	}
+
+	/* Set MAC flow control */
+	max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+	q_count = RTE_MIN(pdata->tx_q_count,
+			max_q_count);
+	reg = MAC_Q0TFCR;
+	for (i = 0; i < q_count; i++) {
+		reg_val = AXGMAC_IOREAD(pdata, reg);
+
+		/* Enable transmit flow control */
+		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+		/* Set pause time */
+		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+		AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+		reg += MAC_QTFCR_INC;
+	}
+
+	return 0;
+}
+
+static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
+{
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+	return 0;
+}
+
+static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
+{
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+	return 0;
+}
+
+static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
+{
+	if (pdata->tx_pause)
+		axgbe_enable_tx_flow_control(pdata);
+	else
+		axgbe_disable_tx_flow_control(pdata);
+
+	return 0;
+}
+
+static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
+{
+	if (pdata->rx_pause)
+		axgbe_enable_rx_flow_control(pdata);
+	else
+		axgbe_disable_rx_flow_control(pdata);
+
+	return 0;
+}
+
+static void axgbe_config_flow_control(struct axgbe_port *pdata)
+{
+	axgbe_config_tx_flow_control(pdata);
+	axgbe_config_rx_flow_control(pdata);
+
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
+}
+
+static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
+					       unsigned int queue,
+					       unsigned int q_fifo_size)
+{
+	unsigned int frame_fifo_size;
+	unsigned int rfa, rfd;
+
+	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
+
+	/* This path deals with just maximum frame sizes which are
+	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
+	 * so we can never exceed the maximum allowable RFA/RFD
+	 * values.
+	 */
+	if (q_fifo_size <= 2048) {
+		/* Set rx_rfa/rx_rfd to zero to signal no flow control */
+		pdata->rx_rfa[queue] = 0;
+		pdata->rx_rfd[queue] = 0;
+		return;
+	}
+
+	if (q_fifo_size <= 4096) {
+		/* Between 2048 and 4096 */
+		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
+		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
+		return;
+	}
+
+	if (q_fifo_size <= frame_fifo_size) {
+		/* Between 4096 and max-frame */
+		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
+		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
+		return;
+	}
+
+	if (q_fifo_size <= (frame_fifo_size * 3)) {
+		/* Between max-frame and 3 max-frames,
+		 * trigger if we get just over a frame of data and
+		 * resume when we have just under half a frame left.
+		 */
+		rfa = q_fifo_size - frame_fifo_size;
+		rfd = rfa + (frame_fifo_size / 2);
+	} else {
+		/* Above 3 max-frames - trigger when just over
+		 * 2 frames of space available
+		 */
+		rfa = frame_fifo_size * 2;
+		rfa += AXGMAC_FLOW_CONTROL_UNIT;
+		rfd = rfa + frame_fifo_size;
+	}
+
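+	/* Encode the byte counts into RFA/RFD register units */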
+	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
+	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
+}
+
+static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
+{
+	unsigned int q_fifo_size;
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
+
+		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+	}
+}
+
+static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++) {
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+					pdata->rx_rfa[i]);
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+					pdata->rx_rfd[i]);
+	}
+}
+
 static int __axgbe_exit(struct axgbe_port *pdata)
 {
 	unsigned int count = 2000;
@@ -244,10 +436,659 @@ static int axgbe_exit(struct axgbe_port *pdata)
 	return __axgbe_exit(pdata);
 }
 
+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
+{
+	unsigned int i, count;
+
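+	/* Queue flushing is only supported on MAC version 2.1 and later */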
+	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+		return 0;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+	/* Poll until the flush-queue bit self-clears */
+	for (i = 0; i < pdata->tx_q_count; i++) {
+		count = 2000;
+		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
+							 MTL_Q_TQOMR, FTQ))
+			rte_delay_us(500);
+
+		if (!count)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void axgbe_config_dma_bus(struct axgbe_port *pdata)
+{
+	/* Set enhanced addressing mode */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+	/* Outstanding read/write requests */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
+
+	/* Set the System Bus mode */
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
+	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
+}
+
+static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+{
+	unsigned int arcache, awcache, arwcache;
+
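+	/* Set the AXI cache attributes used for DMA reads and writes */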
+	arcache = 0;
+	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
+	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+	awcache = 0;
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
+	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+
+	arwcache = 0;
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
+	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+}
+
+static void axgbe_config_edma_control(struct axgbe_port *pdata)
+{
+	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
+	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
+}
+
+static int axgbe_config_osp_mode(struct axgbe_port *pdata)
+{
+	/* Force DMA to operate on second packet before closing descriptors
+	 *  of first packet
+	 */
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
+					pdata->tx_osp_mode);
+	}
+
+	return 0;
+}
+
+static int axgbe_config_pblx8(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
+					pdata->pblx8);
+	}
+	return 0;
+}
+
+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
+				pdata->tx_pbl);
+	}
+
+	return 0;
+}
+
+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
+{
+	struct axgbe_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
+				pdata->rx_pbl);
+	}
+
+	return 0;
+}
+
+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
+{
+	struct axgbe_rx_queue *rxq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+
+		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+			RTE_PKTMBUF_HEADROOM;
+		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
+			~(AXGBE_RX_BUF_ALIGN - 1);
+
+		if (rxq->buf_size > pdata->rx_buf_size)
+			pdata->rx_buf_size = rxq->buf_size;
+
+		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
+					rxq->buf_size);
+	}
+}
+
+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
+			       unsigned int index, unsigned int val)
+{
+	unsigned int wait;
+
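+	/* A set OB bit means a previous RSS register operation is pending */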
+	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+		return -EBUSY;
+
+	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+	wait = 1000;
+	while (wait--) {
+		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+			return 0;
+
+		rte_delay_us(1500);
+	}
+
+	return -EBUSY;
+}
+
+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+	unsigned int *key;
+	int ret;
+
+	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+	if (!rss_conf->rss_key)
+		key = (unsigned int *)&pdata->rss_key;
+	else
+		key = (unsigned int *)rss_conf->rss_key;
+
+	while (key_regs--) {
+		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
+					  key_regs, *key++);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+		ret = axgbe_write_rss_reg(pdata,
+					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
+					  pdata->rss_table[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int axgbe_enable_rss(struct axgbe_port *pdata)
+{
+	int ret;
+
+	/* Program the hash key */
+	ret = axgbe_write_rss_hash_key(pdata);
+	if (ret)
+		return ret;
+
+	/* Program the lookup table */
+	ret = axgbe_write_rss_lookup_table(pdata);
+	if (ret)
+		return ret;
+
+	/* Set the RSS options */
+	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+	/* Enable RSS */
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+	return 0;
+}
+
+static void axgbe_rss_options(struct axgbe_port *pdata)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	uint64_t rss_hf;
+
+	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+	rss_hf = rss_conf->rss_hf;
+
+	if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+	if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+}
+
+static int axgbe_config_rss(struct axgbe_port *pdata)
+{
+	uint32_t i;
+
+	if (pdata->rss_enable) {
+		/* Initialize RSS hash key and lookup table */
+		uint32_t *key = (uint32_t *)pdata->rss_key;
+
+		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
+			*key++ = (uint32_t)rte_rand();
+		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
+			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+					i % pdata->eth_dev->data->nb_rx_queues);
+		axgbe_rss_options(pdata);
+		if (axgbe_enable_rss(pdata)) {
+			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+			return -1;
+		}
+	} else {
+		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+	}
+
+	return 0;
+}
+
+static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int dma_ch_isr, dma_ch_ier;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+
+		/* Clear all the interrupts which are set */
+		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
+
+		/* Clear all interrupt enable bits */
+		dma_ch_ier = 0;
+
+		/* Configure interrupt summary enables
+		 *   NIE  - Normal Interrupt Summary (left disabled; Rx/Tx
+		 *          completions are handled by polling)
+		 *   AIE  - Abnormal Interrupt Summary Enable
+		 *   FBEE - Fatal Bus Error Enable
+		 */
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+		/* Rx interrupts
+		 *   RBUE - Receive Buffer Unavailable (left disabled;
+		 *          the PMD recovers buffers in the poll loop)
+		 */
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
+	}
+}
+
+static void wrapper_tx_desc_init(struct axgbe_port *pdata)
+{
+	struct axgbe_tx_queue *txq;
+	unsigned int i;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+		txq = pdata->eth_dev->data->tx_queues[i];
+		txq->cur = 0;
+		txq->dirty = 0;
+		/* Update the total number of Tx descriptors */
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
+		/* Update the starting address of descriptor ring */
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
+					high32_value(txq->ring_phys_addr));
+		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
+					low32_value(txq->ring_phys_addr));
+	}
+}
+
+static int wrapper_rx_desc_init(struct axgbe_port *pdata)
+{
+	struct axgbe_rx_queue *rxq;
+	struct rte_mbuf *mbuf;
+	volatile union axgbe_rx_desc *desc;
+	unsigned int i, j;
+
+	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->eth_dev->data->rx_queues[i];
+
+		/* Initialize software ring entries */
+		rxq->mbuf_alloc = 0;
+		rxq->cur = 0;
+		rxq->dirty = 0;
+		desc = AXGBE_GET_DESC_PT(rxq, 0);
+
+		for (j = 0; j < rxq->nb_desc; j++) {
+			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+			if (mbuf == NULL) {
+				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
+					    (unsigned int)rxq->queue_id, j);
+				axgbe_dev_rx_queue_release(rxq);
+				return -ENOMEM;
+			}
+			rxq->sw_ring[j] = mbuf;
+			/* Mbuf populate */
+			mbuf->next = NULL;
+			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+			mbuf->nb_segs = 1;
+			mbuf->port = rxq->port_id;
+			desc->read.baddr =
+				rte_cpu_to_le_64(
+					rte_mbuf_data_iova_default(mbuf));
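+			/* Make the buffer address visible before handing
+			 * the descriptor to hardware
+			 */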
+			rte_wmb();
+			AXGMAC_SET_BITS_LE(desc->read.desc3,
+						RX_NORMAL_DESC3, OWN, 1);
+			rte_wmb();
+			rxq->mbuf_alloc++;
+			desc++;
+		}
+		/* Update the total number of Rx descriptors */
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
+					rxq->nb_desc - 1);
+		/* Update the starting address of descriptor ring */
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
+					high32_value(rxq->ring_phys_addr));
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
+					low32_value(rxq->ring_phys_addr));
+		/* Update the Rx Descriptor Tail Pointer */
+		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+				   low32_value(rxq->ring_phys_addr +
+				   (rxq->nb_desc - 1) *
+				   sizeof(union axgbe_rx_desc)));
+	}
+	return 0;
+}
+
+static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
+{
+	unsigned int i;
+
+	/* Set Tx to weighted round robin scheduling algorithm */
+	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+	/* Set Tx traffic classes to use WRR algorithm with equal weights */
+	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+				MTL_TSA_ETS);
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+	}
+
+	/* Set Rx to strict priority algorithm */
+	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+	return 0;
+}
+
+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+	return 0;
+}
+
+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
+				     unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+	return 0;
+}
+
+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+				     unsigned int val)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+	return 0;
+}
+
+/* Distribute the Rx fifo size across the Rx queues */
+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo, i;
+
+	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+			  pdata->hw_feat.rx_fifo_size);
+	q_fifo_size = fifo_size / pdata->rx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (with 0 representing the
+	 * base allocation increment so decrement the result
+	 * by 1).
+	 */
+	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+	if (p_fifo)
+		p_fifo--;
+
+	for (i = 0; i < pdata->rx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
+	pdata->fifo = p_fifo;
+
+	/* Calculate and configure the flow control thresholds */
+	axgbe_calculate_flow_control_threshold(pdata);
+	axgbe_config_flow_control_threshold(pdata);
+}
+
+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
+{
+	unsigned int fifo_size;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo, i;
+
+	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+				pdata->hw_feat.tx_fifo_size);
+	q_fifo_size = fifo_size / pdata->tx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (with 0 representing the
+	 * base allocation increment so decrement the result
+	 * by 1).
+	 */
+	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+	if (p_fifo)
+		p_fifo--;
+
+	for (i = 0; i < pdata->tx_q_count; i++)
+		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
+}
+
+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
+{
+	unsigned int qptc, qptc_extra, queue;
+	unsigned int i, j, reg, reg_val;
+
+	/* Map the MTL Tx Queues to Traffic Classes
+	 *   Note: Tx Queues >= Traffic Classes
+	 */
+	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		for (j = 0; j < qptc; j++)
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+		if (i < qptc_extra)
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+	}
+
+	if (pdata->rss_enable) {
+		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+		reg = MTL_RQDCM0R;
+		reg_val = 0;
+		for (i = 0; i < pdata->rx_q_count;) {
+			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+			if ((i % MTL_RQDCM_Q_PER_REG) &&
+			    (i != pdata->rx_q_count))
+				continue;
+
+			AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+			reg += MTL_RQDCM_INC;
+			reg_val = 0;
+		}
+	}
+}
+
+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
+{
+	unsigned int mtl_q_isr;
+	unsigned int q_count, i;
+
+	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+	for (i = 0; i < q_count; i++) {
+		/* Clear all the interrupts which are set */
+		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+		/* No MTL interrupts to be enabled */
+		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+	}
+}
+
+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
+{
+	unsigned int mac_addr_hi, mac_addr_lo;
+
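+	/* Split the address across registers: bytes 0-3 low, bytes 4-5 high */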
+	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
+	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+		(addr[1] <<  8) | (addr[0] <<  0);
+
+	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+	return 0;
+}
+
+static void axgbe_config_mac_address(struct axgbe_port *pdata)
+{
+	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
+}
+
+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
+{
+	unsigned int val;
+
+	/* Always enable jumbo frames for now, regardless of Rx buffer size */
+	val = 1;
+
+	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void axgbe_config_mac_speed(struct axgbe_port *pdata)
+{
+	axgbe_set_speed(pdata, pdata->phy_speed);
+}
+
+static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
+{
+	if (pdata->rx_csum_enable)
+		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+	else
+		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+}
+
+static int axgbe_init(struct axgbe_port *pdata)
+{
+	int ret;
+
+	/* Flush Tx queues */
+	ret = axgbe_flush_tx_queues(pdata);
+	if (ret)
+		return ret;
+	/* Initialize DMA related features */
+	axgbe_config_dma_bus(pdata);
+	axgbe_config_dma_cache(pdata);
+	axgbe_config_edma_control(pdata);
+	axgbe_config_osp_mode(pdata);
+	axgbe_config_pblx8(pdata);
+	axgbe_config_tx_pbl_val(pdata);
+	axgbe_config_rx_pbl_val(pdata);
+	axgbe_config_rx_buffer_size(pdata);
+	axgbe_config_rss(pdata);
+	wrapper_tx_desc_init(pdata);
+	ret = wrapper_rx_desc_init(pdata);
+	if (ret)
+		return ret;
+	axgbe_enable_dma_interrupts(pdata);
+
+	/* Initialize MTL related features */
+	axgbe_config_mtl_mode(pdata);
+	axgbe_config_queue_mapping(pdata);
+	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+	axgbe_config_tx_fifo_size(pdata);
+	axgbe_config_rx_fifo_size(pdata);
+
+	axgbe_enable_mtl_interrupts(pdata);
+
+	/* Initialize MAC related features */
+	axgbe_config_mac_address(pdata);
+	axgbe_config_jumbo_enable(pdata);
+	axgbe_config_flow_control(pdata);
+	axgbe_config_mac_speed(pdata);
+	axgbe_config_checksum_offload(pdata);
+
+	return 0;
+}
+
 void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
 {
 	hw_if->exit = axgbe_exit;
+	hw_if->config_flow_control = axgbe_config_flow_control;
 
+	hw_if->init = axgbe_init;
 
 	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
 	hw_if->write_mmd_regs = axgbe_write_mmd_regs;
@@ -257,4 +1098,7 @@ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
 	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
 	hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
 	hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
+	/* Flow control */
+	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
+	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
 }
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 3b5f1ae..f8cfbd8 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -10,6 +10,9 @@
 
 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int  axgbe_dev_configure(struct rte_eth_dev *dev);
+static int  axgbe_dev_start(struct rte_eth_dev *dev);
+static void axgbe_dev_stop(struct rte_eth_dev *dev);
 static void axgbe_dev_interrupt_handler(void *param);
 static void axgbe_dev_close(struct rte_eth_dev *dev);
 static void axgbe_dev_info_get(struct rte_eth_dev *dev,
@@ -64,6 +67,9 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
 };
 
 static const struct eth_dev_ops axgbe_eth_dev_ops = {
+	.dev_configure        = axgbe_dev_configure,
+	.dev_start            = axgbe_dev_start,
+	.dev_stop             = axgbe_dev_stop,
 	.dev_close            = axgbe_dev_close,
 	.dev_infos_get        = axgbe_dev_info_get,
 	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
@@ -72,6 +78,13 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
 	.tx_queue_release     = axgbe_dev_tx_queue_release,
 };
 
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+	pdata->phy_link = -1;
+	pdata->phy_speed = SPEED_UNKNOWN;
+	return pdata->phy_if.phy_reset(pdata);
+}
+
 /*
 * Interrupt handler triggered by NIC for handling
  * specific interrupt.
@@ -96,6 +109,89 @@ axgbe_dev_interrupt_handler(void *param)
 	rte_intr_enable(&pdata->pci_dev->intr_handle);
 }
 
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct axgbe_port *pdata = dev->data->dev_private;
+	/* Checksum offload to hardware */
+	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_CHECKSUM;
+	return 0;
+}
+
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+
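+	/* Only RSS and single-queue Rx modes are supported */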
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+		pdata->rss_enable = 1;
+	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+		pdata->rss_enable = 0;
+	else
+		return -1;
+	return 0;
+}
+
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+	int ret;
+
+	/* Multiqueue RSS */
+	ret = axgbe_dev_rx_mq_config(dev);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+		return ret;
+	}
+	ret = axgbe_phy_reset(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "phy reset failed\n");
+		return ret;
+	}
+	ret = pdata->hw_if.init(pdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "dev_init failed\n");
+		return ret;
+	}
+
+	/* enable uio/vfio intr/eventfd mapping */
+	rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+	/* phy start */
+	pdata->phy_if.phy_start(pdata);
+
+	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+	return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+	PMD_INIT_FUNC_TRACE();
+	struct axgbe_port *pdata = dev->data->dev_private;
+
+	rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+		return;
+
+	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+
+	pdata->phy_if.phy_stop(pdata);
+	pdata->hw_if.exit(pdata);
+	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
+
 /* Clear all resources like TX/RX queues. */
 static void
 axgbe_dev_close(struct rte_eth_dev *dev)
-- 
2.7.4
