From: Apeksha Gupta <apeksha.gupta@nxp.com>
To: david.marchand@redhat.com, andrew.rybchenko@oktetlabs.ru,
ferruh.yigit@intel.com
Cc: dev@dpdk.org, sachin.saxena@nxp.com, hemant.agrawal@nxp.com,
Apeksha Gupta <apeksha.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH v3 4/5] net/enetfec: add enqueue and dequeue support
Date: Fri, 10 Sep 2021 02:13:13 +0530
Message-ID: <20210909204314.1826-5-apeksha.gupta@nxp.com>
In-Reply-To: <20210909204314.1826-1-apeksha.gupta@nxp.com>
This patch adds burst enqueue and dequeue operations to the enetfec
PMD. Loopback mode is also added; the compile-time flag 'ENETFEC_LOOPBACK'
enables this feature, and it is disabled by default. Basic features such as
promiscuous mode enable and basic statistics are added as well.
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
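Notes: as a minimal usage sketch (not part of this patch; names and the usual
port/queue setup are assumed), the new burst paths are reached through the
standard ethdev API once the port is configured and started:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    /* Echo traffic on queue 0 of the given port; Rx lands in
     * enetfec_recv_pkts() and Tx in enetfec_xmit_pkts().
     */
    static void
    forward_loop(uint16_t port_id)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, nb_tx;

        for (;;) {
            nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SIZE);
            if (nb_rx == 0)
                continue;
            nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
            /* Free anything the Tx ring could not accept. */
            while (nb_tx < nb_rx)
                rte_pktmbuf_free(pkts[nb_tx++]);
        }
    }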
doc/guides/nics/enetfec.rst | 2 +
doc/guides/nics/features/enetfec.ini | 2 +
drivers/net/enetfec/enet_ethdev.c | 188 +++++++++++
drivers/net/enetfec/enet_rxtx.c | 445 +++++++++++++++++++++++++++
drivers/net/enetfec/meson.build | 3 +-
5 files changed, 639 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/enetfec/enet_rxtx.c
diff --git a/doc/guides/nics/enetfec.rst b/doc/guides/nics/enetfec.rst
index 5d0b51a2de..f14e59ce74 100644
--- a/doc/guides/nics/enetfec.rst
+++ b/doc/guides/nics/enetfec.rst
@@ -75,6 +75,8 @@ ENETFEC driver.
ENETFEC Features
~~~~~~~~~~~~~~~~~
+- Basic stats
+- Promiscuous
- Linux
- ARMv8
diff --git a/doc/guides/nics/features/enetfec.ini b/doc/guides/nics/features/enetfec.ini
index bdfbdbd9d4..7e0fb148ac 100644
--- a/doc/guides/nics/features/enetfec.ini
+++ b/doc/guides/nics/features/enetfec.ini
@@ -4,6 +4,8 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Basic stats = Y
+Promiscuous mode = Y
Linux = Y
ARMv8 = Y
Usage doc = Y
diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c
index 90abc6d188..0b1a72e23a 100644
--- a/drivers/net/enetfec/enet_ethdev.c
+++ b/drivers/net/enetfec/enet_ethdev.c
@@ -41,6 +41,8 @@
#define ENETFEC_RAFL_V 0x8
#define ENETFEC_OPD_V 0xFFF0
+/* Extended buffer descriptor */
+#define ENETFEC_EXTENDED_BD 0
#define NUM_OF_QUEUES 6
uint32_t e_cntl;
@@ -179,6 +181,40 @@ enetfec_restart(struct rte_eth_dev *dev)
rte_delay_us(10);
}
+static void
+enet_free_buffers(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ unsigned int i, q;
+ struct rte_mbuf *mbuf;
+ struct bufdesc *bdp;
+ struct enetfec_priv_rx_q *rxq;
+ struct enetfec_priv_tx_q *txq;
+
+ for (q = 0; q < dev->data->nb_rx_queues; q++) {
+ rxq = fep->rx_queues[q];
+ bdp = rxq->bd.base;
+ for (i = 0; i < rxq->bd.ring_size; i++) {
+ mbuf = rxq->rx_mbuf[i];
+ rxq->rx_mbuf[i] = NULL;
+ if (mbuf)
+ rte_pktmbuf_free(mbuf);
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+ }
+ }
+
+ for (q = 0; q < dev->data->nb_tx_queues; q++) {
+ txq = fep->tx_queues[q];
+ bdp = txq->bd.base;
+ for (i = 0; i < txq->bd.ring_size; i++) {
+ mbuf = txq->tx_mbuf[i];
+ txq->tx_mbuf[i] = NULL;
+ if (mbuf)
+ rte_pktmbuf_free(mbuf);
+ }
+ }
+}
+
static int
enetfec_eth_configure(__rte_unused struct rte_eth_dev *dev)
{
@@ -192,6 +228,8 @@ static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
enetfec_restart(dev);
+ dev->rx_pkt_burst = &enetfec_recv_pkts;
+ dev->tx_pkt_burst = &enetfec_xmit_pkts;
return 0;
}
@@ -226,6 +264,101 @@ enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)
return 0;
}
+static int
+enetfec_eth_close(struct rte_eth_dev *dev)
+{
+ enet_free_buffers(dev);
+ return 0;
+}
+
+static int
+enetfec_eth_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ if (dev == NULL) {
+ ENETFEC_PMD_ERR("Invalid device in link_update.\n");
+ return -EINVAL;
+ }
+
+ ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+ "down");
+ return 0;
+}
+
+static int
+enetfec_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ uint32_t tmp;
+
+ tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+ tmp |= 0x8;
+ tmp &= ~0x2;
+ rte_write32(rte_cpu_to_le_32(tmp),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
+
+ return 0;
+}
+
+static int
+enetfec_multicast_enable(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+
+ rte_write32(rte_cpu_to_le_32(0xffffffff),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
+ rte_write32(rte_cpu_to_le_32(0xffffffff),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
+ dev->data->all_multicast = 1;
+
+ return 0;
+}
+
+/* Program the MAC address into the hardware. */
+static int
+enetfec_set_mac_address(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+
+ writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
+ (addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
+ writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
+ (uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
+
+ rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+
+ return 0;
+}
+
+static int
+enetfec_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ struct rte_eth_stats *eth_stats = &fep->stats;
+
+ if (stats == NULL)
+ return -EINVAL;
+
+ memset(stats, 0, sizeof(struct rte_eth_stats));
+
+ stats->ipackets = eth_stats->ipackets;
+ stats->ibytes = eth_stats->ibytes;
+ stats->ierrors = eth_stats->ierrors;
+ stats->opackets = eth_stats->opackets;
+ stats->obytes = eth_stats->obytes;
+ stats->oerrors = eth_stats->oerrors;
+
+ return 0;
+}
+
static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
@@ -237,6 +370,18 @@ enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
return 0;
}
+static void
+enet_free_queue(struct rte_eth_dev *dev)
+{
+ struct enetfec_private *fep = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(fep->rx_queues[i]);
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(fep->tx_queues[i]);
+}
+
static const unsigned short offset_des_active_rxq[] = {
ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};
@@ -442,6 +587,12 @@ static const struct eth_dev_ops enetfec_ops = {
.dev_configure = enetfec_eth_configure,
.dev_start = enetfec_eth_start,
.dev_stop = enetfec_eth_stop,
+ .dev_close = enetfec_eth_close,
+ .link_update = enetfec_eth_link_update,
+ .promiscuous_enable = enetfec_promiscuous_enable,
+ .allmulticast_enable = enetfec_multicast_enable,
+ .mac_addr_set = enetfec_set_mac_address,
+ .stats_get = enetfec_stats_get,
.dev_infos_get = enetfec_eth_info,
.rx_queue_setup = enetfec_rx_queue_setup,
.tx_queue_setup = enetfec_tx_queue_setup
@@ -468,6 +619,7 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
int rc;
int i;
unsigned int bdsize;
+ struct rte_ether_addr macaddr;
name = rte_vdev_device_name(vdev);
if (name == NULL)
@@ -507,6 +659,27 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
fep->bd_addr_p = fep->bd_addr_p + bdsize;
}
+ /* Copy the station address into the dev structure. */
+ dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
+ if (dev->data->mac_addrs == NULL) {
+ ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
+ RTE_ETHER_ADDR_LEN);
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Set default mac address
+ */
+ macaddr.addr_bytes[0] = 1;
+ macaddr.addr_bytes[1] = 1;
+ macaddr.addr_bytes[2] = 1;
+ macaddr.addr_bytes[3] = 1;
+ macaddr.addr_bytes[4] = 1;
+ macaddr.addr_bytes[5] = 1;
+ enetfec_set_mac_address(dev, &macaddr);
+
+ fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
rc = enetfec_eth_init(dev);
if (rc)
goto failed_init;
@@ -515,6 +688,8 @@ pmd_enetfec_probe(struct rte_vdev_device *vdev)
failed_init:
ENETFEC_PMD_ERR("Failed to init");
+err:
+ rte_eth_dev_release_port(dev);
return rc;
}
@@ -522,6 +697,8 @@ static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *eth_dev = NULL;
+ struct enetfec_private *fep;
+ struct enetfec_priv_rx_q *rxq;
int ret;
/* find the ethdev entry */
@@ -529,11 +706,22 @@ pmd_enetfec_remove(struct rte_vdev_device *vdev)
if (eth_dev == NULL)
return -ENODEV;
+ fep = eth_dev->data->dev_private;
+ /* Free descriptor base of first RX queue as it was configured
+ * first in enetfec_eth_init().
+ */
+ rxq = fep->rx_queues[0];
+ rte_free(rxq->bd.base);
+ enet_free_queue(eth_dev);
+ enetfec_eth_stop(eth_dev);
+
+ /* Unmap the register window before the port (and fep) is released. */
+ munmap(fep->hw_baseaddr_v, fep->cbus_size);
+
ret = rte_eth_dev_release_port(eth_dev);
if (ret != 0)
return -EINVAL;
ENETFEC_PMD_INFO("Closing sw device");
return 0;
}
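For reference, enetfec_set_mac_address() above packs the six address bytes
into the two 32-bit PALR/PAUR registers. A standalone sketch of the same
computation (hypothetical helper, not driver code):

    #include <stdint.h>

    /* For aa:bb:cc:dd:ee:ff this yields palr = 0xaabbccdd and
     * paur = 0xeeff0000 (the low half of PAUR is left zero).
     */
    static void
    mac_to_palr_paur(const uint8_t mac[6], uint32_t *palr, uint32_t *paur)
    {
        *palr = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
                ((uint32_t)mac[2] << 8) | mac[3];
        *paur = ((uint32_t)mac[4] << 24) | ((uint32_t)mac[5] << 16);
    }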
diff --git a/drivers/net/enetfec/enet_rxtx.c b/drivers/net/enetfec/enet_rxtx.c
new file mode 100644
index 0000000000..445fa97e77
--- /dev/null
+++ b/drivers/net/enetfec/enet_rxtx.c
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#include <signal.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include "enet_regs.h"
+#include "enet_ethdev.h"
+#include "enet_pmd_logs.h"
+
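+/* Build-time switches: set ENETFEC_LOOPBACK to 1 to compile in the Rx-to-Tx
+ * loopback path below, and ENETFEC_DUMP to 1 to compile in the ring dumpers.
+ */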
+#define ENETFEC_LOOPBACK 0
+#define ENETFEC_DUMP 0
+
+#if ENETFEC_DUMP
+static void
+enet_dump(struct enetfec_priv_tx_q *txq)
+{
+ struct bufdesc *bdp;
+ int index = 0;
+
+ ENETFEC_PMD_DEBUG("TX ring dump\n");
+ ENETFEC_PMD_DEBUG("Nr SC addr len MBUF\n");
+
+ bdp = txq->bd.base;
+ do {
+ ENETFEC_PMD_DEBUG("%3u %c%c 0x%04x 0x%08x %4u %p\n",
+ index,
+ bdp == txq->bd.cur ? 'S' : ' ',
+ bdp == txq->dirty_tx ? 'H' : ' ',
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+ rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+ txq->tx_mbuf[index]);
+ bdp = enet_get_nextdesc(bdp, &txq->bd);
+ index++;
+ } while (bdp != txq->bd.base);
+}
+
+static void
+enet_dump_rx(struct enetfec_priv_rx_q *rxq)
+{
+ struct bufdesc *bdp;
+ int index = 0;
+
+ ENETFEC_PMD_DEBUG("RX ring dump\n");
+ ENETFEC_PMD_DEBUG("Nr SC addr len MBUF\n");
+
+ bdp = rxq->bd.base;
+ do {
+ ENETFEC_PMD_DEBUG("%3u %c 0x%04x 0x%08x %4u %p\n",
+ index,
+ bdp == rxq->bd.cur ? 'S' : ' ',
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_sc)),
+ rte_le_to_cpu_32(rte_read32(&bdp->bd_bufaddr)),
+ rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen)),
+ rxq->rx_mbuf[index]);
+ rte_pktmbuf_dump(stdout, rxq->rx_mbuf[index],
+ rxq->rx_mbuf[index]->pkt_len);
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+ index++;
+ } while (bdp != rxq->bd.base);
+}
+#endif
+
+#if ENETFEC_LOOPBACK
+static volatile bool lb_quit;
+
+static void
+fec_signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTSTP || signum == SIGTERM) {
+ printf("\n\n %s: Signal %d received, preparing to exit...\n",
+ __func__, signum);
+ lb_quit = true;
+ }
+}
+
+static void
+enetfec_lb_rxtx(void *rxq1)
+{
+ struct rte_mempool *pool;
+ struct bufdesc *rx_bdp = NULL, *tx_bdp = NULL;
+ struct rte_mbuf *mbuf = NULL, *new_mbuf = NULL;
+ unsigned short status;
+ unsigned short pkt_len = 0;
+ int index_r = 0, index_t = 0;
+ u8 *data;
+ struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
+ struct rte_eth_stats *stats = &rxq->fep->stats;
+ unsigned int i;
+ struct enetfec_private *fep;
+ struct enetfec_priv_tx_q *txq;
+ fep = rxq->fep->dev->data->dev_private;
+ txq = fep->tx_queues[0];
+
+ pool = rxq->pool;
+ rx_bdp = rxq->bd.cur;
+ tx_bdp = txq->bd.cur;
+
+ signal(SIGTSTP, fec_signal_handler);
+ while (!lb_quit) {
+chk_again:
+ status = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_sc));
+ if (status & RX_BD_EMPTY) {
+ if (!lb_quit)
+ goto chk_again;
+ rxq->bd.cur = rx_bdp;
+ txq->bd.cur = tx_bdp;
+ return;
+ }
+
+ /* Check for errors. */
+ status ^= RX_BD_LAST;
+ if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+ RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+ RX_BD_TR)) {
+ stats->ierrors++;
+ if (status & RX_BD_OV) {
+ /* FIFO overrun */
+ ENETFEC_PMD_ERR("rx_fifo_error\n");
+ goto rx_processing_done;
+ }
+ if (status & (RX_BD_LG | RX_BD_SH
+ | RX_BD_LAST)) {
+ /* Frame too long or too short. */
+ ENETFEC_PMD_ERR("rx_length_error\n");
+ if (status & RX_BD_LAST)
+ ENETFEC_PMD_ERR("rcv is not +last\n");
+ }
+ /* CRC Error */
+ if (status & RX_BD_CR)
+ ENETFEC_PMD_ERR("rx_crc_errors\n");
+
+ /* Report late collisions as a frame error. */
+ if (status & (RX_BD_NO | RX_BD_TR))
+ ENETFEC_PMD_ERR("rx_frame_error\n");
+ mbuf = NULL;
+ goto rx_processing_done;
+ }
+
+ new_mbuf = rte_pktmbuf_alloc(pool);
+ if (unlikely(!new_mbuf)) {
+ stats->ierrors++;
+ break;
+ }
+ /* Process the incoming frame. */
+ pkt_len = rte_le_to_cpu_16(rte_read16(&rx_bdp->bd_datlen));
+
+ /* shows data with respect to the data_off field. */
+ index_r = enet_get_bd_index(rx_bdp, &rxq->bd);
+ mbuf = rxq->rx_mbuf[index_r];
+
+ /* adjust pkt_len */
+ rte_pktmbuf_append(mbuf, pkt_len - 4);
+ if (rxq->fep->quirks & QUIRK_RACC)
+ rte_pktmbuf_adj(mbuf, 2);
+
+ /* Replace Buffer in BD */
+ rxq->rx_mbuf[index_r] = new_mbuf;
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+ &rx_bdp->bd_bufaddr);
+
+rx_processing_done:
+ /* when rx_processing_done clear the status flags
+ * for this buffer
+ */
+ status &= ~RX_BD_STATS;
+
+ /* Mark the buffer empty */
+ status |= RX_BD_EMPTY;
+
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &rx_bdp->bd_sc);
+
+ /* Update BD pointer to next entry */
+ rx_bdp = enet_get_nextdesc(rx_bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames.
+ */
+ rte_write32(0, rxq->bd.active_reg_desc);
+
+ /* TX begins: First clean the ring then process packet */
+ index_t = enet_get_bd_index(tx_bdp, &txq->bd);
+ status = rte_le_to_cpu_16(rte_read16(&tx_bdp->bd_sc));
+ if (status & TX_BD_READY) {
+ stats->oerrors++;
+ break;
+ }
+ if (txq->tx_mbuf[index_t]) {
+ rte_pktmbuf_free(txq->tx_mbuf[index_t]);
+ txq->tx_mbuf[index_t] = NULL;
+ }
+
+ if (mbuf == NULL)
+ continue;
+
+ /* Fill in a Tx ring entry */
+ status &= ~TX_BD_STATS;
+
+ /* Set buffer length and buffer pointer */
+ pkt_len = rte_pktmbuf_pkt_len(mbuf);
+ status |= (TX_BD_LAST);
+ data = rte_pktmbuf_mtod(mbuf, void *);
+
+ for (i = 0; i <= pkt_len; i += RTE_CACHE_LINE_SIZE)
+ dcbf(data + i);
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+ &tx_bdp->bd_bufaddr);
+ rte_write16(rte_cpu_to_le_16(pkt_len), &tx_bdp->bd_datlen);
+
+ /* Make sure the updates to rest of the descriptor are performed
+ * before transferring ownership.
+ */
+ status |= (TX_BD_READY | TX_BD_TC);
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &tx_bdp->bd_sc);
+
+ /* Trigger transmission start */
+ rte_write32(0, txq->bd.active_reg_desc);
+
+ /* Save mbuf pointer to clean later */
+ txq->tx_mbuf[index_t] = mbuf;
+
+ /* If this was the last BD in the ring, start at the
+ * beginning again.
+ */
+ tx_bdp = enet_get_nextdesc(tx_bdp, &txq->bd);
+ }
+}
+#endif
+
+/* This function performs the enetfec Rx queue processing: it dequeues received
+ * packets from the Rx ring and, while walking the ring, marks each processed
+ * descriptor empty again so the hardware can refill it.
+ */
+uint16_t
+enetfec_recv_pkts(void *rxq1, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mempool *pool;
+ struct bufdesc *bdp;
+ struct rte_mbuf *mbuf, *new_mbuf = NULL;
+ unsigned short status;
+ unsigned short pkt_len;
+ int pkt_received = 0, index = 0;
+ void *data;
+ struct enetfec_priv_rx_q *rxq = (struct enetfec_priv_rx_q *)rxq1;
+ struct rte_eth_stats *stats = &rxq->fep->stats;
+ pool = rxq->pool;
+ bdp = rxq->bd.cur;
+#if ENETFEC_LOOPBACK
+ enetfec_lb_rxtx(rxq1);
+#endif
+ /* Process the incoming packet */
+ status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+ while ((status & RX_BD_EMPTY) == 0) {
+ if (pkt_received >= nb_pkts)
+ break;
+
+ new_mbuf = rte_pktmbuf_alloc(pool);
+ if (unlikely(new_mbuf == NULL)) {
+ stats->ierrors++;
+ break;
+ }
+ /* Check for errors. */
+ status ^= RX_BD_LAST;
+ if (status & (RX_BD_LG | RX_BD_SH | RX_BD_NO |
+ RX_BD_CR | RX_BD_OV | RX_BD_LAST |
+ RX_BD_TR)) {
+ stats->ierrors++;
+ if (status & RX_BD_OV) {
+ /* FIFO overrun */
+ /* enet_dump_rx(rxq); */
+ ENETFEC_PMD_ERR("rx_fifo_error\n");
+ goto rx_processing_done;
+ }
+ if (status & (RX_BD_LG | RX_BD_SH
+ | RX_BD_LAST)) {
+ /* Frame too long or too short. */
+ ENETFEC_PMD_ERR("rx_length_error\n");
+ if (status & RX_BD_LAST)
+ ENETFEC_PMD_ERR("rcv is not +last\n");
+ }
+ if (status & RX_BD_CR) { /* CRC Error */
+ ENETFEC_PMD_ERR("rx_crc_errors\n");
+ }
+ /* Report late collisions as a frame error. */
+ if (status & (RX_BD_NO | RX_BD_TR))
+ ENETFEC_PMD_ERR("rx_frame_error\n");
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame. */
+ stats->ipackets++;
+ pkt_len = rte_le_to_cpu_16(rte_read16(&bdp->bd_datlen));
+ stats->ibytes += pkt_len;
+
+ /* shows data with respect to the data_off field. */
+ index = enet_get_bd_index(bdp, &rxq->bd);
+ mbuf = rxq->rx_mbuf[index];
+
+ data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+ rte_prefetch0(data);
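+ /* bd_datlen on the last descriptor includes the 4-byte
+ * Ethernet FCS computed by the MAC; exclude it from the
+ * mbuf data length.
+ */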
+ rte_pktmbuf_append(mbuf, pkt_len - 4);
+
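+ /* With the RACC shift16 quirk the MAC prepends two padding
+ * bytes for IP header alignment; strip them here.
+ */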
+ if (rxq->fep->quirks & QUIRK_RACC)
+ data = rte_pktmbuf_adj(mbuf, 2);
+
+ rx_pkts[pkt_received] = mbuf;
+ pkt_received++;
+ rxq->rx_mbuf[index] = new_mbuf;
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(new_mbuf)),
+ &bdp->bd_bufaddr);
+rx_processing_done:
+ /* when rx_processing_done clear the status flags
+ * for this buffer
+ */
+ status &= ~RX_BD_STATS;
+
+ /* Mark the buffer empty */
+ status |= RX_BD_EMPTY;
+
+ if (rxq->fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ rte_write32(rte_cpu_to_le_32(RX_BD_INT),
+ &ebdp->bd_esc);
+ rte_write32(0, &ebdp->bd_prot);
+ rte_write32(0, &ebdp->bd_bdu);
+ }
+
+ /* Make sure the updates to rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+ /* Update BD pointer to next entry */
+ bdp = enet_get_nextdesc(bdp, &rxq->bd);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames.
+ */
+ rte_write32(0, rxq->bd.active_reg_desc);
+ status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+ }
+ rxq->bd.cur = bdp;
+ return pkt_received;
+}
+
+uint16_t
+enetfec_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct enetfec_priv_tx_q *txq =
+ (struct enetfec_priv_tx_q *)tx_queue;
+ struct rte_eth_stats *stats = &txq->fep->stats;
+ struct bufdesc *bdp, *last_bdp;
+ struct rte_mbuf *mbuf;
+ unsigned short status;
+ unsigned short buflen;
+ unsigned int index, estatus = 0;
+ unsigned int i, pkt_transmitted = 0;
+ u8 *data;
+ int tx_st = 1;
+
+ while (tx_st) {
+ if (pkt_transmitted >= nb_pkts) {
+ tx_st = 0;
+ break;
+ }
+ bdp = txq->bd.cur;
+ /* First clean the ring */
+ index = enet_get_bd_index(bdp, &txq->bd);
+ status = rte_le_to_cpu_16(rte_read16(&bdp->bd_sc));
+
+ if (status & TX_BD_READY) {
+ stats->oerrors++;
+ break;
+ }
+ if (txq->tx_mbuf[index]) {
+ rte_pktmbuf_free(txq->tx_mbuf[index]);
+ txq->tx_mbuf[index] = NULL;
+ }
+
+ mbuf = *(tx_pkts);
+ tx_pkts++;
+
+ /* Fill in a Tx ring entry */
+ last_bdp = bdp;
+ status &= ~TX_BD_STATS;
+
+ /* Set buffer length and buffer pointer */
+ buflen = rte_pktmbuf_pkt_len(mbuf);
+
+ /* Scatter-gather is not supported; stop and report what was sent. */
+ if (mbuf->nb_segs > 1) {
+ ENETFEC_PMD_DEBUG("SG not supported");
+ return pkt_transmitted;
+ }
+ stats->opackets++;
+ stats->obytes += buflen;
+ status |= (TX_BD_LAST);
+ data = rte_pktmbuf_mtod(mbuf, void *);
+ for (i = 0; i <= buflen; i += RTE_CACHE_LINE_SIZE)
+ dcbf(data + i);
+
+ rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
+ &bdp->bd_bufaddr);
+ rte_write16(rte_cpu_to_le_16(buflen), &bdp->bd_datlen);
+
+ if (txq->fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ rte_write32(0, &ebdp->bd_bdu);
+ rte_write32(rte_cpu_to_le_32(estatus),
+ &ebdp->bd_esc);
+ }
+
+ index = enet_get_bd_index(last_bdp, &txq->bd);
+ /* Save mbuf pointer */
+ txq->tx_mbuf[index] = mbuf;
+
+ /* Make sure the updates to rest of the descriptor are performed
+ * before transferring ownership.
+ */
+ status |= (TX_BD_READY | TX_BD_TC);
+ rte_wmb();
+ rte_write16(rte_cpu_to_le_16(status), &bdp->bd_sc);
+
+ /* Trigger transmission start */
+ rte_write32(0, txq->bd.active_reg_desc);
+ pkt_transmitted++;
+
+ /* If this was the last BD in the ring, start at the
+ * beginning again.
+ */
+ bdp = enet_get_nextdesc(last_bdp, &txq->bd);
+
+ /* Make sure the update to bdp and tx_skbuff are performed
+ * before txq->bd.cur.
+ */
+ txq->bd.cur = bdp;
+ }
+ return pkt_transmitted;
+}
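Both burst functions above rely on the same descriptor hand-off rule: software
only touches a descriptor it owns (RX_BD_EMPTY clear on Rx, TX_BD_READY clear
on Tx), fills in every other field first, issues a write barrier, and only then
flips the ownership bit back to the hardware. A self-contained toy model of
that pattern (hypothetical types, host memory only, no real device):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BD_EMPTY 0x8000 /* descriptor owned by the "hardware" */

    struct toy_bd {
        volatile uint16_t sc; /* status/control word */
        uint16_t len;
    };

    /* Consume one descriptor if software owns it, then hand it back. */
    static bool
    toy_rx_one(struct toy_bd *bd)
    {
        uint16_t status = bd->sc;

        if (status & BD_EMPTY) /* still owned by hardware */
            return false;
        printf("got %u bytes\n", bd->len);
        bd->len = 0; /* update everything but the ownership bit... */
        __atomic_thread_fence(__ATOMIC_RELEASE); /* rte_wmb() in the driver */
        bd->sc = status | BD_EMPTY; /* ...then transfer ownership back */
        return true;
    }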
diff --git a/drivers/net/enetfec/meson.build b/drivers/net/enetfec/meson.build
index 25a6d73d57..7d35abd513 100644
--- a/drivers/net/enetfec/meson.build
+++ b/drivers/net/enetfec/meson.build
@@ -9,4 +9,5 @@ endif
deps += ['common_dpaax']
sources = files('enet_ethdev.c',
- 'enet_uio.c')
+ 'enet_uio.c',
+ 'enet_rxtx.c')
--
2.17.1