From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v2 1/4] event/cnxk: add Rx adapter support
Date: Mon, 24 May 2021 17:52:59 +0530
Message-ID: <20210524122303.1116-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add support for the event eth Rx adapter to the CN9K and CN10K SSO drivers.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 This patch set depends on
 http://patches.dpdk.org/project/dpdk/list/?series=15515
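
 As a reviewer aid, the following is a minimal, hypothetical application-side
 sketch of how these driver ops are exercised through the generic
 rte_event_eth_rx_adapter API. The adapter id, event queue id and port_conf
 handling are illustrative only and are not part of this patch.

 #include <errno.h>

 #include <rte_eventdev.h>
 #include <rte_event_eth_rx_adapter.h>

 static int
 attach_ethdev_to_sso(uint8_t evdev_id, uint16_t eth_port_id, uint8_t ev_queue,
 		     struct rte_event_port_conf *port_conf)
 {
 	struct rte_event_eth_rx_adapter_queue_conf qconf = {0};
 	uint32_t caps;
 	int rc;

 	/* cnxk ethdev ports report INTERNAL_PORT, so the adapter does not
 	 * consume an Rx service core for them.
 	 */
 	rc = rte_event_eth_rx_adapter_caps_get(evdev_id, eth_port_id, &caps);
 	if (rc || !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
 		return rc ? rc : -ENOTSUP;

 	rc = rte_event_eth_rx_adapter_create(0, evdev_id, port_conf);
 	if (rc)
 		return rc;

 	qconf.ev.queue_id = ev_queue;
 	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

 	/* rx_queue_id of -1 adds every Rx queue of the port, i.e. the
 	 * rx_queue_id < 0 path in cnxk_sso_rx_adapter_queue_add() below.
 	 */
 	rc = rte_event_eth_rx_adapter_queue_add(0, eth_port_id, -1, &qconf);
 	if (rc)
 		return rc;

 	return rte_event_eth_rx_adapter_start(0);
 }

 Since the NIX RQ injects packets into the SSO directly once sso_ena is set,
 cnxk_sso_rx_adapter_start()/stop() below are no-ops and the adapter's software
 service path is never taken for these ports.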

 doc/guides/eventdevs/cnxk.rst            |   4 +
 drivers/event/cnxk/cn10k_eventdev.c      |  76 +++++++++++
 drivers/event/cnxk/cn10k_worker.h        |   4 +
 drivers/event/cnxk/cn9k_eventdev.c       |  82 ++++++++++++
 drivers/event/cnxk/cn9k_worker.h         |   4 +
 drivers/event/cnxk/cnxk_eventdev.h       |  21 +++
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 157 +++++++++++++++++++++++
 7 files changed, 348 insertions(+)
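
 The per-RQ tag programmed by cnxk_sso_rxq_enable() (cnxk_eventdev_adptr.c
 below) packs the event type and the ethdev port id into the upper bits of the
 32-bit SSO tag, while the NIX fills the lower flow_tag_width bits per packet
 (or the application-supplied ev.flow_id replaces them when the queue is added
 with RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID). A sketch of that layout,
 with hypothetical macro and helper names:

 #include <stdint.h>

 #include <rte_eventdev.h>

 /* Bit positions taken from cnxk_sso_rxq_enable(); names are illustrative. */
 #define SSO_RX_TAG_FLOW_BITS	20 /* rq->flow_tag_width */
 #define SSO_RX_TAG_PORT_SHIFT	20 /* 8-bit ethdev port id at bits [27:20] */
 #define SSO_RX_TAG_TYPE_SHIFT	28 /* RTE_EVENT_TYPE_ETHDEV at bits [31:28] */

 static inline uint32_t
 sso_rx_tag(uint16_t port_id, uint32_t flow_tag)
 {
 	uint32_t tag;

 	tag = (uint32_t)RTE_EVENT_TYPE_ETHDEV << SSO_RX_TAG_TYPE_SHIFT;
 	tag |= ((uint32_t)port_id & 0xFF) << SSO_RX_TAG_PORT_SHIFT;
 	tag |= flow_tag & ((1u << SSO_RX_TAG_FLOW_BITS) - 1);

 	return tag;
 }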

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 36da3800cc..03dfcbd6a8 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -39,6 +39,10 @@ Features of the OCTEON cnxk SSO PMD are:
   time granularity of 2.5us on CN9K and 1us on CN10K.
 - Up to 256 TIM rings a.k.a event timer adapters.
 - Up to 8 rings traversed in parallel.
+- HW managed packets enqueued from ethdev to eventdev, exposed through the
+  event eth Rx adapter.
+- N:1 Ethernet device Rx queue to event queue mapping.
+- Full Rx offload support defined through ethdev queue configuration.

 Prerequisites and Compilation procedure
 ---------------------------------------
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index bf4052c76c..66040df060 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -407,6 +407,76 @@ cn10k_sso_selftest(void)
 	return cnxk_sso_selftest(RTE_STR(event_cn10k));
 }

+static int
+cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int rc;
+
+	RTE_SET_USED(event_dev);
+	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+	if (rc)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+
+	return 0;
+}
+
+static void
+cn10k_sso_set_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
+		ws->lookup_mem = lookup_mem;
+	}
+}
+
+static int
+cn10k_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	void *lookup_mem;
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+	if (rc)
+		return -EINVAL;
+
+	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
+					   queue_conf);
+	if (rc)
+		return -EINVAL;
+
+	lookup_mem = ((struct cn10k_eth_rxq *)eth_dev->data->rx_queues[0])
+			     ->lookup_mem;
+	cn10k_sso_set_lookup_mem(event_dev, lookup_mem);
+	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+			       const struct rte_eth_dev *eth_dev,
+			       int32_t rx_queue_id)
+{
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
+	if (rc)
+		return -EINVAL;
+
+	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+}
+
 static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -420,6 +490,12 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.port_unlink = cn10k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,

+	.eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
+	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
+	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
+
 	.timer_adapter_caps_get = cnxk_tim_caps_get,

 	.dump = cnxk_sso_dump,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 2f093a8dd5..085857bccf 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -5,9 +5,13 @@
 #ifndef __CN10K_WORKER_H__
 #define __CN10K_WORKER_H__

+#include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"

+#include "cn10k_ethdev.h"
+#include "cn10k_rx.h"
+
 /* SSO Operations */

 static __rte_always_inline uint8_t
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 0684417eab..8e6bf54df9 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -481,6 +481,82 @@ cn9k_sso_selftest(void)
 	return cnxk_sso_selftest(RTE_STR(event_cn9k));
 }

+static int
+cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+			     const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int rc;
+
+	RTE_SET_USED(event_dev);
+	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 9);
+	if (rc)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
+			RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID;
+
+	return 0;
+}
+
+static void
+cn9k_sso_set_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		if (dev->dual_ws) {
+			struct cn9k_sso_hws_dual *dws =
+				event_dev->data->ports[i];
+			dws->lookup_mem = lookup_mem;
+		} else {
+			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
+			ws->lookup_mem = lookup_mem;
+		}
+	}
+}
+
+static int
+cn9k_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	void *lookup_mem;
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
+	if (rc)
+		return -EINVAL;
+
+	rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
+					   queue_conf);
+	if (rc)
+		return -EINVAL;
+
+	lookup_mem = ((struct cn9k_eth_rxq *)eth_dev->data->rx_queues[0])
+			     ->lookup_mem;
+	cn9k_sso_set_lookup_mem(event_dev, lookup_mem);
+	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+cn9k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev,
+			      int32_t rx_queue_id)
+{
+	int rc;
+
+	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
+	if (rc)
+		return -EINVAL;
+
+	return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
+}
+
 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -494,6 +570,12 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.port_unlink = cn9k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,

+	.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
+	.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
+	.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
+
 	.timer_adapter_caps_get = cnxk_tim_caps_get,

 	.dump = cnxk_sso_dump,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 38fca08fb6..f5a4401465 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -5,9 +5,13 @@
 #ifndef __CN9K_WORKER_H__
 #define __CN9K_WORKER_H__

+#include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"

+#include "cn9k_ethdev.h"
+#include "cn9k_rx.h"
+
 /* SSO Operations */

 static __rte_always_inline uint8_t
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 668e51d62a..6e0bb8ac5c 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -6,6 +6,8 @@
 #define __CNXK_EVENTDEV_H__

 #include <rte_devargs.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_kvargs.h>
 #include <rte_mbuf_pool_ops.h>
 #include <rte_pci.h>
@@ -81,7 +83,10 @@ struct cnxk_sso_evdev {
 	uint64_t nb_xaq_cfg;
 	rte_iova_t fc_iova;
 	struct rte_mempool *xaq_pool;
+	uint64_t rx_offloads;
 	uint64_t adptr_xae_cnt;
+	uint16_t rx_adptr_pool_cnt;
+	uint64_t *rx_adptr_pools;
 	uint16_t tim_adptr_ring_cnt;
 	uint16_t *timer_adptr_rings;
 	uint64_t *timer_adptr_sz;
@@ -108,6 +113,7 @@ struct cnxk_sso_evdev {
 struct cn10k_sso_hws {
 	/* Get Work Fastpath data */
 	CN10K_SSO_HWS_OPS;
+	void *lookup_mem;
 	uint32_t gw_wdata;
 	uint8_t swtag_req;
 	uint8_t hws_id;
@@ -132,6 +138,7 @@ struct cn10k_sso_hws {
 struct cn9k_sso_hws {
 	/* Get Work Fastpath data */
 	CN9K_SSO_HWS_OPS;
+	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t hws_id;
 	/* Add Work Fastpath data */
@@ -148,6 +155,7 @@ struct cn9k_sso_hws_state {
 struct cn9k_sso_hws_dual {
 	/* Get Work Fastpath data */
 	struct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong */
+	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t vws; /* Ping pong bit */
 	uint8_t hws_id;
@@ -250,4 +258,17 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
 /* CN9K */
 void cn9k_sso_set_rsrc(void *arg);

+/* Common adapter ops */
+int cnxk_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+int cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+				  const struct rte_eth_dev *eth_dev,
+				  int32_t rx_queue_id);
+int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev);
+int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+			     const struct rte_eth_dev *eth_dev);
+
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 89a1d82c14..8de7b6f895 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */

+#include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"

 void
@@ -11,6 +12,32 @@ cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
 	int i;

 	switch (event_type) {
+	case RTE_EVENT_TYPE_ETHDEV: {
+		struct cnxk_eth_rxq_sp *rxq = data;
+		uint64_t *old_ptr;
+
+		for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
+			if ((uint64_t)rxq->qconf.mp == dev->rx_adptr_pools[i])
+				return;
+		}
+
+		dev->rx_adptr_pool_cnt++;
+		old_ptr = dev->rx_adptr_pools;
+		dev->rx_adptr_pools = rte_realloc(
+			dev->rx_adptr_pools,
+			sizeof(uint64_t) * dev->rx_adptr_pool_cnt, 0);
+		if (dev->rx_adptr_pools == NULL) {
+			dev->adptr_xae_cnt += rxq->qconf.mp->size;
+			dev->rx_adptr_pools = old_ptr;
+			dev->rx_adptr_pool_cnt--;
+			return;
+		}
+		dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
+			(uint64_t)rxq->qconf.mp;
+
+		dev->adptr_xae_cnt += rxq->qconf.mp->size;
+		break;
+	}
 	case RTE_EVENT_TYPE_TIMER: {
 		struct cnxk_tim_ring *timr = data;
 		uint16_t *old_ring_ptr;
@@ -65,3 +92,133 @@ cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
 		break;
 	}
 }
+
+static int
+cnxk_sso_rxq_enable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id,
+		    uint16_t port_id, const struct rte_event *ev,
+		    uint8_t custom_flowid)
+{
+	struct roc_nix_rq *rq;
+
+	rq = &cnxk_eth_dev->rqs[rq_id];
+	rq->sso_ena = 1;
+	rq->tt = ev->sched_type;
+	rq->hwgrp = ev->queue_id;
+	rq->flow_tag_width = 20;
+	rq->wqe_skip = 1;
+	rq->tag_mask = (port_id & 0xF) << 20;
+	rq->tag_mask |= (((port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4))
+			<< 24;
+
+	if (custom_flowid) {
+		rq->flow_tag_width = 0;
+		rq->tag_mask |= ev->flow_id;
+	}
+
+	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
+static int
+cnxk_sso_rxq_disable(struct cnxk_eth_dev *cnxk_eth_dev, uint16_t rq_id)
+{
+	struct roc_nix_rq *rq;
+
+	rq = &cnxk_eth_dev->rqs[rq_id];
+	rq->sso_ena = 0;
+	rq->flow_tag_width = 32;
+	rq->tag_mask = 0;
+
+	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
+}
+
+int
+cnxk_sso_rx_adapter_queue_add(
+	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+	int32_t rx_queue_id,
+	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t port = eth_dev->data->port_id;
+	struct cnxk_eth_rxq_sp *rxq_sp;
+	int i, rc = 0;
+
+	if (rx_queue_id < 0) {
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+			rxq_sp = eth_dev->data->rx_queues[i];
+			rxq_sp = rxq_sp - 1;
+			cnxk_sso_updt_xae_cnt(dev, rxq_sp,
+					      RTE_EVENT_TYPE_ETHDEV);
+			rc = cnxk_sso_xae_reconfigure(
+				(struct rte_eventdev *)(uintptr_t)event_dev);
+			rc |= cnxk_sso_rxq_enable(
+				cnxk_eth_dev, i, port, &queue_conf->ev,
+				!!(queue_conf->rx_queue_flags &
+				   RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
+		}
+	} else {
+		rxq_sp = eth_dev->data->rx_queues[rx_queue_id];
+		rxq_sp = rxq_sp - 1;
+		cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
+		rc = cnxk_sso_xae_reconfigure(
+			(struct rte_eventdev *)(uintptr_t)event_dev);
+		rc |= cnxk_sso_rxq_enable(
+			cnxk_eth_dev, (uint16_t)rx_queue_id, port,
+			&queue_conf->ev,
+			!!(queue_conf->rx_queue_flags &
+			   RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
+	}
+
+	if (rc < 0) {
+		plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
+			queue_conf->ev.queue_id);
+		return rc;
+	}
+
+	dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
+
+	return 0;
+}
+
+int
+cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+			      const struct rte_eth_dev *eth_dev,
+			      int32_t rx_queue_id)
+{
+	struct cnxk_eth_dev *dev = eth_dev->data->dev_private;
+	int i, rc = 0;
+
+	RTE_SET_USED(event_dev);
+	if (rx_queue_id < 0) {
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+			rc = cnxk_sso_rxq_disable(dev, i);
+	} else {
+		rc = cnxk_sso_rxq_disable(dev, (uint16_t)rx_queue_id);
+	}
+
+	if (rc < 0)
+		plt_err("Failed to clear Rx adapter config port=%d, q=%d",
+			eth_dev->data->port_id, rx_queue_id);
+
+	return rc;
+}
+
+int
+cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
+			  const struct rte_eth_dev *eth_dev)
+{
+	RTE_SET_USED(event_dev);
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
+
+int
+cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
+			 const struct rte_eth_dev *eth_dev)
+{
+	RTE_SET_USED(event_dev);
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
--
2.17.1

