From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v3 20/28] net/cnxk: support Rx security offload on cn10k
Date: Fri, 1 Oct 2021 19:10:14 +0530
Message-ID: <20211001134022.22700-21-ndabilpuram@marvell.com>
In-Reply-To: <20211001134022.22700-1-ndabilpuram@marvell.com>

Add support on CN10K to receive CPT processed packets on Rx via a
second pass. Inline IPsec inbound packets decrypted by CPT re-enter
NIX carrying a CPT parse header in a meta buffer; the Rx fast path
detects them via bit 11 of CQE word1, translates the meta buffer to
the inner mbuf, and batch-frees the meta buffers back to their NPA
aura over LMT lines.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c         |  80 ++--
 drivers/event/cnxk/cn10k_worker.h           |  73 +++-
 drivers/event/cnxk/cn10k_worker_deq.c       |   2 +-
 drivers/event/cnxk/cn10k_worker_deq_burst.c |   2 +-
 drivers/event/cnxk/cn10k_worker_deq_ca.c    |   2 +-
 drivers/event/cnxk/cn10k_worker_deq_tmo.c   |   2 +-
 drivers/net/cnxk/cn10k_ethdev.h             |   4 +
 drivers/net/cnxk/cn10k_rx.c                 |  31 +-
 drivers/net/cnxk/cn10k_rx.h                 | 648 +++++++++++++++++++++++-----
 drivers/net/cnxk/cn10k_rx_mseg.c            |   2 +-
 drivers/net/cnxk/cn10k_rx_vec.c             |   4 +-
 drivers/net/cnxk/cn10k_rx_vec_mseg.c        |   4 +-
 drivers/net/cnxk/cn10k_tx.h                 |   3 -
 13 files changed, 688 insertions(+), 169 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 8af273a..9c0d84b 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -7,7 +7,8 @@
 #include "cnxk_worker.h"
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
-	(deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
+	(deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
+			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
 			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
 			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
 			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
@@ -288,88 +289,91 @@ static void
 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-	const event_dequeue_t sso_hws_deq[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
+	const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
+	const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
+	const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_tmo_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
+	const event_dequeue_burst_t
+		sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_##name,
+	const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_ca_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_burst_##name,
+	const event_dequeue_burst_t
+		sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_burst_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
+	const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_burst_t sso_hws_deq_seg_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
+	const event_dequeue_burst_t
+		sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
+	const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
 	const event_dequeue_burst_t
-		sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
+		sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
 			NIX_RX_FASTPATH_MODES
 #undef R
 		};
 
-	const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_##name,
+	const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_##name,
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
 	const event_dequeue_burst_t
-		sso_hws_deq_ca_seg_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_burst_##name,
+		sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_burst_##name,
 			NIX_RX_FASTPATH_MODES
 #undef R
 	};
@@ -385,7 +389,7 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 
 	const event_tx_adapter_enqueue
 		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                            \
 	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
 			NIX_TX_FASTPATH_MODES
 #undef T
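
All the SSO dequeue tables above gain a leading dimension for
NIX_RX_OFFLOAD_SECURITY_F, so every Rx offload flag maps to one boolean
index and each R() row fills exactly one slot. A minimal sketch of this
dispatch pattern, using hypothetical flag names and a two-flag table
instead of the driver's seven dimensions:

	#include <stdint.h>

	#define FLAG_SEC  (1UL << 0) /* stands in for NIX_RX_OFFLOAD_SECURITY_F */
	#define FLAG_VLAN (1UL << 1) /* stands in for NIX_RX_OFFLOAD_VLAN_STRIP_F */

	typedef uint16_t (*deq_fn_t)(void *port);

	static deq_fn_t
	pick_deq(uint64_t offloads, const deq_fn_t ops[2][2])
	{
		/* !! folds any set bit down to a 0/1 array index */
		return ops[!!(offloads & FLAG_SEC)][!!(offloads & FLAG_VLAN)];
	}
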
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index e5ed043..b79bd90 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -106,12 +106,17 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
 
 static __rte_always_inline void
 cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
-		   void *lookup_mem, void *tstamp)
+		   void *lookup_mem, void *tstamp, uintptr_t lbase)
 {
 	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
 			     (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
 	struct rte_event_vector *vec;
+	uint64_t aura_handle, laddr;
 	uint16_t nb_mbufs, non_vec;
+	uint16_t lmt_id, d_off;
+	struct rte_mbuf *mbuf;
+	uint8_t loff = 0;
+	uint64_t sa_base;
 	uint64_t **wqe;
 
 	mbuf_init |= ((uint64_t)port_id) << 48;
@@ -121,17 +126,41 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
 	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, vec->mbufs, nb_mbufs,
 					      flags | NIX_RX_VWQE_F, lookup_mem,
-					      tstamp);
+					      tstamp, lbase);
 	wqe += nb_mbufs;
 	non_vec = vec->nb_elem - nb_mbufs;
 
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
+		mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
+					   sizeof(struct rte_mbuf));
+		/* Pick the first mbuf's aura handle, assuming all
+		 * mbufs in the vec are from the same RQ.
+		 */
+		aura_handle = mbuf->pool->pool_id;
+		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+		laddr = lbase;
+		laddr += 8;
+		d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
+		d_off += (mbuf_init & 0xFFFF);
+		sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
+		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+	}
+
 	while (non_vec) {
 		struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
-		struct rte_mbuf *mbuf;
 		uint64_t tstamp_ptr;
 
 		mbuf = (struct rte_mbuf *)((char *)cqe -
 					   sizeof(struct rte_mbuf));
+
+		/* Translate meta to mbuf */
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			const uint64_t cq_w1 = *((const uint64_t *)cqe + 1);
+
+			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, sa_base, laddr,
+						       &loff, mbuf, d_off);
+		}
+
 		cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
 				      mbuf_init, flags);
 		/* Extracting tstamp, if PTP enabled*/
@@ -145,6 +174,12 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 		non_vec--;
 		wqe++;
 	}
+
+	/* Free remaining meta buffers if any */
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
+		nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
+		plt_io_wmb();
+	}
 }
 
 static __rte_always_inline uint16_t
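
cn10k_process_vwqe() primes the inline-IPsec state once per vector: the
aura handle comes from the first mbuf (all mbufs in a vector are assumed
to be from the same RQ), laddr points past the 8-byte header word of the
LMT line, and d_off locates the CPT parse header area in the meta
buffer. Second-pass packets (bit 11 of CQE word1) are then swapped for
their inner mbuf while the meta pointer is queued for a batched free.
The accumulate step inside nix_sec_meta_to_mbuf_sc() amounts to the
following (illustrative helper, not part of the patch):

	static inline void
	queue_meta_for_free(uintptr_t laddr, uint8_t *loff,
			    struct rte_mbuf *meta)
	{
		/* Slot *loff of the LMT line is at laddr + (*loff << 3);
		 * the caller flushes with nix_sec_flush_meta() when the
		 * burst ends or the line fills.
		 */
		*(uint64_t *)(laddr + ((uintptr_t)*loff << 3)) = (uint64_t)meta;
		*loff = *loff + 1;
	}
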
@@ -188,6 +223,34 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 			   RTE_EVENT_TYPE_ETHDEV) {
 			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
 
+			if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+				struct rte_mbuf *m;
+				uintptr_t sa_base;
+				uint64_t iova = 0;
+				uint8_t loff = 0;
+				uint16_t d_off;
+				uint64_t cq_w1;
+
+				m = (struct rte_mbuf *)mbuf;
+				d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
+				d_off += RTE_PKTMBUF_HEADROOM;
+
+				cq_w1 = *(uint64_t *)(gw.u64[1] + 8);
+
+				sa_base = cnxk_nix_sa_base_get(port,
+							       lookup_mem);
+				sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+
+				mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(cq_w1,
+						sa_base, (uintptr_t)&iova,
+						&loff, (struct rte_mbuf *)mbuf,
+						d_off);
+				if (loff)
+					roc_npa_aura_op_free(m->pool->pool_id,
+							     0, iova);
+
+			}
+
 			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
 			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
 					  gw.u64[0] & 0xFFFFF, flags,
@@ -212,7 +275,7 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 				   ((uint64_t)port << 32);
 			*(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
 			cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
-					   ws->tstamp);
+					   ws->tstamp, ws->lmt_base);
 		}
 	}
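
Note the contrast between the two branches above: a single non-vector
event has no batching window, so when the meta-to-mbuf translation
consumed a meta buffer (loff set), it is returned straight to its aura
with roc_npa_aura_op_free() instead of being queued in an LMT line.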
 
@@ -290,7 +353,7 @@ uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
 uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
 					uint16_t nb_events);
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
 		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
 	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
diff --git a/drivers/event/cnxk/cn10k_worker_deq.c b/drivers/event/cnxk/cn10k_worker_deq.c
index 36ec454..6083f69 100644
--- a/drivers/event/cnxk/cn10k_worker_deq.c
+++ b/drivers/event/cnxk/cn10k_worker_deq.c
@@ -6,7 +6,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
 		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
 	{                                                                      \
diff --git a/drivers/event/cnxk/cn10k_worker_deq_burst.c b/drivers/event/cnxk/cn10k_worker_deq_burst.c
index 29ecc55..8539d5d 100644
--- a/drivers/event/cnxk/cn10k_worker_deq_burst.c
+++ b/drivers/event/cnxk/cn10k_worker_deq_burst.c
@@ -6,7 +6,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
 		void *port, struct rte_event ev[], uint16_t nb_events,         \
 		uint64_t timeout_ticks)                                        \
diff --git a/drivers/event/cnxk/cn10k_worker_deq_ca.c b/drivers/event/cnxk/cn10k_worker_deq_ca.c
index c90f6a9..15c698e 100644
--- a/drivers/event/cnxk/cn10k_worker_deq_ca.c
+++ b/drivers/event/cnxk/cn10k_worker_deq_ca.c
@@ -6,7 +6,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 	uint16_t __rte_hot cn10k_sso_hws_deq_ca_##name(                        \
 		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
 	{                                                                      \
diff --git a/drivers/event/cnxk/cn10k_worker_deq_tmo.c b/drivers/event/cnxk/cn10k_worker_deq_tmo.c
index c8524a2..537ae37 100644
--- a/drivers/event/cnxk/cn10k_worker_deq_tmo.c
+++ b/drivers/event/cnxk/cn10k_worker_deq_tmo.c
@@ -6,7 +6,7 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			       \
 	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                       \
 		void *port, struct rte_event *ev, uint64_t timeout_ticks)      \
 	{                                                                      \
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index a888364..200cd93 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -81,4 +81,8 @@ void cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
 /* Security context setup */
 void cn10k_eth_sec_ops_override(void);
 
+#define LMT_OFF(lmt_addr, lmt_num, offset)                                     \
+	(void *)((uintptr_t)(lmt_addr) +                                       \
+		 ((uint64_t)(lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
+
 #endif /* __CN10K_ETHDEV_H__ */
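
LMT_OFF() moves here from cn10k_tx.h (the Tx copy is removed at the end
of this patch) so the Rx path can address LMT lines too; it now casts
lmt_addr to uintptr_t and widens lmt_num before the shift. Typical use,
as in the Rx burst loops below, assuming the usual 128-byte LMT line
(ROC_LMT_LINE_SIZE_LOG2 == 7):

	/* Byte 8 of LMT line lnum: skip the 8-byte batch-free header
	 * word, leaving room for 15 meta pointers per line.
	 */
	laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
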
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index 69e767a..d6af54b 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -5,7 +5,7 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_rx.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			       \
 	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name(	       \
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
@@ -17,12 +17,13 @@ NIX_RX_FASTPATH_MODES
 
 static inline void
 pick_rx_func(struct rte_eth_dev *eth_dev,
-	     const eth_rx_burst_t rx_burst[2][2][2][2][2][2])
+	     const eth_rx_burst_t rx_burst[2][2][2][2][2][2][2])
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
-	/* [VLAN] [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
+	/* [SEC] [VLAN] [TSP] [MARK] [CKSUM] [PTYPE] [RSS] */
 	eth_dev->rx_pkt_burst = rx_burst
+		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)]
 		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
 		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
 		[!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
@@ -38,33 +39,33 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
-	const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_##name,
+	const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			      \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_##name,
 
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_mseg_##name,
+	const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			      \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_mseg_##name,
 
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_##name,
+	const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			      \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_##name,
 
 		NIX_RX_FASTPATH_MODES
 #undef R
 	};
 
-	const eth_rx_burst_t nix_eth_rx_vec_burst_mseg[2][2][2][2][2][2] = {
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_mseg_##name,
+	const eth_rx_burst_t nix_eth_rx_vec_burst_mseg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_recv_pkts_vec_mseg_##name,
 
 		NIX_RX_FASTPATH_MODES
 #undef R
@@ -73,7 +74,7 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 	/* Copy multi seg version with no offload for tear down sequence */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		dev->rx_pkt_burst_no_offload =
-			nix_eth_rx_burst_mseg[0][0][0][0][0][0];
+			nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
 
 	if (dev->scalar_ena) {
 		if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index d27a231..fcc451a 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -65,6 +65,130 @@ nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
 	return (struct rte_mbuf *)(buff - data_off);
 }
 
+static __rte_always_inline void
+nix_sec_flush_meta(uintptr_t laddr, uint16_t lmt_id, uint8_t loff,
+		   uintptr_t aura_handle)
+{
+	uint64_t pa;
+
+	/* laddr is pointing to first pointer */
+	laddr -= 8;
+
+	/* Trigger free either on lmtline full or different aura handle */
+	pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;
+
+	/* Update aura handle */
+	*(uint64_t *)laddr = (((uint64_t)(loff & 0x1) << 32) |
+			      roc_npa_aura_handle_to_aura(aura_handle));
+
+	pa |= ((loff >> 1) << 4);
+	roc_lmt_submit_steorl(lmt_id, pa);
+}
+
+static __rte_always_inline struct rte_mbuf *
+nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, const uint64_t sa_base, uintptr_t laddr,
+			uint8_t *loff, struct rte_mbuf *mbuf, uint16_t data_off)
+{
+	const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
+	const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
+	struct cn10k_inb_priv_data *inb_priv;
+	struct rte_mbuf *inner;
+	uint32_t sa_idx;
+	void *inb_sa;
+	uint64_t w0;
+
+	if (cq_w1 & BIT(11)) {
+		inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
+					    sizeof(struct rte_mbuf));
+
+		/* Get SPI from CPT_PARSE_S's cookie (already swapped) */
+		w0 = hdr->w0.u64;
+		sa_idx = w0 >> 32;
+
+		inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
+		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
+
+		/* Update dynamic field with userdata */
+		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+
+		/* Update l2 hdr length first */
+		inner->pkt_len = (hdr->w2.il3_off -
+				  sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7));
+
+		/* Store meta in lmtline to free.
+		 * Assume all metas are from the same aura.
+		 */
+		*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
+		*loff = *loff + 1;
+
+		return inner;
+	}
+	return mbuf;
+}
+
+#if defined(RTE_ARCH_ARM64)
+
+static __rte_always_inline struct rte_mbuf *
+nix_sec_meta_to_mbuf(uint64_t cq_w1, uintptr_t sa_base, uintptr_t laddr,
+		     uint8_t *loff, struct rte_mbuf *mbuf, uint16_t data_off,
+		     uint8x16_t *rx_desc_field1, uint64_t *ol_flags)
+{
+	const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
+	const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
+	struct cn10k_inb_priv_data *inb_priv;
+	struct rte_mbuf *inner;
+	uint64_t *sg, res_w1;
+	uint32_t sa_idx;
+	void *inb_sa;
+	uint16_t len;
+	uint64_t w0;
+
+	if (cq_w1 & BIT(11)) {
+		inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
+					    sizeof(struct rte_mbuf));
+		/* Get SPI from CPT_PARSE_S's cookie (already swapped) */
+		w0 = hdr->w0.u64;
+		sa_idx = w0 >> 32;
+
+		inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
+		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
+
+		/* Update dynamic field with userdata */
+		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+
+		/* CPT result (struct cpt_cn10k_res_s) is after
+		 * the first IOVA in meta
+		 */
+		sg = (uint64_t *)(inner + 1);
+		res_w1 = sg[10];
+
+		/* Clear checksum flags and update security flag */
+		*ol_flags &= ~(PKT_RX_L4_CKSUM_MASK | PKT_RX_IP_CKSUM_MASK);
+		*ol_flags |= (((res_w1 & 0xFF) == CPT_COMP_WARN) ?
+			      PKT_RX_SEC_OFFLOAD :
+			      (PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED));
+		/* Calculate inner packet length */
+		len = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -
+			sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);
+		/* Update pkt_len and data_len */
+		*rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 2);
+		*rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 4);
+
+		/* Store meta in lmtline to free.
+		 * Assume all metas are from the same aura.
+		 */
+		*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
+		*loff = *loff + 1;
+
+		/* Return inner mbuf */
+		return inner;
+	}
+
+	/* Return same mbuf as it is not a decrypted pkt */
+	return mbuf;
+}
+#endif
+
 static __rte_always_inline uint32_t
 nix_ptype_get(const void *const lookup_mem, const uint64_t in)
 {
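
nix_sec_flush_meta() turns the pointers queued in an LMT line into a
single NPA batch free: the header word at laddr - 8 carries the aura
handle plus an odd-count bit, while the STEORL target address encodes
the number of full pointer pairs. With loff = 5 as a worked example:

	/* header word = ((5 & 0x1) << 32) | aura  -> odd-count bit set
	 * pa         |= (5 >> 1) << 4             -> two pointer pairs
	 * payload    = five 8-byte meta pointers at laddr .. laddr + 32
	 */

The translation helpers next to it recover the inner packet from the
CPT parse header: wqe_ptr (big-endian) minus sizeof(struct rte_mbuf)
yields the inner mbuf, the upper 32 bits of w0 index the inbound SA,
and the SA's software reserved area supplies the userdata written to
the security dynfield.
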
@@ -177,8 +301,8 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 {
 	const union nix_rx_parse_u *rx =
 		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
-	const uint16_t len = rx->pkt_lenm1 + 1;
 	const uint64_t w1 = *(const uint64_t *)rx;
+	uint16_t len = rx->pkt_lenm1 + 1;
 	uint64_t ol_flags = 0;
 
 	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
@@ -194,8 +318,30 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 		ol_flags |= PKT_RX_RSS_HASH;
 	}
 
-	if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
-		ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
+	/* Process Security packets */
+	if (flag & NIX_RX_OFFLOAD_SECURITY_F) {
+		if (w1 & BIT(11)) {
+			/* CPT result (struct cpt_cn10k_res_s) is after
+			 * the first IOVA in meta
+			 */
+			const uint64_t *sg = (const uint64_t *)(mbuf + 1);
+			const uint64_t res_w1 = sg[10];
+			const uint16_t uc_cc = res_w1 & 0xFF;
+
+			/* Rlen */
+			len = ((res_w1 >> 16) & 0xFFFF) + mbuf->pkt_len;
+			ol_flags |= ((uc_cc == CPT_COMP_WARN) ?
+				     PKT_RX_SEC_OFFLOAD :
+				     (PKT_RX_SEC_OFFLOAD |
+				      PKT_RX_SEC_OFFLOAD_FAILED));
+		} else {
+			if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
+				ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
+		}
+	} else {
+		if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
+			ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
+	}
 
 	if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
 		if (rx->vtag0_gone) {
@@ -263,13 +409,28 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	const uintptr_t desc = rxq->desc;
 	const uint64_t wdata = rxq->wdata;
 	const uint32_t qmask = rxq->qmask;
+	uint64_t lbase = rxq->lmt_base;
 	uint16_t packets = 0, nb_pkts;
+	uint8_t loff = 0, lnum = 0;
 	uint32_t head = rxq->head;
 	struct nix_cqe_hdr_s *cq;
 	struct rte_mbuf *mbuf;
+	uint64_t aura_handle;
+	uint64_t sa_base;
+	uint16_t lmt_id;
+	uint64_t laddr;
 
 	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
 
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+		aura_handle = rxq->aura_handle;
+		sa_base = rxq->sa_base;
+		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+		laddr = lbase;
+		laddr += 8;
+	}
+
 	while (packets < nb_pkts) {
 		/* Prefetch N desc ahead */
 		rte_prefetch_non_temporal(
@@ -278,6 +439,14 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 
 		mbuf = nix_get_mbuf_from_cqe(cq, data_off);
 
+		/* Translate meta to mbuf */
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
+
+			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, sa_base, laddr,
+						       &loff, mbuf, data_off);
+		}
+
 		cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
 				      flags);
 		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
@@ -289,6 +458,20 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 		roc_prefetch_store_keep(mbuf);
 		head++;
 		head &= qmask;
+
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			/* Flush when there is no space for one more meta */
+			if ((15 - loff) < 1) {
+				nix_sec_flush_meta(laddr, lmt_id + lnum, loff,
+						   aura_handle);
+				lnum++;
+				lnum &= BIT_ULL(ROC_LMT_LINES_PER_CORE_LOG2) -
+					1;
+				/* First pointer starts at 8B offset */
+				laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
+				loff = 0;
+			}
+		}
 	}
 
 	rxq->head = head;
@@ -297,6 +480,12 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	/* Free all the CQs that we've processed */
 	plt_write64((wdata | nb_pkts), rxq->cq_door);
 
+	/* Free remaining meta buffers if any */
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
+		nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);
+		plt_io_wmb();
+	}
+
 	return nb_pkts;
 }
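
The flush threshold follows from the LMT line layout: one 8-byte header
word plus fifteen 8-byte pointer slots per (assumed 128-byte) line. The
scalar loop flushes as soon as no slot is left ((15 - loff) < 1); the
vector loop further below flushes while four slots still remain so a
whole quad of metas always fits. In both cases lnum cycles through the
per-core lines under the ROC_LMT_LINES_PER_CORE_LOG2 mask, presumably
so a line is not reused while a previous STEORL is still in flight.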
 
@@ -327,7 +516,8 @@ nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
 static __rte_always_inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
-			   struct cnxk_timesync_info *tstamp)
+			   struct cnxk_timesync_info *tstamp,
+			   uintptr_t lmt_base)
 {
 	struct cn10k_eth_rxq *rxq = args;
 	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
@@ -346,9 +536,13 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
 	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+	uint64_t aura_handle, lbase, laddr;
+	uint8_t loff = 0, lnum = 0;
 	uint8x16_t f0, f1, f2, f3;
+	uint16_t lmt_id, d_off;
 	uint16_t packets = 0;
 	uint16_t pkts_left;
+	uintptr_t sa_base;
 	uint32_t head;
 	uintptr_t cq0;
 
@@ -366,6 +560,38 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		RTE_SET_USED(head);
 	}
 
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+		if (flags & NIX_RX_VWQE_F) {
+			uint16_t port;
+
+			mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
+						    sizeof(struct rte_mbuf));
+			/* Pick the first mbuf's aura handle, assuming all
+			 * mbufs in the vec are from the same RQ.
+			 */
+			aura_handle = mbuf0->pool->pool_id;
+			/* Calculate offset from mbuf to actual data area */
+			d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
+			d_off += (mbuf_initializer & 0xFFFF);
+
+			/* Get SA Base from lookup tbl using port_id */
+			port = mbuf_initializer >> 48;
+			sa_base = cnxk_nix_sa_base_get(port, lookup_mem);
+
+			lbase = lmt_base;
+		} else {
+			aura_handle = rxq->aura_handle;
+			d_off = rxq->data_off;
+			sa_base = rxq->sa_base;
+			lbase = rxq->lmt_base;
+		}
+		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+		lnum = 0;
+		laddr = lbase;
+		laddr += 8;
+	}
+
 	while (packets < pkts) {
 		if (!(flags & NIX_RX_VWQE_F)) {
 			/* Exit loop if head is about to wrap and become
@@ -428,6 +654,14 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
 		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
 
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			/* Prefetch probable CPT parse header area */
+			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf0, d_off));
+			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf1, d_off));
+			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf2, d_off));
+			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf3, d_off));
+		}
+
 		/* Load CQE word0 and word 1 */
 		const uint64_t cq0_w0 = *CQE_PTR_OFF(cq0, 0, 0, flags);
 		const uint64_t cq0_w1 = *CQE_PTR_OFF(cq0, 0, 8, flags);
@@ -474,6 +708,30 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
 		}
 
+		/* Translate meta to mbuf */
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			/* Checksum ol_flags will be cleared if mbuf is meta */
+			mbuf0 = nix_sec_meta_to_mbuf(cq0_w1, sa_base, laddr,
+						     &loff, mbuf0, d_off, &f0,
+						     &ol_flags0);
+			mbuf01 = vsetq_lane_u64((uint64_t)mbuf0, mbuf01, 0);
+
+			mbuf1 = nix_sec_meta_to_mbuf(cq1_w1, sa_base, laddr,
+						     &loff, mbuf1, d_off, &f1,
+						     &ol_flags1);
+			mbuf01 = vsetq_lane_u64((uint64_t)mbuf1, mbuf01, 1);
+
+			mbuf2 = nix_sec_meta_to_mbuf(cq2_w1, sa_base, laddr,
+						     &loff, mbuf2, d_off, &f2,
+						     &ol_flags2);
+			mbuf23 = vsetq_lane_u64((uint64_t)mbuf2, mbuf23, 0);
+
+			mbuf3 = nix_sec_meta_to_mbuf(cq3_w1, sa_base, laddr,
+						     &loff, mbuf3, d_off, &f3,
+						     &ol_flags3);
+			mbuf23 = vsetq_lane_u64((uint64_t)mbuf3, mbuf23, 1);
+		}
+
 		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
 			uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
 			uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
@@ -659,6 +917,26 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			head += NIX_DESCS_PER_LOOP;
 			head &= qmask;
 		}
+
+		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+			/* Flush when we don't have space for 4 meta */
+			if ((15 - loff) < 4) {
+				nix_sec_flush_meta(laddr, lmt_id + lnum, loff,
+						   aura_handle);
+				lnum++;
+				lnum &= BIT_ULL(ROC_LMT_LINES_PER_CORE_LOG2) -
+					1;
+				/* First pointer starts at 8B offset */
+				laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
+				loff = 0;
+			}
+		}
+	}
+
+	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
+		nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);
+		if (flags & NIX_RX_VWQE_F)
+			plt_io_wmb();
 	}
 
 	if (flags & NIX_RX_VWQE_F)
@@ -681,16 +959,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 #else
 
 static inline uint16_t
-cn10k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
-			   uint16_t pkts, const uint16_t flags,
-			   void *lookup_mem, void *tstamp)
+cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
+			   const uint16_t flags, void *lookup_mem,
+			   struct cnxk_timesync_info *tstamp,
+			   uintptr_t lmt_base)
 {
-	RTE_SET_USED(lookup_mem);
-	RTE_SET_USED(rx_queue);
-	RTE_SET_USED(rx_pkts);
+	RTE_SET_USED(args);
+	RTE_SET_USED(mbufs);
 	RTE_SET_USED(pkts);
 	RTE_SET_USED(flags);
+	RTE_SET_USED(lookup_mem);
 	RTE_SET_USED(tstamp);
+	RTE_SET_USED(lmt_base);
 
 	return 0;
 }
@@ -704,98 +984,268 @@ cn10k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
 #define MARK_F	  NIX_RX_OFFLOAD_MARK_UPDATE_F
 #define TS_F      NIX_RX_OFFLOAD_TSTAMP_F
 #define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
+#define R_SEC_F   NIX_RX_OFFLOAD_SECURITY_F
 
-/* [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
+/* [R_SEC_F] [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
 #define NIX_RX_FASTPATH_MODES						       \
-R(no_offload,			0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE)	       \
-R(rss,				0, 0, 0, 0, 0, 1, RSS_F)		       \
-R(ptype,			0, 0, 0, 0, 1, 0, PTYPE_F)		       \
-R(ptype_rss,			0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F)	       \
-R(cksum,			0, 0, 0, 1, 0, 0, CKSUM_F)		       \
-R(cksum_rss,			0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F)	       \
-R(cksum_ptype,			0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F)	       \
-R(cksum_ptype_rss,		0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F)   \
-R(mark,				0, 0, 1, 0, 0, 0, MARK_F)		       \
-R(mark_rss,			0, 0, 1, 0, 0, 1, MARK_F | RSS_F)	       \
-R(mark_ptype,			0, 0, 1, 0, 1, 0, MARK_F | PTYPE_F)	       \
-R(mark_ptype_rss,		0, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F)    \
-R(mark_cksum,			0, 0, 1, 1, 0, 0, MARK_F | CKSUM_F)	       \
-R(mark_cksum_rss,		0, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F)    \
-R(mark_cksum_ptype,		0, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F)  \
-R(mark_cksum_ptype_rss,		0, 0, 1, 1, 1, 1,			       \
-			MARK_F | CKSUM_F | PTYPE_F | RSS_F)		       \
-R(ts,				0, 1, 0, 0, 0, 0, TS_F)			       \
-R(ts_rss,			0, 1, 0, 0, 0, 1, TS_F | RSS_F)		       \
-R(ts_ptype,			0, 1, 0, 0, 1, 0, TS_F | PTYPE_F)	       \
-R(ts_ptype_rss,			0, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F)      \
-R(ts_cksum,			0, 1, 0, 1, 0, 0, TS_F | CKSUM_F)	       \
-R(ts_cksum_rss,			0, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F)      \
-R(ts_cksum_ptype,		0, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F)    \
-R(ts_cksum_ptype_rss,		0, 1, 0, 1, 1, 1,			       \
-			TS_F | CKSUM_F | PTYPE_F | RSS_F)		       \
-R(ts_mark,			0, 1, 1, 0, 0, 0, TS_F | MARK_F)	       \
-R(ts_mark_rss,			0, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F)       \
-R(ts_mark_ptype,		0, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F)     \
-R(ts_mark_ptype_rss,		0, 1, 1, 0, 1, 1,			       \
-			TS_F | MARK_F | PTYPE_F | RSS_F)		       \
-R(ts_mark_cksum,		0, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F)     \
-R(ts_mark_cksum_rss,		0, 1, 1, 1, 0, 1,			       \
-			TS_F | MARK_F | CKSUM_F | RSS_F)		       \
-R(ts_mark_cksum_ptype,		0, 1, 1, 1, 1, 0,			       \
-			TS_F | MARK_F | CKSUM_F | PTYPE_F)		       \
-R(ts_mark_cksum_ptype_rss,	0, 1, 1, 1, 1, 1,			       \
-			TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	       \
-R(vlan,				1, 0, 0, 0, 0, 0, RX_VLAN_F)		       \
-R(vlan_rss,			1, 0, 0, 0, 0, 1, RX_VLAN_F | RSS_F)	       \
-R(vlan_ptype,			1, 0, 0, 0, 1, 0, RX_VLAN_F | PTYPE_F)	       \
-R(vlan_ptype_rss,		1, 0, 0, 0, 1, 1, RX_VLAN_F | PTYPE_F | RSS_F) \
-R(vlan_cksum,			1, 0, 0, 1, 0, 0, RX_VLAN_F | CKSUM_F)	       \
-R(vlan_cksum_rss,		1, 0, 0, 1, 0, 1, RX_VLAN_F | CKSUM_F | RSS_F) \
-R(vlan_cksum_ptype,		1, 0, 0, 1, 1, 0,			       \
-			RX_VLAN_F | CKSUM_F | PTYPE_F)			       \
-R(vlan_cksum_ptype_rss,		1, 0, 0, 1, 1, 1,			       \
-			RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)		       \
-R(vlan_mark,			1, 0, 1, 0, 0, 0, RX_VLAN_F | MARK_F)	       \
-R(vlan_mark_rss,		1, 0, 1, 0, 0, 1, RX_VLAN_F | MARK_F | RSS_F)  \
-R(vlan_mark_ptype,		1, 0, 1, 0, 1, 0, RX_VLAN_F | MARK_F | PTYPE_F)\
-R(vlan_mark_ptype_rss,		1, 0, 1, 0, 1, 1,			       \
-			RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)		       \
-R(vlan_mark_cksum,		1, 0, 1, 1, 0, 0, RX_VLAN_F | MARK_F | CKSUM_F)\
-R(vlan_mark_cksum_rss,		1, 0, 1, 1, 0, 1,			       \
-			RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)		       \
-R(vlan_mark_cksum_ptype,	1, 0, 1, 1, 1, 0,			       \
-			RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)		       \
-R(vlan_mark_cksum_ptype_rss,	1, 0, 1, 1, 1, 1,			       \
-			RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	       \
-R(vlan_ts,			1, 1, 0, 0, 0, 0, RX_VLAN_F | TS_F)	       \
-R(vlan_ts_rss,			1, 1, 0, 0, 0, 1, RX_VLAN_F | TS_F | RSS_F)    \
-R(vlan_ts_ptype,		1, 1, 0, 0, 1, 0, RX_VLAN_F | TS_F | PTYPE_F)  \
-R(vlan_ts_ptype_rss,		1, 1, 0, 0, 1, 1,			       \
-			RX_VLAN_F | TS_F | PTYPE_F | RSS_F)		       \
-R(vlan_ts_cksum,		1, 1, 0, 1, 0, 0, RX_VLAN_F | TS_F | CKSUM_F)  \
-R(vlan_ts_cksum_rss,		1, 1, 0, 1, 0, 1,			       \
-			RX_VLAN_F | TS_F | CKSUM_F | RSS_F)		       \
-R(vlan_ts_cksum_ptype,		1, 1, 0, 1, 1, 0,			       \
-			RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)		       \
-R(vlan_ts_cksum_ptype_rss,	1, 1, 0, 1, 1, 1,			       \
-			RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)	       \
-R(vlan_ts_mark,			1, 1, 1, 0, 0, 0, RX_VLAN_F | TS_F | MARK_F)   \
-R(vlan_ts_mark_rss,		1, 1, 1, 0, 0, 1,			       \
-			RX_VLAN_F | TS_F | MARK_F | RSS_F)		       \
-R(vlan_ts_mark_ptype,		1, 1, 1, 0, 1, 0,			       \
-			RX_VLAN_F | TS_F | MARK_F | PTYPE_F)		       \
-R(vlan_ts_mark_ptype_rss,	1, 1, 1, 0, 1, 1,			       \
-			RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)	       \
-R(vlan_ts_mark_cksum,		1, 1, 1, 1, 0, 0,			       \
-			RX_VLAN_F | TS_F | MARK_F | CKSUM_F)		       \
-R(vlan_ts_mark_cksum_rss,	1, 1, 1, 1, 0, 1,			       \
-			RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)	       \
-R(vlan_ts_mark_cksum_ptype,	1, 1, 1, 1, 1, 0,			       \
-			RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)	       \
-R(vlan_ts_mark_cksum_ptype_rss,	1, 1, 1, 1, 1, 1,			       \
-			RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
+R(no_offload,			0, 0, 0, 0, 0, 0, 0,			       \
+		NIX_RX_OFFLOAD_NONE)					       \
+R(rss,				0, 0, 0, 0, 0, 0, 1,			       \
+		RSS_F)							       \
+R(ptype,			0, 0, 0, 0, 0, 1, 0,			       \
+		PTYPE_F)						       \
+R(ptype_rss,			0, 0, 0, 0, 0, 1, 1,			       \
+		PTYPE_F | RSS_F)					       \
+R(cksum,			0, 0, 0, 0, 1, 0, 0,			       \
+		CKSUM_F)						       \
+R(cksum_rss,			0, 0, 0, 0, 1, 0, 1,			       \
+		CKSUM_F | RSS_F)					       \
+R(cksum_ptype,			0, 0, 0, 0, 1, 1, 0,			       \
+		CKSUM_F | PTYPE_F)					       \
+R(cksum_ptype_rss,		0, 0, 0, 0, 1, 1, 1,			       \
+		CKSUM_F | PTYPE_F | RSS_F)				       \
+R(mark,				0, 0, 0, 1, 0, 0, 0,			       \
+		MARK_F)							       \
+R(mark_rss,			0, 0, 0, 1, 0, 0, 1,			       \
+		MARK_F | RSS_F)						       \
+R(mark_ptype,			0, 0, 0, 1, 0, 1, 0,			       \
+		MARK_F | PTYPE_F)					       \
+R(mark_ptype_rss,		0, 0, 0, 1, 0, 1, 1,			       \
+		MARK_F | PTYPE_F | RSS_F)				       \
+R(mark_cksum,			0, 0, 0, 1, 1, 0, 0,			       \
+		MARK_F | CKSUM_F)					       \
+R(mark_cksum_rss,		0, 0, 0, 1, 1, 0, 1,			       \
+		MARK_F | CKSUM_F | RSS_F)				       \
+R(mark_cksum_ptype,		0, 0, 0, 1, 1, 1, 0,			       \
+		MARK_F | CKSUM_F | PTYPE_F)				       \
+R(mark_cksum_ptype_rss,		0, 0, 0, 1, 1, 1, 1,			       \
+		MARK_F | CKSUM_F | PTYPE_F | RSS_F)			       \
+R(ts,				0, 0, 1, 0, 0, 0, 0,			       \
+		TS_F)							       \
+R(ts_rss,			0, 0, 1, 0, 0, 0, 1,			       \
+		TS_F | RSS_F)						       \
+R(ts_ptype,			0, 0, 1, 0, 0, 1, 0,			       \
+		TS_F | PTYPE_F)						       \
+R(ts_ptype_rss,			0, 0, 1, 0, 0, 1, 1,			       \
+		TS_F | PTYPE_F | RSS_F)					       \
+R(ts_cksum,			0, 0, 1, 0, 1, 0, 0,			       \
+		TS_F | CKSUM_F)						       \
+R(ts_cksum_rss,			0, 0, 1, 0, 1, 0, 1,			       \
+		TS_F | CKSUM_F | RSS_F)					       \
+R(ts_cksum_ptype,		0, 0, 1, 0, 1, 1, 0,			       \
+		TS_F | CKSUM_F | PTYPE_F)				       \
+R(ts_cksum_ptype_rss,		0, 0, 1, 0, 1, 1, 1,			       \
+		TS_F | CKSUM_F | PTYPE_F | RSS_F)			       \
+R(ts_mark,			0, 0, 1, 1, 0, 0, 0,			       \
+		TS_F | MARK_F)						       \
+R(ts_mark_rss,			0, 0, 1, 1, 0, 0, 1,			       \
+		TS_F | MARK_F | RSS_F)					       \
+R(ts_mark_ptype,		0, 0, 1, 1, 0, 1, 0,			       \
+		TS_F | MARK_F | PTYPE_F)				       \
+R(ts_mark_ptype_rss,		0, 0, 1, 1, 0, 1, 1,			       \
+		TS_F | MARK_F | PTYPE_F | RSS_F)			       \
+R(ts_mark_cksum,		0, 0, 1, 1, 1, 0, 0,			       \
+		TS_F | MARK_F | CKSUM_F)				       \
+R(ts_mark_cksum_rss,		0, 0, 1, 1, 1, 0, 1,			       \
+		TS_F | MARK_F | CKSUM_F | RSS_F)			       \
+R(ts_mark_cksum_ptype,		0, 0, 1, 1, 1, 1, 0,			       \
+		TS_F | MARK_F | CKSUM_F | PTYPE_F)			       \
+R(ts_mark_cksum_ptype_rss,	0, 0, 1, 1, 1, 1, 1,			       \
+		TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)		       \
+R(vlan,				0, 1, 0, 0, 0, 0, 0,			       \
+		RX_VLAN_F)						       \
+R(vlan_rss,			0, 1, 0, 0, 0, 0, 1,			       \
+		RX_VLAN_F | RSS_F)					       \
+R(vlan_ptype,			0, 1, 0, 0, 0, 1, 0,			       \
+		RX_VLAN_F | PTYPE_F)					       \
+R(vlan_ptype_rss,		0, 1, 0, 0, 0, 1, 1,			       \
+		RX_VLAN_F | PTYPE_F | RSS_F)				       \
+R(vlan_cksum,			0, 1, 0, 0, 1, 0, 0,			       \
+		RX_VLAN_F | CKSUM_F)					       \
+R(vlan_cksum_rss,		0, 1, 0, 0, 1, 0, 1,			       \
+		RX_VLAN_F | CKSUM_F | RSS_F)				       \
+R(vlan_cksum_ptype,		0, 1, 0, 0, 1, 1, 0,			       \
+		RX_VLAN_F | CKSUM_F | PTYPE_F)				       \
+R(vlan_cksum_ptype_rss,		0, 1, 0, 0, 1, 1, 1,			       \
+		RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)			       \
+R(vlan_mark,			0, 1, 0, 1, 0, 0, 0,			       \
+		RX_VLAN_F | MARK_F)					       \
+R(vlan_mark_rss,		0, 1, 0, 1, 0, 0, 1,			       \
+		RX_VLAN_F | MARK_F | RSS_F)				       \
+R(vlan_mark_ptype,		0, 1, 0, 1, 0, 1, 0,			       \
+		RX_VLAN_F | MARK_F | PTYPE_F)				       \
+R(vlan_mark_ptype_rss,		0, 1, 0, 1, 0, 1, 1,			       \
+		RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)			       \
+R(vlan_mark_cksum,		0, 1, 0, 1, 1, 0, 0,			       \
+		RX_VLAN_F | MARK_F | CKSUM_F)				       \
+R(vlan_mark_cksum_rss,		0, 1, 0, 1, 1, 0, 1,			       \
+		RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)			       \
+R(vlan_mark_cksum_ptype,	0, 1, 0, 1, 1, 1, 0,			       \
+		RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)			       \
+R(vlan_mark_cksum_ptype_rss,	0, 1, 0, 1, 1, 1, 1,			       \
+		RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)		       \
+R(vlan_ts,			0, 1, 1, 0, 0, 0, 0,			       \
+		RX_VLAN_F | TS_F)					       \
+R(vlan_ts_rss,			0, 1, 1, 0, 0, 0, 1,			       \
+		RX_VLAN_F | TS_F | RSS_F)				       \
+R(vlan_ts_ptype,		0, 1, 1, 0, 0, 1, 0,			       \
+		RX_VLAN_F | TS_F | PTYPE_F)				       \
+R(vlan_ts_ptype_rss,		0, 1, 1, 0, 0, 1, 1,			       \
+		RX_VLAN_F | TS_F | PTYPE_F | RSS_F)			       \
+R(vlan_ts_cksum,		0, 1, 1, 0, 1, 0, 0,			       \
+		RX_VLAN_F | TS_F | CKSUM_F)				       \
+R(vlan_ts_cksum_rss,		0, 1, 1, 0, 1, 0, 1,			       \
+		RX_VLAN_F | TS_F | CKSUM_F | RSS_F)			       \
+R(vlan_ts_cksum_ptype,		0, 1, 1, 0, 1, 1, 0,			       \
+		RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)			       \
+R(vlan_ts_cksum_ptype_rss,	0, 1, 1, 0, 1, 1, 1,			       \
+		RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)		       \
+R(vlan_ts_mark,			0, 1, 1, 1, 0, 0, 0,			       \
+		RX_VLAN_F | TS_F | MARK_F)				       \
+R(vlan_ts_mark_rss,		0, 1, 1, 1, 0, 0, 1,			       \
+		RX_VLAN_F | TS_F | MARK_F | RSS_F)			       \
+R(vlan_ts_mark_ptype,		0, 1, 1, 1, 0, 1, 0,			       \
+		RX_VLAN_F | TS_F | MARK_F | PTYPE_F)			       \
+R(vlan_ts_mark_ptype_rss,	0, 1, 1, 1, 0, 1, 1,			       \
+		RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)		       \
+R(vlan_ts_mark_cksum,		0, 1, 1, 1, 1, 0, 0,			       \
+		RX_VLAN_F | TS_F | MARK_F | CKSUM_F)			       \
+R(vlan_ts_mark_cksum_rss,	0, 1, 1, 1, 1, 0, 1,			       \
+		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)		       \
+R(vlan_ts_mark_cksum_ptype,	0, 1, 1, 1, 1, 1, 0,			       \
+		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)		       \
+R(vlan_ts_mark_cksum_ptype_rss,	0, 1, 1, 1, 1, 1, 1,			       \
+		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	       \
+R(sec,				1, 0, 0, 0, 0, 0, 0,			       \
+		R_SEC_F)						       \
+R(sec_rss,			1, 0, 0, 0, 0, 0, 1,			       \
+		R_SEC_F | RSS_F)					       \
+R(sec_ptype,			1, 0, 0, 0, 0, 1, 0,			       \
+		R_SEC_F | PTYPE_F)					       \
+R(sec_ptype_rss,		1, 0, 0, 0, 0, 1, 1,			       \
+		R_SEC_F | PTYPE_F | RSS_F)				       \
+R(sec_cksum,			1, 0, 0, 0, 1, 0, 0,			       \
+		R_SEC_F | CKSUM_F)					       \
+R(sec_cksum_rss,		1, 0, 0, 0, 1, 0, 1,			       \
+		R_SEC_F | CKSUM_F | RSS_F)				       \
+R(sec_cksum_ptype,		1, 0, 0, 0, 1, 1, 0,			       \
+		R_SEC_F | CKSUM_F | PTYPE_F)				       \
+R(sec_cksum_ptype_rss,		1, 0, 0, 0, 1, 1, 1,			       \
+		R_SEC_F | CKSUM_F | PTYPE_F | RSS_F)			       \
+R(sec_mark,			1, 0, 0, 1, 0, 0, 0,			       \
+		R_SEC_F | MARK_F)					       \
+R(sec_mark_rss,			1, 0, 0, 1, 0, 0, 1,			       \
+		R_SEC_F | MARK_F | RSS_F)				       \
+R(sec_mark_ptype,		1, 0, 0, 1, 0, 1, 0,			       \
+		R_SEC_F | MARK_F | PTYPE_F)				       \
+R(sec_mark_ptype_rss,		1, 0, 0, 1, 0, 1, 1,			       \
+		R_SEC_F | MARK_F | PTYPE_F | RSS_F)			       \
+R(sec_mark_cksum,		1, 0, 0, 1, 1, 0, 0,			       \
+		R_SEC_F | MARK_F | CKSUM_F)				       \
+R(sec_mark_cksum_rss,		1, 0, 0, 1, 1, 0, 1,			       \
+		R_SEC_F | MARK_F | CKSUM_F | RSS_F)			       \
+R(sec_mark_cksum_ptype,		1, 0, 0, 1, 1, 1, 0,			       \
+		R_SEC_F | MARK_F | CKSUM_F | PTYPE_F)			       \
+R(sec_mark_cksum_ptype_rss,	1, 0, 0, 1, 1, 1, 1,			       \
+		R_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)		       \
+R(sec_ts,			1, 0, 1, 0, 0, 0, 0,			       \
+		R_SEC_F | TS_F)						       \
+R(sec_ts_rss,			1, 0, 1, 0, 0, 0, 1,			       \
+		R_SEC_F | TS_F | RSS_F)					       \
+R(sec_ts_ptype,			1, 0, 1, 0, 0, 1, 0,			       \
+		R_SEC_F | TS_F | PTYPE_F)				       \
+R(sec_ts_ptype_rss,		1, 0, 1, 0, 0, 1, 1,			       \
+		R_SEC_F | TS_F | PTYPE_F | RSS_F)			       \
+R(sec_ts_cksum,			1, 0, 1, 0, 1, 0, 0,			       \
+		R_SEC_F | TS_F | CKSUM_F)				       \
+R(sec_ts_cksum_rss,		1, 0, 1, 0, 1, 0, 1,			       \
+		R_SEC_F | TS_F | CKSUM_F | RSS_F)			       \
+R(sec_ts_cksum_ptype,		1, 0, 1, 0, 1, 1, 0,			       \
+		R_SEC_F | TS_F | CKSUM_F | PTYPE_F)			       \
+R(sec_ts_cksum_ptype_rss,	1, 0, 1, 0, 1, 1, 1,			       \
+		R_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)		       \
+R(sec_ts_mark,			1, 0, 1, 1, 0, 0, 0,			       \
+		R_SEC_F | TS_F | MARK_F)				       \
+R(sec_ts_mark_rss,		1, 0, 1, 1, 0, 0, 1,			       \
+		R_SEC_F | TS_F | MARK_F | RSS_F)			       \
+R(sec_ts_mark_ptype,		1, 0, 1, 1, 0, 1, 0,			       \
+		R_SEC_F | TS_F | MARK_F | PTYPE_F)			       \
+R(sec_ts_mark_ptype_rss,	1, 0, 1, 1, 0, 1, 1,			       \
+		R_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F)		       \
+R(sec_ts_mark_cksum,		1, 0, 1, 1, 1, 0, 0,			       \
+		R_SEC_F | TS_F | MARK_F | CKSUM_F)			       \
+R(sec_ts_mark_cksum_rss,	1, 0, 1, 1, 1, 0, 1,			       \
+		R_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F)		       \
+R(sec_ts_mark_cksum_ptype,	1, 0, 1, 1, 1, 1, 0,			       \
+		R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)		       \
+R(sec_ts_mark_cksum_ptype_rss,	1, 0, 1, 1, 1, 1, 1,			       \
+		R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	       \
+R(sec_vlan,			1, 1, 0, 0, 0, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F)					       \
+R(sec_vlan_rss,			1, 1, 0, 0, 0, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | RSS_F)				       \
+R(sec_vlan_ptype,		1, 1, 0, 0, 0, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | PTYPE_F)				       \
+R(sec_vlan_ptype_rss,		1, 1, 0, 0, 0, 1, 1,			       \
+		R_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F)			       \
+R(sec_vlan_cksum,		1, 1, 0, 0, 1, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F | CKSUM_F)				       \
+R(sec_vlan_cksum_rss,		1, 1, 0, 0, 1, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F)			       \
+R(sec_vlan_cksum_ptype,		1, 1, 0, 0, 1, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F)		       \
+R(sec_vlan_cksum_ptype_rss,	1, 1, 0, 0, 1, 1, 1,			       \
+		R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)	       \
+R(sec_vlan_mark,		1, 1, 0, 1, 0, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F)				       \
+R(sec_vlan_mark_rss,		1, 1, 0, 1, 0, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F | RSS_F)			       \
+R(sec_vlan_mark_ptype,		1, 1, 0, 1, 0, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F)			       \
+R(sec_vlan_mark_ptype_rss,	1, 1, 0, 1, 0, 1, 1,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)		       \
+R(sec_vlan_mark_cksum,		1, 1, 0, 1, 1, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F)			       \
+R(sec_vlan_mark_cksum_rss,	1, 1, 0, 1, 1, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)		       \
+R(sec_vlan_mark_cksum_ptype,	1, 1, 0, 1, 1, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)	       \
+R(sec_vlan_mark_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, 1,			       \
+		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)      \
+R(sec_vlan_ts,			1, 1, 1, 0, 0, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F)				       \
+R(sec_vlan_ts_rss,		1, 1, 1, 0, 0, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | RSS_F)			       \
+R(sec_vlan_ts_ptype,		1, 1, 1, 0, 0, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F)			       \
+R(sec_vlan_ts_ptype_rss,	1, 1, 1, 0, 0, 1, 1,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F | RSS_F)		       \
+R(sec_vlan_ts_cksum,		1, 1, 1, 0, 1, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F)			       \
+R(sec_vlan_ts_cksum_rss,	1, 1, 1, 0, 1, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | RSS_F)		       \
+R(sec_vlan_ts_cksum_ptype,	1, 1, 1, 0, 1, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)		       \
+R(sec_vlan_ts_cksum_ptype_rss,	1, 1, 1, 0, 1, 1, 1,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)	       \
+R(sec_vlan_ts_mark,		1, 1, 1, 1, 0, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F)			       \
+R(sec_vlan_ts_mark_rss,		1, 1, 1, 1, 0, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | RSS_F)		       \
+R(sec_vlan_ts_mark_ptype,	1, 1, 1, 1, 0, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F)		       \
+R(sec_vlan_ts_mark_ptype_rss,	1, 1, 1, 1, 0, 1, 1,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)	       \
+R(sec_vlan_ts_mark_cksum,	1, 1, 1, 1, 1, 0, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F)		       \
+R(sec_vlan_ts_mark_cksum_rss,	1, 1, 1, 1, 1, 0, 1,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)	       \
+R(sec_vlan_ts_mark_cksum_ptype,	1, 1, 1, 1, 1, 1, 0,			       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)       \
+R(sec_vlan_ts_mark_cksum_ptype_rss,	1, 1, 1, 1, 1, 1, 1,		       \
+		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			       \
 	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name(          \
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
 									       \
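
With the new R_SEC_F dimension the fast-path mode table doubles from 64
to 128 entries, one specialized receive function per flag combination.
Roughly, a single row such as R(sec_rss, 1, 0, 0, 0, 0, 0, 1,
R_SEC_F | RSS_F) expands under the R() definitions into:

	/* prototype emitted from cn10k_rx.h: */
	uint16_t __rte_noinline __rte_hot
	cn10k_nix_recv_pkts_sec_rss(void *rx_queue,
				    struct rte_mbuf **rx_pkts,
				    uint16_t pkts);

	/* ...and a slot in the dispatch table built in cn10k_rx.c: */
	[1][0][0][0][0][0][1] = cn10k_nix_recv_pkts_sec_rss,
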
diff --git a/drivers/net/cnxk/cn10k_rx_mseg.c b/drivers/net/cnxk/cn10k_rx_mseg.c
index 3340771..e7c2321 100644
--- a/drivers/net/cnxk/cn10k_rx_mseg.c
+++ b/drivers/net/cnxk/cn10k_rx_mseg.c
@@ -5,7 +5,7 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_rx.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_mseg_##name(     \
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
diff --git a/drivers/net/cnxk/cn10k_rx_vec.c b/drivers/net/cnxk/cn10k_rx_vec.c
index 166735a..0ccc4df 100644
--- a/drivers/net/cnxk/cn10k_rx_vec.c
+++ b/drivers/net/cnxk/cn10k_rx_vec.c
@@ -5,14 +5,14 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_rx.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			       \
 	uint16_t __rte_noinline __rte_hot				       \
 		cn10k_nix_recv_pkts_vec_##name(void *rx_queue,                 \
 					       struct rte_mbuf **rx_pkts,      \
 					       uint16_t pkts)                  \
 	{                                                                      \
 		return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
-						  (flags), NULL, NULL);        \
+						  (flags), NULL, NULL, 0);     \
 	}
 
 NIX_RX_FASTPATH_MODES
diff --git a/drivers/net/cnxk/cn10k_rx_vec_mseg.c b/drivers/net/cnxk/cn10k_rx_vec_mseg.c
index 1f44ddd..38e0ec3 100644
--- a/drivers/net/cnxk/cn10k_rx_vec_mseg.c
+++ b/drivers/net/cnxk/cn10k_rx_vec_mseg.c
@@ -5,13 +5,13 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_rx.h"
 
-#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_mseg_##name( \
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
 		return cn10k_nix_recv_pkts_vector(                             \
 			rx_queue, rx_pkts, pkts, (flags) | NIX_RX_MULTI_SEG_F, \
-			NULL, NULL);                                           \
+			NULL, NULL, 0);                                        \
 	}
 
 NIX_RX_FASTPATH_MODES
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 8577a7b..c81a612 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -51,9 +51,6 @@
 
 #define NIX_NB_SEGS_TO_SEGDW(x) ((NIX_SEGDW_MAGIC >> ((x) << 2)) & 0xF)
 
-#define LMT_OFF(lmt_addr, lmt_num, offset)                                     \
-	(void *)((lmt_addr) + ((lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
-
 /* Function to determine no of tx subdesc required in case ext
  * sub desc is enabled.
  */
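
The Tx-local copy of LMT_OFF() above is dropped in favor of the shared
definition added to cn10k_ethdev.h earlier in this patch, which performs
the same line addressing with explicit uintptr_t/uint64_t casts.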
-- 
2.8.4


2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-01  5:37   ` [dpdk-dev] [PATCH v2 00/28] net/cnxk: support for inline ipsec Jerin Jacob
2021-10-01 13:39 ` [dpdk-dev] [PATCH v3 " Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-10-06 16:21     ` Ferruh Yigit
2021-10-06 16:44       ` Nithin Kumar Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` Nithin Dabilpuram [this message]
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 21/28] net/cnxk: support Tx security offload on cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-02 13:49   ` [dpdk-dev] [PATCH v3 00/28] net/cnxk: support for inline ipsec Jerin Jacob
