From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v3 21/28] net/cnxk: support Tx security offload on cn10k
Date: Fri, 1 Oct 2021 19:10:15 +0530
Message-ID: <20211001134022.22700-22-ndabilpuram@marvell.com>
In-Reply-To: <20211001134022.22700-1-ndabilpuram@marvell.com>

Add support to create and submit CPT instructions on the Tx path for
CN10K. When a packet requests security offload, a CPT instruction is
prepared and submitted through the CPT LMT lines (two instructions per
128B line), and the NIX send descriptor is written to the NIXTX area
following the packet data instead of the regular LMT line.
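
For reference, a minimal standalone sketch (not driver code; struct and
helper names are illustrative) of the per-LMT-line bookkeeping used by
cn10k_nix_prep_sec() and cn10k_nix_sec_steorl() in this patch: two 64B
CPT instructions are packed per 128B LMT line, 'loff' toggles between the
two halves, 'lnum' counts completed lines and 'shft' tracks the position
of the per-line size field in the STEOR data word.

  #include <stdint.h>
  #include <stdbool.h>

  struct cpt_lmt_state {
          uint8_t lnum; /* Number of fully used LMT lines */
          uint8_t loff; /* 1 if the current line is half (64B) full */
          uint8_t shft; /* Bit position of the current line's size field */
  };

  /* Account for one queued 64B CPT instruction */
  static void
  cpt_lmt_advance(struct cpt_lmt_state *st)
  {
          st->loff = !st->loff;
          st->lnum += st->loff ? 0 : 1;
          st->shft += st->loff ? 0 : 3;
  }

  /* True if there is at least one CPT instruction to submit via STEORL */
  static bool
  cpt_lmt_pending(const struct cpt_lmt_state *st)
  {
          return st->lnum != 0 || st->loff != 0;
  }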

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c          |  15 +-
 drivers/event/cnxk/cn10k_worker.h            |  74 +-
 drivers/event/cnxk/cn10k_worker_tx_enq.c     |   2 +-
 drivers/event/cnxk/cn10k_worker_tx_enq_seg.c |   2 +-
 drivers/net/cnxk/cn10k_tx.c                  |  31 +-
 drivers/net/cnxk/cn10k_tx.h                  | 981 +++++++++++++++++++++++----
 drivers/net/cnxk/cn10k_tx_mseg.c             |   2 +-
 drivers/net/cnxk/cn10k_tx_vec.c              |   2 +-
 drivers/net/cnxk/cn10k_tx_vec_mseg.c         |   2 +-
 9 files changed, 929 insertions(+), 182 deletions(-)
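
Note for reviewers: the outbound length adjustment in cn10k_nix_prep_sec()
and cn10k_nix_prep_sec_vec() reduces to the round-up below (a simplified
restatement with illustrative names, not the driver code). Data beyond the
L2 header is rounded up to the SA's cipher-block multiple and the SA's
fixed overhead is added; the difference is the extra room (dlen_adj)
reserved after the packet for the encrypted output.

  #include <stdint.h>

  /* Rounded-up output length; in the patch dlen_adj = rlen - (pkt_len - l2_len) */
  static uint32_t
  ipsec_outb_rlen(uint32_t pkt_len, uint8_t l2_len, uint16_t roundup_len,
                  uint16_t roundup_byte, uint16_t partial_len)
  {
          uint32_t dlen = pkt_len - l2_len;
          uint32_t rlen;

          rlen = (dlen + roundup_len) + (roundup_byte - 1);
          rlen &= ~(uint32_t)(roundup_byte - 1);
          rlen += partial_len;
          return rlen;
  }

Both the send header total and the SG segment size are then grown by
dlen_adj (see cn10k_nix_prep_sec()).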

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 9c0d84b..dec1653 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -17,7 +17,8 @@
 
 #define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
 	(enq_op =                                                              \
-		 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
+		 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]     \
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
@@ -380,17 +381,17 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 
 	/* Tx modes */
 	const event_tx_adapter_enqueue
-		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
+		sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
 			NIX_TX_FASTPATH_MODES
 #undef T
 		};
 
 	const event_tx_adapter_enqueue
-		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                            \
-	[f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
+		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
 			NIX_TX_FASTPATH_MODES
 #undef T
 		};
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index b79bd90..1255662 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -423,7 +423,11 @@ cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
 		    ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
 
 			for (j = 0; j < 4; j++) {
+				uint8_t lnum = 0, loff = 0, shft = 0;
 				struct rte_mbuf *m = mbufs[i + j];
+				uintptr_t laddr;
+				uint16_t segdw;
+				bool sec;
 
 				txq = (struct cn10k_eth_txq *)
 					txq_data[port[j]][queue[j]];
@@ -434,19 +438,35 @@ cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
 				if (flags & NIX_TX_OFFLOAD_TSO_F)
 					cn10k_nix_xmit_prepare_tso(m, flags);
 
-				cn10k_nix_xmit_prepare(m, cmd, lmt_addr, flags,
-						       txq->lso_tun_fmt);
+				cn10k_nix_xmit_prepare(m, cmd, flags,
+						       txq->lso_tun_fmt, &sec);
+
+				laddr = lmt_addr;
+				/* Prepare CPT instruction and get nixtx addr if
+				 * it is for CPT on same lmtline.
+				 */
+				if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+					cn10k_nix_prep_sec(m, cmd, &laddr,
+							   lmt_addr, &lnum,
+							   &loff, &shft,
+							   txq->sa_base, flags);
+
+				/* Move NIX desc to LMT/NIXTX area */
+				cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+
 				if (flags & NIX_TX_MULTI_SEG_F) {
-					const uint16_t segdw =
-						cn10k_nix_prepare_mseg(
-							m, (uint64_t *)lmt_addr,
-							flags);
-					pa = txq->io_addr | ((segdw - 1) << 4);
+					segdw = cn10k_nix_prepare_mseg(m,
+						(uint64_t *)laddr, flags);
 				} else {
-					pa = txq->io_addr |
-					     (cn10k_nix_tx_ext_subs(flags) + 1)
-						     << 4;
+					segdw = cn10k_nix_tx_ext_subs(flags) +
+						2;
 				}
+
+				if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+					pa = txq->cpt_io_addr | 3 << 4;
+				else
+					pa = txq->io_addr | ((segdw - 1) << 4);
+
 				if (!sched_type)
 					roc_sso_hws_head_wait(base +
 							      SSOW_LF_GWS_TAG);
@@ -469,15 +489,19 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 		       const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
 		       const uint32_t flags)
 {
+	uint8_t lnum = 0, loff = 0, shft = 0;
 	struct cn10k_eth_txq *txq;
+	uint16_t ref_cnt, segdw;
 	struct rte_mbuf *m;
 	uintptr_t lmt_addr;
-	uint16_t ref_cnt;
+	uintptr_t c_laddr;
 	uint16_t lmt_id;
 	uintptr_t pa;
+	bool sec;
 
 	lmt_addr = ws->lmt_base;
 	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+	c_laddr = lmt_addr;
 
 	if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
 		struct rte_mbuf **mbufs = ev->vec->mbufs;
@@ -508,14 +532,28 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 	if (flags & NIX_TX_OFFLOAD_TSO_F)
 		cn10k_nix_xmit_prepare_tso(m, flags);
 
-	cn10k_nix_xmit_prepare(m, cmd, lmt_addr, flags, txq->lso_tun_fmt);
+	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
+
+	/* Prepare CPT instruction and get nixtx addr if
+	 * it is for CPT on same lmtline.
+	 */
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+		cn10k_nix_prep_sec(m, cmd, &lmt_addr, c_laddr, &lnum, &loff,
+				   &shft, txq->sa_base, flags);
+
+	/* Move NIX desc to LMT/NIXTX area */
+	cn10k_nix_xmit_mv_lmt_base(lmt_addr, cmd, flags);
 	if (flags & NIX_TX_MULTI_SEG_F) {
-		const uint16_t segdw =
-			cn10k_nix_prepare_mseg(m, (uint64_t *)lmt_addr, flags);
+		segdw = cn10k_nix_prepare_mseg(m, (uint64_t *)lmt_addr, flags);
+	} else {
+		segdw = cn10k_nix_tx_ext_subs(flags) + 2;
+	}
+
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+		pa = txq->cpt_io_addr | 3 << 4;
+	else
 		pa = txq->io_addr | ((segdw - 1) << 4);
-	} else {
-		pa = txq->io_addr | (cn10k_nix_tx_ext_subs(flags) + 1) << 4;
-	}
+
 	if (!ev->sched_type)
 		roc_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);
 
@@ -531,7 +569,7 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 	return 1;
 }
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                  \
 		void *port, struct rte_event ev[], uint16_t nb_events);        \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(              \
diff --git a/drivers/event/cnxk/cn10k_worker_tx_enq.c b/drivers/event/cnxk/cn10k_worker_tx_enq.c
index f9968ac..f14c7fc 100644
--- a/drivers/event/cnxk/cn10k_worker_tx_enq.c
+++ b/drivers/event/cnxk/cn10k_worker_tx_enq.c
@@ -4,7 +4,7 @@
 
 #include "cn10k_worker.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                  \
 		void *port, struct rte_event ev[], uint16_t nb_events)         \
 	{                                                                      \
diff --git a/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c b/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c
index a24fc42..2ea61e5 100644
--- a/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c
+++ b/drivers/event/cnxk/cn10k_worker_tx_enq_seg.c
@@ -4,7 +4,7 @@
 
 #include "cn10k_worker.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(              \
 		void *port, struct rte_event ev[], uint16_t nb_events)         \
 	{                                                                      \
diff --git a/drivers/net/cnxk/cn10k_tx.c b/drivers/net/cnxk/cn10k_tx.c
index 0e1276c..eb962ef 100644
--- a/drivers/net/cnxk/cn10k_tx.c
+++ b/drivers/net/cnxk/cn10k_tx.c
@@ -5,7 +5,7 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_##name(	       \
 		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts)      \
 	{                                                                      \
@@ -24,12 +24,13 @@ NIX_TX_FASTPATH_MODES
 
 static inline void
 pick_tx_func(struct rte_eth_dev *eth_dev,
-	     const eth_tx_burst_t tx_burst[2][2][2][2][2][2])
+	     const eth_tx_burst_t tx_burst[2][2][2][2][2][2][2])
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
-	/* [TSP] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
+	/* [SEC] [TSP] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
 	eth_dev->tx_pkt_burst = tx_burst
+		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_SECURITY_F)]
 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]
 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
@@ -43,33 +44,33 @@ cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
-	const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_##name,
+	const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
 	};
 
-	const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_mseg_##name,
+	const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_mseg_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
 	};
 
-	const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_vec_##name,
+	const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_vec_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
 	};
 
-	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_vec_mseg_##name,
+	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn10k_nix_xmit_pkts_vec_mseg_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index c81a612..52bb71d 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -6,6 +6,8 @@
 
 #include <rte_vect.h>
 
+#include <rte_eventdev.h>
+
 #define NIX_TX_OFFLOAD_NONE	      (0)
 #define NIX_TX_OFFLOAD_L3_L4_CSUM_F   BIT(0)
 #define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
@@ -57,12 +59,22 @@
 static __rte_always_inline int
 cn10k_nix_tx_ext_subs(const uint16_t flags)
 {
-	return (flags & NIX_TX_OFFLOAD_TSTAMP_F)
-		       ? 2
-		       : ((flags &
-			   (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F))
-				  ? 1
-				  : 0);
+	return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ?
+			     2 :
+			     ((flags &
+			 (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ?
+				      1 :
+				      0);
+}
+
+static __rte_always_inline uint8_t
+cn10k_nix_tx_dwords(const uint16_t flags, const uint8_t segdw)
+{
+	if (!(flags & NIX_TX_MULTI_SEG_F))
+		return cn10k_nix_tx_ext_subs(flags) + 2;
+
+	/* Already everything is accounted for in segdw */
+	return segdw;
 }
 
 static __rte_always_inline uint8_t
@@ -144,6 +156,34 @@ cn10k_nix_tx_steor_vec_data(const uint16_t flags)
 	return data;
 }
 
+static __rte_always_inline uint64_t
+cn10k_cpt_tx_steor_data(void)
+{
+	/* We have two CPT instructions per LMTLine */
+	const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
+	uint64_t data;
+
+	/* This will be moved to addr area */
+	data = dw_m1 << 16;
+	data |= dw_m1 << 19;
+	data |= dw_m1 << 22;
+	data |= dw_m1 << 25;
+	data |= dw_m1 << 28;
+	data |= dw_m1 << 31;
+	data |= dw_m1 << 34;
+	data |= dw_m1 << 37;
+	data |= dw_m1 << 40;
+	data |= dw_m1 << 43;
+	data |= dw_m1 << 46;
+	data |= dw_m1 << 49;
+	data |= dw_m1 << 52;
+	data |= dw_m1 << 55;
+	data |= dw_m1 << 58;
+	data |= dw_m1 << 61;
+
+	return data;
+}
+
 static __rte_always_inline void
 cn10k_nix_tx_skeleton(const struct cn10k_eth_txq *txq, uint64_t *cmd,
 		      const uint16_t flags)
@@ -165,6 +205,236 @@ cn10k_nix_tx_skeleton(const struct cn10k_eth_txq *txq, uint64_t *cmd,
 }
 
 static __rte_always_inline void
+cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
+		     uint8_t loff, uint8_t shft)
+{
+	uint64_t data;
+	uintptr_t pa;
+
+	/* Check if there is any CPT instruction to submit */
+	if (!lnum && !loff)
+		return;
+
+	data = cn10k_cpt_tx_steor_data();
+	/* Update lmtline use for partial end line */
+	if (loff) {
+		data &= ~(0x7ULL << shft);
+		/* Update it to half full i.e 64B */
+		data |= (0x3UL << shft);
+	}
+
+	pa = io_addr | ((data >> 16) & 0x7) << 4;
+	data &= ~(0x7ULL << 16);
+	/* Update lines - 1 that contain valid data */
+	data |= ((uint64_t)(lnum + loff - 1)) << 12;
+	data |= lmt_id;
+
+	/* STEOR */
+	roc_lmt_submit_steorl(data, pa);
+}
+
+#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline void
+cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
+		       uintptr_t *nixtx_addr, uintptr_t lbase, uint8_t *lnum,
+		       uint8_t *loff, uint8_t *shft, uint64_t sa_base,
+		       const uint16_t flags)
+{
+	struct cn10k_sec_sess_priv sess_priv;
+	uint32_t pkt_len, dlen_adj, rlen;
+	uint64x2_t cmd01, cmd23;
+	uintptr_t dptr, nixtx;
+	uint64_t ucode_cmd[4];
+	uint64_t *laddr;
+	uint8_t l2_len;
+	uint16_t tag;
+	uint64_t sa;
+
+	sess_priv.u64 = *rte_security_dynfield(m);
+
+	if (flags & NIX_TX_NEED_SEND_HDR_W1)
+		l2_len = vgetq_lane_u8(*cmd0, 8);
+	else
+		l2_len = m->l2_len;
+
+	/* Retrieve DPTR */
+	dptr = vgetq_lane_u64(*cmd1, 1);
+	pkt_len = vgetq_lane_u16(*cmd0, 0);
+
+	/* Calculate dlen adj */
+	dlen_adj = pkt_len - l2_len;
+	rlen = (dlen_adj + sess_priv.roundup_len) +
+	       (sess_priv.roundup_byte - 1);
+	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
+	rlen += sess_priv.partial_len;
+	dlen_adj = rlen - dlen_adj;
+
+	/* Update send descriptors. Security is single segment only */
+	*cmd0 = vsetq_lane_u16(pkt_len + dlen_adj, *cmd0, 0);
+	*cmd1 = vsetq_lane_u16(pkt_len + dlen_adj, *cmd1, 0);
+
+	/* Get area where NIX descriptor needs to be stored */
+	nixtx = dptr + pkt_len + dlen_adj;
+	nixtx += BIT_ULL(7);
+	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
+
+	/* Return nixtx addr */
+	*nixtx_addr = (nixtx + 16);
+
+	/* DLEN passed is excluding L2HDR */
+	pkt_len -= l2_len;
+	tag = sa_base & 0xFFFFUL;
+	sa_base &= ~0xFFFFUL;
+	sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+	ucode_cmd[0] =
+		(ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | pkt_len);
+
+	/* CPT Word 0 and Word 1 */
+	cmd01 = vdupq_n_u64((nixtx + 16) | (cn10k_nix_tx_ext_subs(flags) + 1));
+	/* CPT_RES_S is 16B above NIXTX */
+	cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);
+
+	/* CPT word 2 and 3 */
+	cmd23 = vdupq_n_u64(0);
+	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) | tag |
+				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
+	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+
+	dptr += l2_len;
+	ucode_cmd[1] = dptr;
+	ucode_cmd[2] = dptr;
+
+	/* Move to our line */
+	laddr = LMT_OFF(lbase, *lnum, *loff ? 64 : 0);
+
+	/* Write CPT instruction to lmt line */
+	vst1q_u64(laddr, cmd01);
+	vst1q_u64((laddr + 2), cmd23);
+
+	*(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+	*(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+	/* Move to next line for every other CPT inst */
+	*loff = !(*loff);
+	*lnum = *lnum + (*loff ? 0 : 1);
+	*shft = *shft + (*loff ? 0 : 3);
+}
+
+static __rte_always_inline void
+cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
+		   uintptr_t lbase, uint8_t *lnum, uint8_t *loff, uint8_t *shft,
+		   uint64_t sa_base, const uint16_t flags)
+{
+	struct cn10k_sec_sess_priv sess_priv;
+	uint32_t pkt_len, dlen_adj, rlen;
+	struct nix_send_hdr_s *send_hdr;
+	uint64x2_t cmd01, cmd23;
+	union nix_send_sg_s *sg;
+	uintptr_t dptr, nixtx;
+	uint64_t ucode_cmd[4];
+	uint64_t *laddr;
+	uint8_t l2_len;
+	uint16_t tag;
+	uint64_t sa;
+
+	/* Move to our line from base */
+	sess_priv.u64 = *rte_security_dynfield(m);
+	send_hdr = (struct nix_send_hdr_s *)cmd;
+	if (flags & NIX_TX_NEED_EXT_HDR)
+		sg = (union nix_send_sg_s *)&cmd[4];
+	else
+		sg = (union nix_send_sg_s *)&cmd[2];
+
+	if (flags & NIX_TX_NEED_SEND_HDR_W1)
+		l2_len = cmd[1] & 0xFF;
+	else
+		l2_len = m->l2_len;
+
+	/* Retrieve DPTR */
+	dptr = *(uint64_t *)(sg + 1);
+	pkt_len = send_hdr->w0.total;
+
+	/* Calculate dlen adj */
+	dlen_adj = pkt_len - l2_len;
+	rlen = (dlen_adj + sess_priv.roundup_len) +
+	       (sess_priv.roundup_byte - 1);
+	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
+	rlen += sess_priv.partial_len;
+	dlen_adj = rlen - dlen_adj;
+
+	/* Update send descriptors. Security is single segment only */
+	send_hdr->w0.total = pkt_len + dlen_adj;
+	sg->seg1_size = pkt_len + dlen_adj;
+
+	/* Get area where NIX descriptor needs to be stored */
+	nixtx = dptr + pkt_len + dlen_adj;
+	nixtx += BIT_ULL(7);
+	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
+
+	/* Return nixtx addr */
+	*nixtx_addr = (nixtx + 16);
+
+	/* DLEN passed is excluding L2HDR */
+	pkt_len -= l2_len;
+	tag = sa_base & 0xFFFFUL;
+	sa_base &= ~0xFFFFUL;
+	sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+	ucode_cmd[0] =
+		(ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | pkt_len);
+
+	/* CPT Word 0 and Word 1. Assume no multi-seg support */
+	cmd01 = vdupq_n_u64((nixtx + 16) | (cn10k_nix_tx_ext_subs(flags) + 1));
+	/* CPT_RES_S is 16B above NIXTX */
+	cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);
+
+	/* CPT word 2 and 3 */
+	cmd23 = vdupq_n_u64(0);
+	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) | tag |
+				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
+	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+
+	dptr += l2_len;
+	ucode_cmd[1] = dptr;
+	ucode_cmd[2] = dptr;
+
+	/* Move to our line */
+	laddr = LMT_OFF(lbase, *lnum, *loff ? 64 : 0);
+
+	/* Write CPT instruction to lmt line */
+	vst1q_u64(laddr, cmd01);
+	vst1q_u64((laddr + 2), cmd23);
+
+	*(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+	*(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+	/* Move to next line for every other CPT inst */
+	*loff = !(*loff);
+	*lnum = *lnum + (*loff ? 0 : 1);
+	*shft = *shft + (*loff ? 0 : 3);
+}
+
+#else
+
+static __rte_always_inline void
+cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
+		   uintptr_t lbase, uint8_t *lnum, uint8_t *loff, uint8_t *shft,
+		   uint64_t sa_base, const uint16_t flags)
+{
+	RTE_SET_USED(m);
+	RTE_SET_USED(cmd);
+	RTE_SET_USED(nixtx_addr);
+	RTE_SET_USED(lbase);
+	RTE_SET_USED(lnum);
+	RTE_SET_USED(loff);
+	RTE_SET_USED(shft);
+	RTE_SET_USED(sa_base);
+	RTE_SET_USED(flags);
+}
+#endif
+
+static __rte_always_inline void
 cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 {
 	uint64_t mask, ol_flags = m->ol_flags;
@@ -217,8 +487,8 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 }
 
 static __rte_always_inline void
-cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, uintptr_t lmt_addr,
-		       const uint16_t flags, const uint64_t lso_tun_fmt)
+cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
+		       const uint64_t lso_tun_fmt, bool *sec)
 {
 	struct nix_send_ext_s *send_hdr_ext;
 	struct nix_send_hdr_s *send_hdr;
@@ -237,16 +507,16 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, uintptr_t lmt_addr,
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
 
-	if (flags & NIX_TX_NEED_SEND_HDR_W1) {
+	if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) {
 		ol_flags = m->ol_flags;
 		w1.u = 0;
 	}
 
-	if (!(flags & NIX_TX_MULTI_SEG_F)) {
+	if (!(flags & NIX_TX_MULTI_SEG_F))
 		send_hdr->w0.total = m->data_len;
-		send_hdr->w0.aura =
-			roc_npa_aura_handle_to_aura(m->pool->pool_id);
-	}
+	else
+		send_hdr->w0.total = m->pkt_len;
+	send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
 
 	/*
 	 * L3type:  2 => IPV4
@@ -376,7 +646,7 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, uintptr_t lmt_addr,
 		send_hdr->w1.u = w1.u;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
-		sg->seg1_size = m->data_len;
+		sg->seg1_size = send_hdr->w0.total;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
@@ -389,17 +659,38 @@ cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, uintptr_t lmt_addr,
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
 			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+	} else {
+		sg->seg1_size = m->data_len;
+		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+
+		/* NOFF is handled later for multi-seg */
 	}
 
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F)
+		*sec = !!(ol_flags & PKT_TX_SEC_OFFLOAD);
+}
+
+static __rte_always_inline void
+cn10k_nix_xmit_mv_lmt_base(uintptr_t lmt_addr, uint64_t *cmd,
+			   const uint16_t flags)
+{
+	struct nix_send_ext_s *send_hdr_ext;
+	union nix_send_sg_s *sg;
+
 	/* With minimal offloads, 'cmd' being local could be optimized out to
 	 * registers. In other cases, 'cmd' will be in stack. Intent is
 	 * 'cmd' stores content from txq->cmd which is copied only once.
 	 */
-	*((struct nix_send_hdr_s *)lmt_addr) = *send_hdr;
+	*((struct nix_send_hdr_s *)lmt_addr) = *(struct nix_send_hdr_s *)cmd;
 	lmt_addr += 16;
 	if (flags & NIX_TX_NEED_EXT_HDR) {
+		send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
 		*((struct nix_send_ext_s *)lmt_addr) = *send_hdr_ext;
 		lmt_addr += 16;
+
+		sg = (union nix_send_sg_s *)(cmd + 4);
+	} else {
+		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
 	/* In case of multi-seg, sg template is stored here */
 	*((union nix_send_sg_s *)lmt_addr) = *sg;
@@ -414,7 +705,7 @@ cn10k_nix_xmit_prepare_tstamp(uintptr_t lmt_addr, const uint64_t *cmd,
 	if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
 		const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
 		struct nix_send_ext_s *send_hdr_ext =
-					(struct nix_send_ext_s *)lmt_addr + 16;
+			(struct nix_send_ext_s *)lmt_addr + 16;
 		uint64_t *lmt = (uint64_t *)lmt_addr;
 		uint16_t off = (no_segdw - 1) << 1;
 		struct nix_send_mem_s *send_mem;
@@ -457,8 +748,6 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 	uint8_t off, i;
 
 	send_hdr = (struct nix_send_hdr_s *)cmd;
-	send_hdr->w0.total = m->pkt_len;
-	send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
 
 	if (flags & NIX_TX_NEED_EXT_HDR)
 		off = 2;
@@ -466,13 +755,27 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 		off = 0;
 
 	sg = (union nix_send_sg_s *)&cmd[2 + off];
-	/* Clear sg->u header before use */
-	sg->u &= 0xFC00000000000000;
+
+	/* Start from second segment, first segment is already there */
+	i = 1;
 	sg_u = sg->u;
-	slist = &cmd[3 + off];
+	nb_segs = m->nb_segs - 1;
+	m_next = m->next;
+	slist = &cmd[3 + off + 1];
 
-	i = 0;
-	nb_segs = m->nb_segs;
+	/* Set invert df if buffer is not to be freed by H/W */
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+		sg_u |= (cnxk_nix_prefree_seg(m) << 55);
+
+		/* Mark mempool object as "put" since it is freed by NIX */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+	if (!(sg_u & (1ULL << 55)))
+		__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+	rte_io_wmb();
+#endif
+	m = m_next;
+	if (!m)
+		goto done;
 
 	/* Fill mbuf segments */
 	do {
@@ -504,6 +807,7 @@ cn10k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 		m = m_next;
 	} while (nb_segs);
 
+done:
 	sg->u = sg_u;
 	sg->segs = i;
 	segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
@@ -522,10 +826,17 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 {
 	struct cn10k_eth_txq *txq = tx_queue;
 	const rte_iova_t io_addr = txq->io_addr;
-	uintptr_t pa, lmt_addr = txq->lmt_base;
+	uint8_t lnum, c_lnum, c_shft, c_loff;
+	uintptr_t pa, lbase = txq->lmt_base;
 	uint16_t lmt_id, burst, left, i;
+	uintptr_t c_lbase = lbase;
+	rte_iova_t c_io_addr;
 	uint64_t lso_tun_fmt;
+	uint16_t c_lmt_id;
+	uint64_t sa_base;
+	uintptr_t laddr;
 	uint64_t data;
+	bool sec;
 
 	if (!(flags & NIX_TX_VWQE_F)) {
 		NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -540,10 +851,24 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 		lso_tun_fmt = txq->lso_tun_fmt;
 
 	/* Get LMT base address and LMT ID as lcore id */
-	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+	ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+		c_io_addr = txq->cpt_io_addr;
+		sa_base = txq->sa_base;
+	}
+
 	left = pkts;
 again:
 	burst = left > 32 ? 32 : left;
+
+	lnum = 0;
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		c_lnum = 0;
+		c_loff = 0;
+		c_shft = 16;
+	}
+
 	for (i = 0; i < burst; i++) {
 		/* Perform header writes for TSO, barrier at
 		 * lmt steorl will suffice.
@@ -551,16 +876,39 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 		if (flags & NIX_TX_OFFLOAD_TSO_F)
 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 
-		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, lmt_addr, flags,
-				       lso_tun_fmt);
-		cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
+		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+				       &sec);
+
+		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+
+		/* Prepare CPT instruction and get nixtx addr */
+		if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+			cn10k_nix_prep_sec(tx_pkts[i], cmd, &laddr, c_lbase,
+					   &c_lnum, &c_loff, &c_shft, sa_base,
+					   flags);
+
+		/* Move NIX desc to LMT/NIXTX area */
+		cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+		cn10k_nix_xmit_prepare_tstamp(laddr, &txq->cmd[0],
 					      tx_pkts[i]->ol_flags, 4, flags);
-		lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
+		if (!(flags & NIX_TX_OFFLOAD_SECURITY_F) || !sec)
+			lnum++;
 	}
 
 	if (flags & NIX_TX_VWQE_F)
 		roc_sso_hws_head_wait(base);
 
+	left -= burst;
+	tx_pkts += burst;
+
+	/* Submit CPT instructions if any */
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		/* Reduce pkts to be sent to CPT */
+		burst -= ((c_lnum << 1) + c_loff);
+		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+				     c_shft);
+	}
+
 	/* Trigger LMTST */
 	if (burst > 16) {
 		data = cn10k_nix_tx_steor_data(flags);
@@ -591,16 +939,9 @@ cn10k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
 		roc_lmt_submit_steorl(data, pa);
 	}
 
-	left -= burst;
 	rte_io_wmb();
-	if (left) {
-		/* Start processing another burst */
-		tx_pkts += burst;
-		/* Reset lmt base addr */
-		lmt_addr -= (1ULL << ROC_LMT_LINE_SIZE_LOG2);
-		lmt_addr &= (~(BIT_ULL(ROC_LMT_BASE_PER_CORE_LOG2) - 1));
+	if (left)
 		goto again;
-	}
 
 	return pkts;
 }
@@ -611,13 +952,20 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 			 const uint16_t flags)
 {
 	struct cn10k_eth_txq *txq = tx_queue;
-	uintptr_t pa0, pa1, lmt_addr = txq->lmt_base;
+	uintptr_t pa0, pa1, lbase = txq->lmt_base;
 	const rte_iova_t io_addr = txq->io_addr;
 	uint16_t segdw, lmt_id, burst, left, i;
+	uint8_t lnum, c_lnum, c_loff;
+	uintptr_t c_lbase = lbase;
 	uint64_t data0, data1;
+	rte_iova_t c_io_addr;
 	uint64_t lso_tun_fmt;
+	uint8_t shft, c_shft;
 	__uint128_t data128;
-	uint16_t shft;
+	uint16_t c_lmt_id;
+	uint64_t sa_base;
+	uintptr_t laddr;
+	bool sec;
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
 
@@ -630,12 +978,26 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		lso_tun_fmt = txq->lso_tun_fmt;
 
 	/* Get LMT base address and LMT ID as lcore id */
-	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+	ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+		c_io_addr = txq->cpt_io_addr;
+		sa_base = txq->sa_base;
+	}
+
 	left = pkts;
 again:
 	burst = left > 32 ? 32 : left;
 	shft = 16;
 	data128 = 0;
+
+	lnum = 0;
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		c_lnum = 0;
+		c_loff = 0;
+		c_shft = 16;
+	}
+
 	for (i = 0; i < burst; i++) {
 		/* Perform header writes for TSO, barrier at
 		 * lmt steorl will suffice.
@@ -643,22 +1005,47 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (flags & NIX_TX_OFFLOAD_TSO_F)
 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 
-		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, lmt_addr, flags,
-				       lso_tun_fmt);
+		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+				       &sec);
+
+		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
+
+		/* Prepare CPT instruction and get nixtx addr */
+		if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+			cn10k_nix_prep_sec(tx_pkts[i], cmd, &laddr, c_lbase,
+					   &c_lnum, &c_loff, &c_shft, sa_base,
+					   flags);
+
+		/* Move NIX desc to LMT/NIXTX area */
+		cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+
 		/* Store sg list directly on lmt line */
-		segdw = cn10k_nix_prepare_mseg(tx_pkts[i], (uint64_t *)lmt_addr,
+		segdw = cn10k_nix_prepare_mseg(tx_pkts[i], (uint64_t *)laddr,
 					       flags);
-		cn10k_nix_xmit_prepare_tstamp(lmt_addr, &txq->cmd[0],
+		cn10k_nix_xmit_prepare_tstamp(laddr, &txq->cmd[0],
 					      tx_pkts[i]->ol_flags, segdw,
 					      flags);
-		lmt_addr += (1ULL << ROC_LMT_LINE_SIZE_LOG2);
-		data128 |= (((__uint128_t)(segdw - 1)) << shft);
-		shft += 3;
+		if (!(flags & NIX_TX_OFFLOAD_SECURITY_F) || !sec) {
+			lnum++;
+			data128 |= (((__uint128_t)(segdw - 1)) << shft);
+			shft += 3;
+		}
 	}
 
 	if (flags & NIX_TX_VWQE_F)
 		roc_sso_hws_head_wait(base);
 
+	left -= burst;
+	tx_pkts += burst;
+
+	/* Submit CPT instructions if any */
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		/* Reduce pkts to be sent to CPT */
+		burst -= ((c_lnum << 1) + c_loff);
+		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+				     c_shft);
+	}
+
 	data0 = (uint64_t)data128;
 	data1 = (uint64_t)(data128 >> 64);
 	/* Make data0 similar to data1 */
@@ -695,16 +1082,9 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		roc_lmt_submit_steorl(data0, pa0);
 	}
 
-	left -= burst;
 	rte_io_wmb();
-	if (left) {
-		/* Start processing another burst */
-		tx_pkts += burst;
-		/* Reset lmt base addr */
-		lmt_addr -= (1ULL << ROC_LMT_LINE_SIZE_LOG2);
-		lmt_addr &= (~(BIT_ULL(ROC_LMT_BASE_PER_CORE_LOG2) - 1));
+	if (left)
 		goto again;
-	}
 
 	return pkts;
 }
@@ -989,6 +1369,90 @@ cn10k_nix_prep_lmt_mseg_vector(struct rte_mbuf **mbufs, uint64x2_t *cmd0,
 	return lmt_used;
 }
 
+static __rte_always_inline void
+cn10k_nix_lmt_next(uint8_t dw, uintptr_t laddr, uint8_t *lnum, uint8_t *loff,
+		   uint8_t *shift, __uint128_t *data128, uintptr_t *next)
+{
+	/* Go to next line if we are out of space */
+	if ((*loff + (dw << 4)) > 128) {
+		*data128 = *data128 |
+			   (((__uint128_t)((*loff >> 4) - 1)) << *shift);
+		*shift = *shift + 3;
+		*loff = 0;
+		*lnum = *lnum + 1;
+	}
+
+	*next = (uintptr_t)LMT_OFF(laddr, *lnum, *loff);
+	*loff = *loff + (dw << 4);
+}
+
+static __rte_always_inline void
+cn10k_nix_xmit_store(struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr,
+		     uint64x2_t cmd0, uint64x2_t cmd1, uint64x2_t cmd2,
+		     uint64x2_t cmd3, const uint16_t flags)
+{
+	uint8_t off;
+
+	/* Handle no fast free when security is enabled without mseg */
+	if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
+	    (flags & NIX_TX_OFFLOAD_SECURITY_F) &&
+	    !(flags & NIX_TX_MULTI_SEG_F)) {
+		union nix_send_sg_s sg;
+
+		sg.u = vgetq_lane_u64(cmd1, 0);
+		sg.u |= (cnxk_nix_prefree_seg(mbuf) << 55);
+		cmd1 = vsetq_lane_u64(sg.u, cmd1, 0);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+		sg.u = vgetq_lane_u64(cmd1, 0);
+		if (!(sg.u & (1ULL << 55)))
+			__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1,
+						0);
+		rte_io_wmb();
+#endif
+	}
+	if (flags & NIX_TX_MULTI_SEG_F) {
+		if ((flags & NIX_TX_NEED_EXT_HDR) &&
+		    (flags & NIX_TX_OFFLOAD_TSTAMP_F)) {
+			cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 48),
+						   &cmd0, &cmd1, segdw, flags);
+			vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+			vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+			vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+			off = segdw - 4;
+			off <<= 4;
+			vst1q_u64(LMT_OFF(laddr, 0, 48 + off), cmd3);
+		} else if (flags & NIX_TX_NEED_EXT_HDR) {
+			cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 48),
+						   &cmd0, &cmd1, segdw, flags);
+			vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+			vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+			vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+		} else {
+			cn10k_nix_prepare_mseg_vec(mbuf, LMT_OFF(laddr, 0, 32),
+						   &cmd0, &cmd1, segdw, flags);
+			vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+			vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1);
+		}
+	} else if (flags & NIX_TX_NEED_EXT_HDR) {
+		/* Store the prepared send desc to LMT lines */
+		if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+			vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+			vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+			vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+			vst1q_u64(LMT_OFF(laddr, 0, 48), cmd3);
+		} else {
+			vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+			vst1q_u64(LMT_OFF(laddr, 0, 16), cmd2);
+			vst1q_u64(LMT_OFF(laddr, 0, 32), cmd1);
+		}
+	} else {
+		/* Store the prepared send desc to LMT lines */
+		vst1q_u64(LMT_OFF(laddr, 0, 0), cmd0);
+		vst1q_u64(LMT_OFF(laddr, 0, 16), cmd1);
+	}
+}
+
 static __rte_always_inline uint16_t
 cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			   uint16_t pkts, uint64_t *cmd, uintptr_t base,
@@ -998,10 +1462,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
 	uint64x2_t cmd0[NIX_DESCS_PER_LOOP], cmd1[NIX_DESCS_PER_LOOP],
 		cmd2[NIX_DESCS_PER_LOOP], cmd3[NIX_DESCS_PER_LOOP];
+	uint16_t left, scalar, burst, i, lmt_id, c_lmt_id;
 	uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3, pa;
 	uint64x2_t senddesc01_w0, senddesc23_w0;
 	uint64x2_t senddesc01_w1, senddesc23_w1;
-	uint16_t left, scalar, burst, i, lmt_id;
 	uint64x2_t sendext01_w0, sendext23_w0;
 	uint64x2_t sendext01_w1, sendext23_w1;
 	uint64x2_t sendmem01_w0, sendmem23_w0;
@@ -1010,12 +1474,16 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64x2_t sgdesc01_w0, sgdesc23_w0;
 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
 	struct cn10k_eth_txq *txq = tx_queue;
-	uintptr_t laddr = txq->lmt_base;
 	rte_iova_t io_addr = txq->io_addr;
+	uintptr_t laddr = txq->lmt_base;
+	uint8_t c_lnum, c_shft, c_loff;
 	uint64x2_t ltypes01, ltypes23;
 	uint64x2_t xtmp128, ytmp128;
 	uint64x2_t xmask01, xmask23;
-	uint8_t lnum, shift;
+	uintptr_t c_laddr = laddr;
+	uint8_t lnum, shift, loff;
+	rte_iova_t c_io_addr;
+	uint64_t sa_base;
 	union wdata {
 		__uint128_t data128;
 		uint64_t data[2];
@@ -1061,19 +1529,36 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	/* Get LMT base address and LMT ID as lcore id */
 	ROC_LMT_BASE_ID_GET(laddr, lmt_id);
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		ROC_LMT_CPT_BASE_ID_GET(c_laddr, c_lmt_id);
+		c_io_addr = txq->cpt_io_addr;
+		sa_base = txq->sa_base;
+	}
+
 	left = pkts;
 again:
 	/* Number of packets to prepare depends on offloads enabled. */
 	burst = left > cn10k_nix_pkts_per_vec_brst(flags) ?
 			      cn10k_nix_pkts_per_vec_brst(flags) :
 			      left;
-	if (flags & NIX_TX_MULTI_SEG_F) {
+	if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)) {
 		wd.data128 = 0;
 		shift = 16;
 	}
 	lnum = 0;
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		loff = 0;
+		c_loff = 0;
+		c_lnum = 0;
+		c_shft = 16;
+	}
 
 	for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) {
+		if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) {
+			burst = i;
+			break;
+		}
+
 		if (flags & NIX_TX_MULTI_SEG_F) {
 			uint8_t j;
 
@@ -1833,7 +2318,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 
 		if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
-		    !(flags & NIX_TX_MULTI_SEG_F)) {
+		    !(flags & NIX_TX_MULTI_SEG_F) &&
+		    !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Set don't free bit if reference count > 1 */
 			xmask01 = vdupq_n_u64(0);
 			xmask23 = xmask01;
@@ -1873,7 +2359,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 					(void **)&mbuf3, 1, 0);
 			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
 			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
-		} else if (!(flags & NIX_TX_MULTI_SEG_F)) {
+		} else if (!(flags & NIX_TX_MULTI_SEG_F) &&
+			   !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Move mbufs to iova */
 			mbuf0 = (uint64_t *)tx_pkts[0];
 			mbuf1 = (uint64_t *)tx_pkts[1];
@@ -1918,7 +2405,84 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 			cmd2[3] = vzip2q_u64(sendext23_w0, sendext23_w1);
 		}
 
-		if (flags & NIX_TX_MULTI_SEG_F) {
+		if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+			const uint64x2_t olf = {PKT_TX_SEC_OFFLOAD,
+						PKT_TX_SEC_OFFLOAD};
+			uintptr_t next;
+			uint8_t dw;
+
+			/* Extract ol_flags. */
+			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+			xtmp128 = vtstq_u64(olf, xtmp128);
+			ytmp128 = vtstq_u64(olf, ytmp128);
+
+			/* Process mbuf0 */
+			dw = cn10k_nix_tx_dwords(flags, segdw[0]);
+			if (vgetq_lane_u64(xtmp128, 0))
+				cn10k_nix_prep_sec_vec(tx_pkts[0], &cmd0[0],
+						       &cmd1[0], &next, c_laddr,
+						       &c_lnum, &c_loff,
+						       &c_shft, sa_base, flags);
+			else
+				cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+						   &shift, &wd.data128, &next);
+
+			/* Store mbuf0 to LMTLINE/CPT NIXTX area */
+			cn10k_nix_xmit_store(tx_pkts[0], segdw[0], next,
+					     cmd0[0], cmd1[0], cmd2[0], cmd3[0],
+					     flags);
+
+			/* Process mbuf1 */
+			dw = cn10k_nix_tx_dwords(flags, segdw[1]);
+			if (vgetq_lane_u64(xtmp128, 1))
+				cn10k_nix_prep_sec_vec(tx_pkts[1], &cmd0[1],
+						       &cmd1[1], &next, c_laddr,
+						       &c_lnum, &c_loff,
+						       &c_shft, sa_base, flags);
+			else
+				cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+						   &shift, &wd.data128, &next);
+
+			/* Store mbuf1 to LMTLINE/CPT NIXTX area */
+			cn10k_nix_xmit_store(tx_pkts[1], segdw[1], next,
+					     cmd0[1], cmd1[1], cmd2[1], cmd3[1],
+					     flags);
+
+			/* Process mbuf2 */
+			dw = cn10k_nix_tx_dwords(flags, segdw[2]);
+			if (vgetq_lane_u64(ytmp128, 0))
+				cn10k_nix_prep_sec_vec(tx_pkts[2], &cmd0[2],
+						       &cmd1[2], &next, c_laddr,
+						       &c_lnum, &c_loff,
+						       &c_shft, sa_base, flags);
+			else
+				cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+						   &shift, &wd.data128, &next);
+
+			/* Store mbuf2 to LMTLINE/CPT NIXTX area */
+			cn10k_nix_xmit_store(tx_pkts[2], segdw[2], next,
+					     cmd0[2], cmd1[2], cmd2[2], cmd3[2],
+					     flags);
+
+			/* Process mbuf3 */
+			dw = cn10k_nix_tx_dwords(flags, segdw[3]);
+			if (vgetq_lane_u64(ytmp128, 1))
+				cn10k_nix_prep_sec_vec(tx_pkts[3], &cmd0[3],
+						       &cmd1[3], &next, c_laddr,
+						       &c_lnum, &c_loff,
+						       &c_shft, sa_base, flags);
+			else
+				cn10k_nix_lmt_next(dw, laddr, &lnum, &loff,
+						   &shift, &wd.data128, &next);
+
+			/* Store mbuf3 to LMTLINE/CPT NIXTX area */
+			cn10k_nix_xmit_store(tx_pkts[3], segdw[3], next,
+					     cmd0[3], cmd1[3], cmd2[3], cmd3[3],
+					     flags);
+
+		} else if (flags & NIX_TX_MULTI_SEG_F) {
 			uint8_t j;
 
 			segdw[4] = 8;
@@ -1982,21 +2546,35 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
 	}
 
-	if (flags & NIX_TX_MULTI_SEG_F)
+	/* Roundup lnum to last line if it is partial */
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		lnum = lnum + !!loff;
+		wd.data128 = wd.data128 |
+			(((__uint128_t)(((loff >> 4) - 1) & 0x7) << shift));
+	}
+
+	if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
 		wd.data[0] >>= 16;
 
 	if (flags & NIX_TX_VWQE_F)
 		roc_sso_hws_head_wait(base);
 
+	left -= burst;
+
+	/* Submit CPT instructions if any */
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F)
+		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
+				     c_shft);
+
 	/* Trigger LMTST */
 	if (lnum > 16) {
-		if (!(flags & NIX_TX_MULTI_SEG_F))
+		if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
 			wd.data[0] = cn10k_nix_tx_steor_vec_data(flags);
 
 		pa = io_addr | (wd.data[0] & 0x7) << 4;
 		wd.data[0] &= ~0x7ULL;
 
-		if (flags & NIX_TX_MULTI_SEG_F)
+		if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
 			wd.data[0] <<= 16;
 
 		wd.data[0] |= (15ULL << 12);
@@ -2005,13 +2583,13 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* STEOR0 */
 		roc_lmt_submit_steorl(wd.data[0], pa);
 
-		if (!(flags & NIX_TX_MULTI_SEG_F))
+		if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
 			wd.data[1] = cn10k_nix_tx_steor_vec_data(flags);
 
 		pa = io_addr | (wd.data[1] & 0x7) << 4;
 		wd.data[1] &= ~0x7ULL;
 
-		if (flags & NIX_TX_MULTI_SEG_F)
+		if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
 			wd.data[1] <<= 16;
 
 		wd.data[1] |= ((uint64_t)(lnum - 17)) << 12;
@@ -2020,13 +2598,13 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* STEOR1 */
 		roc_lmt_submit_steorl(wd.data[1], pa);
 	} else if (lnum) {
-		if (!(flags & NIX_TX_MULTI_SEG_F))
+		if (!(flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F)))
 			wd.data[0] = cn10k_nix_tx_steor_vec_data(flags);
 
 		pa = io_addr | (wd.data[0] & 0x7) << 4;
 		wd.data[0] &= ~0x7ULL;
 
-		if (flags & NIX_TX_MULTI_SEG_F)
+		if (flags & (NIX_TX_MULTI_SEG_F | NIX_TX_OFFLOAD_SECURITY_F))
 			wd.data[0] <<= 16;
 
 		wd.data[0] |= ((uint64_t)(lnum - 1)) << 12;
@@ -2036,7 +2614,6 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		roc_lmt_submit_steorl(wd.data[0], pa);
 	}
 
-	left -= burst;
 	rte_io_wmb();
 	if (left)
 		goto again;
@@ -2076,139 +2653,269 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 #define NOFF_F	     NIX_TX_OFFLOAD_MBUF_NOFF_F
 #define TSO_F	     NIX_TX_OFFLOAD_TSO_F
 #define TSP_F	     NIX_TX_OFFLOAD_TSTAMP_F
+#define T_SEC_F      NIX_TX_OFFLOAD_SECURITY_F
 
-/* [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
+/* [T_SEC_F] [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
 #define NIX_TX_FASTPATH_MODES						\
-T(no_offload,				0, 0, 0, 0, 0, 0,	4,	\
+T(no_offload,				0, 0, 0, 0, 0, 0, 0,	4,	\
 		NIX_TX_OFFLOAD_NONE)					\
-T(l3l4csum,				0, 0, 0, 0, 0, 1,	4,	\
+T(l3l4csum,				0, 0, 0, 0, 0, 0, 1,	4,	\
 		L3L4CSUM_F)						\
-T(ol3ol4csum,				0, 0, 0, 0, 1, 0,	4,	\
+T(ol3ol4csum,				0, 0, 0, 0, 0, 1, 0,	4,	\
 		OL3OL4CSUM_F)						\
-T(ol3ol4csum_l3l4csum,			0, 0, 0, 0, 1, 1,	4,	\
+T(ol3ol4csum_l3l4csum,			0, 0, 0, 0, 0, 1, 1,	4,	\
 		OL3OL4CSUM_F | L3L4CSUM_F)				\
-T(vlan,					0, 0, 0, 1, 0, 0,	6,	\
+T(vlan,					0, 0, 0, 0, 1, 0, 0,	6,	\
 		VLAN_F)							\
-T(vlan_l3l4csum,			0, 0, 0, 1, 0, 1,	6,	\
+T(vlan_l3l4csum,			0, 0, 0, 0, 1, 0, 1,	6,	\
 		VLAN_F | L3L4CSUM_F)					\
-T(vlan_ol3ol4csum,			0, 0, 0, 1, 1, 0,	6,	\
+T(vlan_ol3ol4csum,			0, 0, 0, 0, 1, 1, 0,	6,	\
 		VLAN_F | OL3OL4CSUM_F)					\
-T(vlan_ol3ol4csum_l3l4csum,		0, 0, 0, 1, 1, 1,	6,	\
+T(vlan_ol3ol4csum_l3l4csum,		0, 0, 0, 0, 1, 1, 1,	6,	\
 		VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)			\
-T(noff,					0, 0, 1, 0, 0, 0,	4,	\
+T(noff,					0, 0, 0, 1, 0, 0, 0,	4,	\
 		NOFF_F)							\
-T(noff_l3l4csum,			0, 0, 1, 0, 0, 1,	4,	\
+T(noff_l3l4csum,			0, 0, 0, 1, 0, 0, 1,	4,	\
 		NOFF_F | L3L4CSUM_F)					\
-T(noff_ol3ol4csum,			0, 0, 1, 0, 1, 0,	4,	\
+T(noff_ol3ol4csum,			0, 0, 0, 1, 0, 1, 0,	4,	\
 		NOFF_F | OL3OL4CSUM_F)					\
-T(noff_ol3ol4csum_l3l4csum,		0, 0, 1, 0, 1, 1,	4,	\
+T(noff_ol3ol4csum_l3l4csum,		0, 0, 0, 1, 0, 1, 1,	4,	\
 		NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)			\
-T(noff_vlan,				0, 0, 1, 1, 0, 0,	6,	\
+T(noff_vlan,				0, 0, 0, 1, 1, 0, 0,	6,	\
 		NOFF_F | VLAN_F)					\
-T(noff_vlan_l3l4csum,			0, 0, 1, 1, 0, 1,	6,	\
+T(noff_vlan_l3l4csum,			0, 0, 0, 1, 1, 0, 1,	6,	\
 		NOFF_F | VLAN_F | L3L4CSUM_F)				\
-T(noff_vlan_ol3ol4csum,			0, 0, 1, 1, 1, 0,	6,	\
+T(noff_vlan_ol3ol4csum,			0, 0, 0, 1, 1, 1, 0,	6,	\
 		NOFF_F | VLAN_F | OL3OL4CSUM_F)				\
-T(noff_vlan_ol3ol4csum_l3l4csum,	0, 0, 1, 1, 1, 1,	6,	\
+T(noff_vlan_ol3ol4csum_l3l4csum,	0, 0, 0, 1, 1, 1, 1,	6,	\
 		NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
-T(tso,					0, 1, 0, 0, 0, 0,	6,	\
+T(tso,					0, 0, 1, 0, 0, 0, 0,	6,	\
 		TSO_F)							\
-T(tso_l3l4csum,				0, 1, 0, 0, 0, 1,	6,	\
+T(tso_l3l4csum,				0, 0, 1, 0, 0, 0, 1,	6,	\
 		TSO_F | L3L4CSUM_F)					\
-T(tso_ol3ol4csum,			0, 1, 0, 0, 1, 0,	6,	\
+T(tso_ol3ol4csum,			0, 0, 1, 0, 0, 1, 0,	6,	\
 		TSO_F | OL3OL4CSUM_F)					\
-T(tso_ol3ol4csum_l3l4csum,		0, 1, 0, 0, 1, 1,	6,	\
+T(tso_ol3ol4csum_l3l4csum,		0, 0, 1, 0, 0, 1, 1,	6,	\
 		TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
-T(tso_vlan,				0, 1, 0, 1, 0, 0,	6,	\
+T(tso_vlan,				0, 0, 1, 0, 1, 0, 0,	6,	\
 		TSO_F | VLAN_F)						\
-T(tso_vlan_l3l4csum,			0, 1, 0, 1, 0, 1,	6,	\
+T(tso_vlan_l3l4csum,			0, 0, 1, 0, 1, 0, 1,	6,	\
 		TSO_F | VLAN_F | L3L4CSUM_F)				\
-T(tso_vlan_ol3ol4csum,			0, 1, 0, 1, 1, 0,	6,	\
+T(tso_vlan_ol3ol4csum,			0, 0, 1, 0, 1, 1, 0,	6,	\
 		TSO_F | VLAN_F | OL3OL4CSUM_F)				\
-T(tso_vlan_ol3ol4csum_l3l4csum,		0, 1, 0, 1, 1, 1,	6,	\
+T(tso_vlan_ol3ol4csum_l3l4csum,		0, 0, 1, 0, 1, 1, 1,	6,	\
 		TSO_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
-T(tso_noff,				0, 1, 1, 0, 0, 0,	6,	\
+T(tso_noff,				0, 0, 1, 1, 0, 0, 0,	6,	\
 		TSO_F | NOFF_F)						\
-T(tso_noff_l3l4csum,			0, 1, 1, 0, 0, 1,	6,	\
+T(tso_noff_l3l4csum,			0, 0, 1, 1, 0, 0, 1,	6,	\
 		TSO_F | NOFF_F | L3L4CSUM_F)				\
-T(tso_noff_ol3ol4csum,			0, 1, 1, 0, 1, 0,	6,	\
+T(tso_noff_ol3ol4csum,			0, 0, 1, 1, 0, 1, 0,	6,	\
 		TSO_F | NOFF_F | OL3OL4CSUM_F)				\
-T(tso_noff_ol3ol4csum_l3l4csum,		0, 1, 1, 0, 1, 1,	6,	\
+T(tso_noff_ol3ol4csum_l3l4csum,		0, 0, 1, 1, 0, 1, 1,	6,	\
 		TSO_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
-T(tso_noff_vlan,			0, 1, 1, 1, 0, 0,	6,	\
+T(tso_noff_vlan,			0, 0, 1, 1, 1, 0, 0,	6,	\
 		TSO_F | NOFF_F | VLAN_F)				\
-T(tso_noff_vlan_l3l4csum,		0, 1, 1, 1, 0, 1,	6,	\
+T(tso_noff_vlan_l3l4csum,		0, 0, 1, 1, 1, 0, 1,	6,	\
 		TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)			\
-T(tso_noff_vlan_ol3ol4csum,		0, 1, 1, 1, 1, 0,	6,	\
+T(tso_noff_vlan_ol3ol4csum,		0, 0, 1, 1, 1, 1, 0,	6,	\
 		TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)			\
-T(tso_noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 1, 1,	6,	\
+T(tso_noff_vlan_ol3ol4csum_l3l4csum,	0, 0, 1, 1, 1, 1, 1,	6,	\
 		TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
-T(ts,					1, 0, 0, 0, 0, 0,	8,	\
+T(ts,					0, 1, 0, 0, 0, 0, 0,	8,	\
 		TSP_F)							\
-T(ts_l3l4csum,				1, 0, 0, 0, 0, 1,	8,	\
+T(ts_l3l4csum,				0, 1, 0, 0, 0, 0, 1,	8,	\
 		TSP_F | L3L4CSUM_F)					\
-T(ts_ol3ol4csum,			1, 0, 0, 0, 1, 0,	8,	\
+T(ts_ol3ol4csum,			0, 1, 0, 0, 0, 1, 0,	8,	\
 		TSP_F | OL3OL4CSUM_F)					\
-T(ts_ol3ol4csum_l3l4csum,		1, 0, 0, 0, 1, 1,	8,	\
+T(ts_ol3ol4csum_l3l4csum,		0, 1, 0, 0, 0, 1, 1,	8,	\
 		TSP_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
-T(ts_vlan,				1, 0, 0, 1, 0, 0,	8,	\
+T(ts_vlan,				0, 1, 0, 0, 1, 0, 0,	8,	\
 		TSP_F | VLAN_F)						\
-T(ts_vlan_l3l4csum,			1, 0, 0, 1, 0, 1,	8,	\
+T(ts_vlan_l3l4csum,			0, 1, 0, 0, 1, 0, 1,	8,	\
 		TSP_F | VLAN_F | L3L4CSUM_F)				\
-T(ts_vlan_ol3ol4csum,			1, 0, 0, 1, 1, 0,	8,	\
+T(ts_vlan_ol3ol4csum,			0, 1, 0, 0, 1, 1, 0,	8,	\
 		TSP_F | VLAN_F | OL3OL4CSUM_F)				\
-T(ts_vlan_ol3ol4csum_l3l4csum,		1, 0, 0, 1, 1, 1,	8,	\
+T(ts_vlan_ol3ol4csum_l3l4csum,		0, 1, 0, 0, 1, 1, 1,	8,	\
 		TSP_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
-T(ts_noff,				1, 0, 1, 0, 0, 0,	8,	\
+T(ts_noff,				0, 1, 0, 1, 0, 0, 0,	8,	\
 		TSP_F | NOFF_F)						\
-T(ts_noff_l3l4csum,			1, 0, 1, 0, 0, 1,	8,	\
+T(ts_noff_l3l4csum,			0, 1, 0, 1, 0, 0, 1,	8,	\
 		TSP_F | NOFF_F | L3L4CSUM_F)				\
-T(ts_noff_ol3ol4csum,			1, 0, 1, 0, 1, 0,	8,	\
+T(ts_noff_ol3ol4csum,			0, 1, 0, 1, 0, 1, 0,	8,	\
 		TSP_F | NOFF_F | OL3OL4CSUM_F)				\
-T(ts_noff_ol3ol4csum_l3l4csum,		1, 0, 1, 0, 1, 1,	8,	\
+T(ts_noff_ol3ol4csum_l3l4csum,		0, 1, 0, 1, 0, 1, 1,	8,	\
 		TSP_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
-T(ts_noff_vlan,				1, 0, 1, 1, 0, 0,	8,	\
+T(ts_noff_vlan,				0, 1, 0, 1, 1, 0, 0,	8,	\
 		TSP_F | NOFF_F | VLAN_F)				\
-T(ts_noff_vlan_l3l4csum,		1, 0, 1, 1, 0, 1,	8,	\
+T(ts_noff_vlan_l3l4csum,		0, 1, 0, 1, 1, 0, 1,	8,	\
 		TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)			\
-T(ts_noff_vlan_ol3ol4csum,		1, 0, 1, 1, 1, 0,	8,	\
+T(ts_noff_vlan_ol3ol4csum,		0, 1, 0, 1, 1, 1, 0,	8,	\
 		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)			\
-T(ts_noff_vlan_ol3ol4csum_l3l4csum,	1, 0, 1, 1, 1, 1,	8,	\
+T(ts_noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 0, 1, 1, 1, 1,	8,	\
 		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
-T(ts_tso,				1, 1, 0, 0, 0, 0,	8,	\
+T(ts_tso,				0, 1, 1, 0, 0, 0, 0,	8,	\
 		TSP_F | TSO_F)						\
-T(ts_tso_l3l4csum,			1, 1, 0, 0, 0, 1,	8,	\
+T(ts_tso_l3l4csum,			0, 1, 1, 0, 0, 0, 1,	8,	\
 		TSP_F | TSO_F | L3L4CSUM_F)				\
-T(ts_tso_ol3ol4csum,			1, 1, 0, 0, 1, 0,	8,	\
+T(ts_tso_ol3ol4csum,			0, 1, 1, 0, 0, 1, 0,	8,	\
 		TSP_F | TSO_F | OL3OL4CSUM_F)				\
-T(ts_tso_ol3ol4csum_l3l4csum,		1, 1, 0, 0, 1, 1,	8,	\
+T(ts_tso_ol3ol4csum_l3l4csum,		0, 1, 1, 0, 0, 1, 1,	8,	\
 		TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
-T(ts_tso_vlan,				1, 1, 0, 1, 0, 0,	8,	\
+T(ts_tso_vlan,				0, 1, 1, 0, 1, 0, 0,	8,	\
 		TSP_F | TSO_F | VLAN_F)					\
-T(ts_tso_vlan_l3l4csum,			1, 1, 0, 1, 0, 1,	8,	\
+T(ts_tso_vlan_l3l4csum,			0, 1, 1, 0, 1, 0, 1,	8,	\
 		TSP_F | TSO_F | VLAN_F | L3L4CSUM_F)			\
-T(ts_tso_vlan_ol3ol4csum,		1, 1, 0, 1, 1, 0,	8,	\
+T(ts_tso_vlan_ol3ol4csum,		0, 1, 1, 0, 1, 1, 0,	8,	\
 		TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F)			\
-T(ts_tso_vlan_ol3ol4csum_l3l4csum,	1, 1, 0, 1, 1, 1,	8,	\
+T(ts_tso_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 0, 1, 1, 1,	8,	\
 		TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
-T(ts_tso_noff,				1, 1, 1, 0, 0, 0,	8,	\
+T(ts_tso_noff,				0, 1, 1, 1, 0, 0, 0,	8,	\
 		TSP_F | TSO_F | NOFF_F)					\
-T(ts_tso_noff_l3l4csum,			1, 1, 1, 0, 0, 1,	8,	\
+T(ts_tso_noff_l3l4csum,			0, 1, 1, 1, 0, 0, 1,	8,	\
 		TSP_F | TSO_F | NOFF_F | L3L4CSUM_F)			\
-T(ts_tso_noff_ol3ol4csum,		1, 1, 1, 0, 1, 0,	8,	\
+T(ts_tso_noff_ol3ol4csum,		0, 1, 1, 1, 0, 1, 0,	8,	\
 		TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F)			\
-T(ts_tso_noff_ol3ol4csum_l3l4csum,	1, 1, 1, 0, 1, 1,	8,	\
+T(ts_tso_noff_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 0, 1, 1,	8,	\
 		TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
-T(ts_tso_noff_vlan,			1, 1, 1, 1, 0, 0,	8,	\
+T(ts_tso_noff_vlan,			0, 1, 1, 1, 1, 0, 0,	8,	\
 		TSP_F | TSO_F | NOFF_F | VLAN_F)			\
-T(ts_tso_noff_vlan_l3l4csum,		1, 1, 1, 1, 0, 1,	8,	\
+T(ts_tso_noff_vlan_l3l4csum,		0, 1, 1, 1, 1, 0, 1,	8,	\
 		TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)		\
-T(ts_tso_noff_vlan_ol3ol4csum,		1, 1, 1, 1, 1, 0,	8,	\
+T(ts_tso_noff_vlan_ol3ol4csum,		0, 1, 1, 1, 1, 1, 0,	8,	\
 		TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)		\
-T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum,	1, 1, 1, 1, 1, 1,	8,	\
-		TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 1, 1, 1,	8,	\
+		TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec,					1, 0, 0, 0, 0, 0, 0,	4,	\
+		T_SEC_F)						\
+T(sec_l3l4csum,				1, 0, 0, 0, 0, 0, 1,	4,	\
+		T_SEC_F | L3L4CSUM_F)					\
+T(sec_ol3ol4csum,			1, 0, 0, 0, 0, 1, 0,	4,	\
+		T_SEC_F | OL3OL4CSUM_F)					\
+T(sec_ol3ol4csum_l3l4csum,		1, 0, 0, 0, 0, 1, 1,	4,	\
+		T_SEC_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
+T(sec_vlan,				1, 0, 0, 0, 1, 0, 0,	6,	\
+		T_SEC_F | VLAN_F)					\
+T(sec_vlan_l3l4csum,			1, 0, 0, 0, 1, 0, 1,	6,	\
+		T_SEC_F | VLAN_F | L3L4CSUM_F)				\
+T(sec_vlan_ol3ol4csum,			1, 0, 0, 0, 1, 1, 0,	6,	\
+		T_SEC_F | VLAN_F | OL3OL4CSUM_F)			\
+T(sec_vlan_ol3ol4csum_l3l4csum,		1, 0, 0, 0, 1, 1, 1,	6,	\
+		T_SEC_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
+T(sec_noff,				1, 0, 0, 1, 0, 0, 0,	4,	\
+		T_SEC_F | NOFF_F)					\
+T(sec_noff_l3l4csum,			1, 0, 0, 1, 0, 0, 1,	4,	\
+		T_SEC_F | NOFF_F | L3L4CSUM_F)				\
+T(sec_noff_ol3ol4csum,			1, 0, 0, 1, 0, 1, 0,	4,	\
+		T_SEC_F | NOFF_F | OL3OL4CSUM_F)			\
+T(sec_noff_ol3ol4csum_l3l4csum,		1, 0, 0, 1, 0, 1, 1,	4,	\
+		T_SEC_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
+T(sec_noff_vlan,			1, 0, 0, 1, 1, 0, 0,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F)				\
+T(sec_noff_vlan_l3l4csum,		1, 0, 0, 1, 1, 0, 1,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F | L3L4CSUM_F)			\
+T(sec_noff_vlan_ol3ol4csum,		1, 0, 0, 1, 1, 1, 0,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)		\
+T(sec_noff_vlan_ol3ol4csum_l3l4csum,	1, 0, 0, 1, 1, 1, 1,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_tso,				1, 0, 1, 0, 0, 0, 0,	6,	\
+		T_SEC_F | TSO_F)					\
+T(sec_tso_l3l4csum,			1, 0, 1, 0, 0, 0, 1,	6,	\
+		T_SEC_F | TSO_F | L3L4CSUM_F)				\
+T(sec_tso_ol3ol4csum,			1, 0, 1, 0, 0, 1, 0,	6,	\
+		T_SEC_F | TSO_F | OL3OL4CSUM_F)				\
+T(sec_tso_ol3ol4csum_l3l4csum,		1, 0, 1, 0, 0, 1, 1,	6,	\
+		T_SEC_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
+T(sec_tso_vlan,				1, 0, 1, 0, 1, 0, 0,	6,	\
+		T_SEC_F | TSO_F | VLAN_F)				\
+T(sec_tso_vlan_l3l4csum,		1, 0, 1, 0, 1, 0, 1,	6,	\
+		T_SEC_F | TSO_F | VLAN_F | L3L4CSUM_F)			\
+T(sec_tso_vlan_ol3ol4csum,		1, 0, 1, 0, 1, 1, 0,	6,	\
+		T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F)		\
+T(sec_tso_vlan_ol3ol4csum_l3l4csum,	1, 0, 1, 0, 1, 1, 1,	6,	\
+		T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_tso_noff,				1, 0, 1, 1, 0, 0, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F)				\
+T(sec_tso_noff_l3l4csum,		1, 0, 1, 1, 0, 0, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | L3L4CSUM_F)			\
+T(sec_tso_noff_ol3ol4csum,		1, 0, 1, 1, 0, 1, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F)		\
+T(sec_tso_noff_ol3ol4csum_l3l4csum,	1, 0, 1, 1, 0, 1, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_tso_noff_vlan,			1, 0, 1, 1, 1, 0, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F)			\
+T(sec_tso_noff_vlan_l3l4csum,		1, 0, 1, 1, 1, 0, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)		\
+T(sec_tso_noff_vlan_ol3ol4csum,		1, 0, 1, 1, 1, 1, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)	\
+T(sec_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts,				1, 1, 0, 0, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F)					\
+T(sec_ts_l3l4csum,			1, 1, 0, 0, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | L3L4CSUM_F)				\
+T(sec_ts_ol3ol4csum,			1, 1, 0, 0, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | OL3OL4CSUM_F)				\
+T(sec_ts_ol3ol4csum_l3l4csum,		1, 1, 0, 0, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
+T(sec_ts_vlan,				1, 1, 0, 0, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | VLAN_F)				\
+T(sec_ts_vlan_l3l4csum,			1, 1, 0, 0, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | VLAN_F | L3L4CSUM_F)			\
+T(sec_ts_vlan_ol3ol4csum,		1, 1, 0, 0, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F)		\
+T(sec_ts_vlan_ol3ol4csum_l3l4csum,	1, 1, 0, 0, 1, 1, 1,	8,	\
+		T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_ts_noff,				1, 1, 0, 1, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F)				\
+T(sec_ts_noff_l3l4csum,			1, 1, 0, 1, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | L3L4CSUM_F)			\
+T(sec_ts_noff_ol3ol4csum,		1, 1, 0, 1, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F)		\
+T(sec_ts_noff_ol3ol4csum_l3l4csum,	1, 1, 0, 1, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_ts_noff_vlan,			1, 1, 0, 1, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F)			\
+T(sec_ts_noff_vlan_l3l4csum,		1, 1, 0, 1, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)		\
+T(sec_ts_noff_vlan_ol3ol4csum,		1, 1, 0, 1, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)	\
+T(sec_ts_noff_vlan_ol3ol4csum_l3l4csum,	1, 1, 0, 1, 1, 1, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts_tso,				1, 1, 1, 0, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F)				\
+T(sec_ts_tso_l3l4csum,			1, 1, 1, 0, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | L3L4CSUM_F)			\
+T(sec_ts_tso_ol3ol4csum,		1, 1, 1, 0, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F)			\
+T(sec_ts_tso_ol3ol4csum_l3l4csum,	1, 1, 1, 0, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_ts_tso_vlan,			1, 1, 1, 0, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F)			\
+T(sec_ts_tso_vlan_l3l4csum,		1, 1, 1, 0, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F | L3L4CSUM_F)		\
+T(sec_ts_tso_vlan_ol3ol4csum,		1, 1, 1, 0, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F)	\
+T(sec_ts_tso_vlan_ol3ol4csum_l3l4csum,	1, 1, 1, 0, 1, 1, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_tso_noff,			1, 1, 1, 1, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F)			\
+T(sec_ts_tso_noff_l3l4csum,		1, 1, 1, 1, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | L3L4CSUM_F)		\
+T(sec_ts_tso_noff_ol3ol4csum,		1, 1, 1, 1, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F)	\
+T(sec_ts_tso_noff_ol3ol4csum_l3l4csum,	1, 1, 1, 1, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts_tso_noff_vlan,			1, 1, 1, 1, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F)		\
+T(sec_ts_tso_noff_vlan_l3l4csum,	1, 1, 1, 1, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)	\
+T(sec_ts_tso_noff_vlan_ol3ol4csum,	1, 1, 1, 1, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)\
+T(sec_ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 1, 8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \
+		L3L4CSUM_F)
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_##name(          \
 		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);     \
 									       \
diff --git a/drivers/net/cnxk/cn10k_tx_mseg.c b/drivers/net/cnxk/cn10k_tx_mseg.c
index 4ea4c8a..2b83409 100644
--- a/drivers/net/cnxk/cn10k_tx_mseg.c
+++ b/drivers/net/cnxk/cn10k_tx_mseg.c
@@ -5,7 +5,7 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot				       \
 		cn10k_nix_xmit_pkts_mseg_##name(void *tx_queue,                \
 						struct rte_mbuf **tx_pkts,     \
diff --git a/drivers/net/cnxk/cn10k_tx_vec.c b/drivers/net/cnxk/cn10k_tx_vec.c
index a035049..2789b13 100644
--- a/drivers/net/cnxk/cn10k_tx_vec.c
+++ b/drivers/net/cnxk/cn10k_tx_vec.c
@@ -5,7 +5,7 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot				       \
 		cn10k_nix_xmit_pkts_vec_##name(void *tx_queue,                 \
 					       struct rte_mbuf **tx_pkts,      \
diff --git a/drivers/net/cnxk/cn10k_tx_vec_mseg.c b/drivers/net/cnxk/cn10k_tx_vec_mseg.c
index 7f98f79..98000df 100644
--- a/drivers/net/cnxk/cn10k_tx_vec_mseg.c
+++ b/drivers/net/cnxk/cn10k_tx_vec_mseg.c
@@ -5,7 +5,7 @@
 #include "cn10k_ethdev.h"
 #include "cn10k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_noinline __rte_hot cn10k_nix_xmit_pkts_vec_mseg_##name( \
 		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts)      \
 	{                                                                      \
-- 
2.8.4
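
The fastpath table above is an X-macro list: each T() row names one Tx variant, carries one 0/1 column per offload flag plus a size field, and ORs the matching *_F flags together; the T() definition that follows stamps out one cn10k_nix_xmit_pkts_##name declaration per row, and the extra f6 column added here selects the T_SEC_F (security) variants. A minimal, self-contained C sketch of the same pattern follows; every name in it (DEMO_MODES, DEMO_SEC_F, DEMO_CSUM_F, demo_xmit_*) is invented for illustration and is not driver code.

#include <stdint.h>
#include <stdio.h>

#define DEMO_SEC_F	(1u << 1)	/* stand-in for a security offload bit */
#define DEMO_CSUM_F	(1u << 0)	/* stand-in for a checksum offload bit */

/* name, sec, csum, flags -- one row per supported combination */
#define DEMO_MODES							\
T(none,		0, 0,	0)						\
T(csum,		0, 1,	DEMO_CSUM_F)					\
T(sec,		1, 0,	DEMO_SEC_F)					\
T(sec_csum,	1, 1,	DEMO_SEC_F | DEMO_CSUM_F)

/* Stamp out one specialised routine per row of the table; 'flags' is a
 * compile-time constant inside each generated function body. */
#define T(name, f1, f0, flags)						\
	static uint16_t demo_xmit_##name(uint16_t pkts)			\
	{								\
		printf("xmit_%s: flags=0x%x pkts=%u\n", #name,		\
		       (unsigned)(flags), (unsigned)pkts);		\
		return pkts;						\
	}
DEMO_MODES
#undef T

int main(void)
{
	/* Function-pointer table indexed by the same flag bits. */
	uint16_t (*const xmit[2][2])(uint16_t) = {
#define T(name, f1, f0, flags) [f1][f0] = demo_xmit_##name,
		DEMO_MODES
#undef T
	};
	uint64_t offloads = DEMO_SEC_F | DEMO_CSUM_F; /* runtime Tx offloads */

	/* Resolve the specialised routine once from the enabled bits. */
	return xmit[!!(offloads & DEMO_SEC_F)]
		   [!!(offloads & DEMO_CSUM_F)](32) == 32 ? 0 : 1;
}

Because the flags argument is a compile-time constant inside each generated routine, the compiler can discard the branches a given combination never takes; the caller then resolves the matching routine once from the enabled offload bits instead of re-testing them per packet.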


Thread overview: 91+ messages
2021-09-02  2:14 [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 01/27] common/cnxk: add security support for cn9k fast path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 02/27] common/cnxk: add helper API to dump cpt parse header Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 03/27] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 04/27] common/cnxk: change nix debug API and queue API interface Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 05/27] common/cnxk: add nix inline device irq API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 06/27] common/cnxk: add nix inline device init and fini Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 07/27] common/cnxk: add nix inline inbound and outbound support API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 08/27] common/cnxk: dump cpt lf registers on error intr Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 09/27] common/cnxk: align cpt lf enable/disable sequence Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 10/27] common/cnxk: restore nix sqb pool limit before destroy Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 11/27] common/cnxk: add cq enable support in nix Tx path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 12/27] common/cnxk: setup aura bp conf based on nix Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 13/27] common/cnxk: add anti-replay check implementation for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 14/27] common/cnxk: add inline IPsec support in rte flow Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 15/27] net/cnxk: add inline security support for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 16/27] net/cnxk: add inline security support for cn10k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 17/27] net/cnxk: add cn9k Rx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 18/27] net/cnxk: add cn9k Tx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 19/27] net/cnxk: add cn10k Rx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 20/27] net/cnxk: add cn10k Tx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 21/27] net/cnxk: add cn9k anti replay support for security offload Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 22/27] net/cnxk: add cn10k IPsec transport mode support Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 23/27] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 24/27] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 25/27] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 26/27] net/cnxk: add devargs for configuring channel mask Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 27/27] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-09-29 12:44 ` [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Jerin Jacob
2021-09-30 17:00 ` [dpdk-dev] [PATCH v2 00/28] " Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 19/28] net/cnxk: support Tx security offload on cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 21/28] net/cnxk: support Tx security offload on cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-01  5:37   ` [dpdk-dev] [PATCH v2 00/28] net/cnxk: support for inline ipsec Jerin Jacob
2021-10-01 13:39 ` [dpdk-dev] [PATCH v3 " Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-10-06 16:21     ` Ferruh Yigit
2021-10-06 16:44       ` Nithin Kumar Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 19/28] net/cnxk: support Tx security offload on cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 21/28] net/cnxk: support Tx security offload on cn10k Nithin Dabilpuram [this message]
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-02 13:49   ` [dpdk-dev] [PATCH v3 00/28] net/cnxk: support for inline ipsec Jerin Jacob
