From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v3 19/28] net/cnxk: support Tx security offload on cn9k
Date: Fri, 1 Oct 2021 19:10:13 +0530
Message-ID: <20211001134022.22700-20-ndabilpuram@marvell.com>
In-Reply-To: <20211001134022.22700-1-ndabilpuram@marvell.com>

Add support to create and submit CPT instructions in the Tx path on
the CN9K SoC, enabling inline outbound security (IPsec) offload.
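
Most of the diff is mechanical: every flag-indexed fast-path function
table gains a leading [SECURITY] dimension, and the T() macro expanding
NIX_TX_FASTPATH_MODES gains a matching f6 argument. For illustration
only (not part of the diff below), a minimal, self-contained sketch of
that selection pattern follows; the flag bit positions and names here
are stand-ins, not the driver's real NIX_TX_OFFLOAD_* values:

  #include <stdint.h>

  typedef uint16_t (*tx_burst_fn)(void *txq, void **pkts, uint16_t nb);

  /* Illustrative bit positions only; the real values live in cn9k_tx.h */
  enum {
  	F_L3L4CSUM, F_OL3OL4CSUM, F_VLAN, F_NOFF, F_TSO, F_TSTAMP, F_SEC
  };

  static tx_burst_fn
  pick_burst_fn(uint64_t flags,
  	      const tx_burst_fn tbl[2][2][2][2][2][2][2])
  {
  	/* Each !!() collapses one offload flag into a 0/1 table index */
  	return tbl[!!(flags & (1ULL << F_SEC))]
  		  [!!(flags & (1ULL << F_TSTAMP))]
  		  [!!(flags & (1ULL << F_TSO))]
  		  [!!(flags & (1ULL << F_NOFF))]
  		  [!!(flags & (1ULL << F_VLAN))]
  		  [!!(flags & (1ULL << F_OL3OL4CSUM))]
  		  [!!(flags & (1ULL << F_L3L4CSUM))];
  }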

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn9k_eventdev.c               |  29 +-
 drivers/event/cnxk/cn9k_worker.h                 | 163 +++++++++-
 drivers/event/cnxk/cn9k_worker_dual_tx_enq.c     |   2 +-
 drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c |   2 +-
 drivers/event/cnxk/cn9k_worker_tx_enq.c          |   2 +-
 drivers/event/cnxk/cn9k_worker_tx_enq_seg.c      |   2 +-
 drivers/net/cnxk/cn9k_tx.c                       |  29 +-
 drivers/net/cnxk/cn9k_tx.h                       | 392 +++++++++++++++--------
 drivers/net/cnxk/cn9k_tx_mseg.c                  |   2 +-
 drivers/net/cnxk/cn9k_tx_vec.c                   |   2 +-
 drivers/net/cnxk/cn9k_tx_vec_mseg.c              |   2 +-
 11 files changed, 459 insertions(+), 168 deletions(-)
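
Note on the new Tx security path (illustration only, not part of the
diff below): cn9k_sso_hws_xmit_sec_one() grows the outbound packet by
rounding the post-L2 length up to a multiple of the session's
roundup_byte after adding roundup_len, then adds partial_len, and
reserves the difference (dlen_adj) past the packet before building the
CPT instruction. A standalone sketch of that arithmetic, with the
parameter names taken from the cn9k_sec_sess_priv fields used in the
diff and made-up example values:

  #include <stdint.h>
  #include <stdio.h>

  static uint32_t
  outb_dlen_adj(uint32_t pkt_len, uint8_t l2_len, uint32_t roundup_len,
  	      uint32_t roundup_byte, uint32_t partial_len)
  {
  	uint32_t rlen = pkt_len - l2_len;	/* bytes past the L2 header */

  	/* Add roundup_len, then round up to a multiple of roundup_byte */
  	rlen = (rlen + roundup_len) + (roundup_byte - 1);
  	rlen &= ~(uint32_t)(roundup_byte - 1);
  	rlen += partial_len;			/* fixed per-SA overhead */

  	/* Extra bytes the security path reserves past the packet */
  	return rlen - pkt_len + l2_len;
  }

  int main(void)
  {
  	/* Example only: 110B frame, 14B L2, 16B rounding, 24B overhead */
  	printf("dlen_adj = %u\n", outb_dlen_adj(110, 14, 16, 16, 24));
  	return 0;
  }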

diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 64d9ded..806dcb0 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -19,8 +19,8 @@
 			 [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])
 
 #define CN9K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                            \
-	(enq_op =                                                              \
-		 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
+	(enq_op = enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]    \
+			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
@@ -515,33 +515,34 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 
 	/* Tx modes */
 	const event_tx_adapter_enqueue
-		sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
+		sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_##name,
 			NIX_TX_FASTPATH_MODES
 #undef T
 		};
 
 	const event_tx_adapter_enqueue
-		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
+		sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_tx_adptr_enq_seg_##name,
 			NIX_TX_FASTPATH_MODES
 #undef T
 		};
 
 	const event_tx_adapter_enqueue
-		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
+		sso_hws_dual_tx_adptr_enq[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_##name,
 			NIX_TX_FASTPATH_MODES
 #undef T
 		};
 
 	const event_tx_adapter_enqueue
-		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
-	[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
+		sso_hws_dual_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
+	[f6][f5][f4][f3][f2][f1][f0] =                                         \
+			cn9k_sso_hws_dual_tx_adptr_enq_seg_##name,
 			NIX_TX_FASTPATH_MODES
 #undef T
 		};
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index f1d2e47..6be9be0 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -478,6 +478,145 @@ cn9k_sso_hws_prepare_pkt(const struct cn9k_eth_txq *txq, struct rte_mbuf *m,
 	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
 }
 
+#if defined(RTE_ARCH_ARM64)
+
+static __rte_always_inline void
+cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
+			  struct rte_mbuf *m, uint64_t *cmd,
+			  uint32_t flags)
+{
+	struct cn9k_outb_priv_data *outb_priv;
+	rte_iova_t io_addr = txq->cpt_io_addr;
+	uint64_t *lmt_addr = txq->lmt_addr;
+	struct cn9k_sec_sess_priv mdata;
+	struct nix_send_hdr_s *send_hdr;
+	uint64_t sa_base = txq->sa_base;
+	uint32_t pkt_len, dlen_adj, rlen;
+	uint64x2_t cmd01, cmd23;
+	uint64_t lmt_status, sa;
+	union nix_send_sg_s *sg;
+	uintptr_t dptr, nixtx;
+	uint64_t ucode_cmd[4];
+	uint64_t esn, *iv;
+	uint8_t l2_len;
+
+	mdata.u64 = *rte_security_dynfield(m);
+	send_hdr = (struct nix_send_hdr_s *)cmd;
+	if (flags & NIX_TX_NEED_EXT_HDR)
+		sg = (union nix_send_sg_s *)&cmd[4];
+	else
+		sg = (union nix_send_sg_s *)&cmd[2];
+
+	if (flags & NIX_TX_NEED_SEND_HDR_W1)
+		l2_len = cmd[1] & 0xFF;
+	else
+		l2_len = m->l2_len;
+
+	/* Retrieve DPTR */
+	dptr = *(uint64_t *)(sg + 1);
+	pkt_len = send_hdr->w0.total;
+
+	/* Calculate rlen */
+	rlen = pkt_len - l2_len;
+	rlen = (rlen + mdata.roundup_len) + (mdata.roundup_byte - 1);
+	rlen &= ~(uint64_t)(mdata.roundup_byte - 1);
+	rlen += mdata.partial_len;
+	dlen_adj = rlen - pkt_len + l2_len;
+
+	/* Update send descriptors. Security is single segment only */
+	send_hdr->w0.total = pkt_len + dlen_adj;
+	sg->seg1_size = pkt_len + dlen_adj;
+
+	/* Get area where NIX descriptor needs to be stored */
+	nixtx = dptr + pkt_len + dlen_adj;
+	nixtx += BIT_ULL(7);
+	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
+
+	roc_lmt_mov((void *)(nixtx + 16), cmd, cn9k_nix_tx_ext_subs(flags));
+
+	/* Load opcode and cptr already prepared at pkt metadata set */
+	pkt_len -= l2_len;
+	pkt_len += sizeof(struct roc_onf_ipsec_outb_hdr) +
+		    ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
+	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+
+	sa = (uintptr_t)roc_nix_inl_onf_ipsec_outb_sa(sa_base, mdata.sa_idx);
+	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
+	ucode_cmd[0] = (ROC_IE_ONF_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 |
+			0x40UL << 48 | pkt_len);
+
+	/* CPT Word 0 and Word 1 */
+	cmd01 = vdupq_n_u64((nixtx + 16) | (cn9k_nix_tx_ext_subs(flags) + 1));
+	/* CPT_RES_S is 16B above NIXTX */
+	cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);
+
+	/* CPT word 2 and 3 */
+	cmd23 = vdupq_n_u64(0);
+	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) |
+				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
+	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+
+	dptr += l2_len - ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ -
+		sizeof(struct roc_onf_ipsec_outb_hdr);
+	ucode_cmd[1] = dptr;
+	ucode_cmd[2] = dptr;
+
+	/* Update IV to zero and l2 sz */
+	*(uint16_t *)(dptr + sizeof(struct roc_onf_ipsec_outb_hdr)) =
+		rte_cpu_to_be_16(ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ);
+	iv = (uint64_t *)(dptr + 8);
+	iv[0] = 0;
+	iv[1] = 0;
+
+	/* Head wait if needed */
+	if (base)
+		roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+
+	/* ESN */
+	outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
+	esn = outb_priv->esn;
+	outb_priv->esn = esn + 1;
+
+	ucode_cmd[0] |= (esn >> 32) << 16;
+	esn = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+
+	/* Update ESN and IPID and IV */
+	*(uint64_t *)dptr = esn << 32 | esn;
+
+	rte_io_wmb();
+	cn9k_sso_txq_fc_wait(txq);
+
+	/* Write CPT instruction to lmt line */
+	vst1q_u64(lmt_addr, cmd01);
+	vst1q_u64(lmt_addr + 2, cmd23);
+
+	roc_lmt_mov_seg(lmt_addr + 4, ucode_cmd, 2);
+
+	if (roc_lmt_submit_ldeor(io_addr) == 0) {
+		do {
+			vst1q_u64(lmt_addr, cmd01);
+			vst1q_u64(lmt_addr + 2, cmd23);
+			roc_lmt_mov_seg(lmt_addr + 4, ucode_cmd, 2);
+
+			lmt_status = roc_lmt_submit_ldeor(io_addr);
+		} while (lmt_status == 0);
+	}
+}
+#else
+
+static inline void
+cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
+			  struct rte_mbuf *m, uint64_t *cmd,
+			  uint32_t flags)
+{
+	RTE_SET_USED(txq);
+	RTE_SET_USED(base);
+	RTE_SET_USED(m);
+	RTE_SET_USED(cmd);
+	RTE_SET_USED(flags);
+}
+#endif
+
 static __rte_always_inline uint16_t
 cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 		      const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
@@ -494,11 +633,30 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 	 * In case of fast free is not set, both cn9k_nix_prepare_mseg()
 	 * and cn9k_nix_xmit_prepare() has a barrier after refcnt update.
 	 */
-	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
+	    !(flags & NIX_TX_OFFLOAD_SECURITY_F))
 		rte_io_wmb();
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
 	cn9k_sso_hws_prepare_pkt(txq, m, cmd, flags);
 
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		uint64_t ol_flags = m->ol_flags;
+
+		if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+			uintptr_t ssow_base = base;
+
+			if (ev->sched_type)
+				ssow_base = 0;
+
+			cn9k_sso_hws_xmit_sec_one(txq, ssow_base, m, cmd,
+						  flags);
+			goto done;
+		}
+
+		if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+			rte_io_wmb();
+	}
+
 	if (flags & NIX_TX_MULTI_SEG_F) {
 		const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
 		if (!CNXK_TT_FROM_EVENT(ev->event)) {
@@ -526,6 +684,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 		}
 	}
 
+done:
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 		if (ref_cnt > 1)
 			return 1;
@@ -537,7 +696,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 	return 1;
 }
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
 		void *port, struct rte_event ev[], uint16_t nb_events);        \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
diff --git a/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c b/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c
index 92e2981..db045d0 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_tx_enq.c
@@ -4,7 +4,7 @@
 
 #include "cn9k_worker.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(              \
 		void *port, struct rte_event ev[], uint16_t nb_events)         \
 	{                                                                      \
diff --git a/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c b/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c
index dfb574c..95d711f 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_tx_enq_seg.c
@@ -4,7 +4,7 @@
 
 #include "cn9k_worker.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(          \
 		void *port, struct rte_event ev[], uint16_t nb_events)         \
 	{                                                                      \
diff --git a/drivers/event/cnxk/cn9k_worker_tx_enq.c b/drivers/event/cnxk/cn9k_worker_tx_enq.c
index 3df649c..026cef8 100644
--- a/drivers/event/cnxk/cn9k_worker_tx_enq.c
+++ b/drivers/event/cnxk/cn9k_worker_tx_enq.c
@@ -4,7 +4,7 @@
 
 #include "cn9k_worker.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
 		void *port, struct rte_event ev[], uint16_t nb_events)         \
 	{                                                                      \
diff --git a/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c b/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c
index 0efe291..97cd7c7 100644
--- a/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c
+++ b/drivers/event/cnxk/cn9k_worker_tx_enq_seg.c
@@ -4,7 +4,7 @@
 
 #include "cn9k_worker.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
 		void *port, struct rte_event ev[], uint16_t nb_events)         \
 	{                                                                      \
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index 763f9a1..e5691a2 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -5,7 +5,7 @@
 #include "cn9k_ethdev.h"
 #include "cn9k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name(	       \
 		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts)      \
 	{                                                                      \
@@ -23,12 +23,13 @@ NIX_TX_FASTPATH_MODES
 
 static inline void
 pick_tx_func(struct rte_eth_dev *eth_dev,
-	     const eth_tx_burst_t tx_burst[2][2][2][2][2][2])
+	     const eth_tx_burst_t tx_burst[2][2][2][2][2][2][2])
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
 	/* [TS] [TSO] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
 	eth_dev->tx_pkt_burst = tx_burst
+		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_SECURITY_F)]
 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]
 		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
@@ -42,33 +43,33 @@ cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 
-	const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
-	[f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_##name,
+	const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+	[f6][f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
 	};
 
-	const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
-	[f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_mseg_##name,
+	const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+	[f6][f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_mseg_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
 	};
 
-	const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
-	[f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_vec_##name,
+	const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+	[f6][f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_vec_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
 	};
 
-	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[2][2][2][2][2][2] = {
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
-	[f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_vec_mseg_##name,
+	const eth_tx_burst_t nix_eth_tx_vec_burst_mseg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+	[f6][f5][f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_vec_mseg_##name,
 
 		NIX_TX_FASTPATH_MODES
 #undef T
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index a27ff76..44273ec 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -1819,139 +1819,269 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 #define NOFF_F	     NIX_TX_OFFLOAD_MBUF_NOFF_F
 #define TSO_F	     NIX_TX_OFFLOAD_TSO_F
 #define TSP_F	     NIX_TX_OFFLOAD_TSTAMP_F
+#define T_SEC_F      NIX_TX_OFFLOAD_SECURITY_F
 
-/* [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
-#define NIX_TX_FASTPATH_MODES						       \
-T(no_offload,				0, 0, 0, 0, 0, 0,	4,	       \
-		NIX_TX_OFFLOAD_NONE)					       \
-T(l3l4csum,				0, 0, 0, 0, 0, 1,	4,	       \
-		L3L4CSUM_F)						       \
-T(ol3ol4csum,				0, 0, 0, 0, 1, 0,	4,	       \
-		OL3OL4CSUM_F)						       \
-T(ol3ol4csum_l3l4csum,			0, 0, 0, 0, 1, 1,	4,	       \
-		OL3OL4CSUM_F | L3L4CSUM_F)				       \
-T(vlan,					0, 0, 0, 1, 0, 0,	6,	       \
-		VLAN_F)							       \
-T(vlan_l3l4csum,			0, 0, 0, 1, 0, 1,	6,	       \
-		VLAN_F | L3L4CSUM_F)					       \
-T(vlan_ol3ol4csum,			0, 0, 0, 1, 1, 0,	6,	       \
-		VLAN_F | OL3OL4CSUM_F)					       \
-T(vlan_ol3ol4csum_l3l4csum,		0, 0, 0, 1, 1, 1,	6,	       \
-		VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)			       \
-T(noff,					0, 0, 1, 0, 0, 0,	4,	       \
-		NOFF_F)							       \
-T(noff_l3l4csum,			0, 0, 1, 0, 0, 1,	4,	       \
-		NOFF_F | L3L4CSUM_F)					       \
-T(noff_ol3ol4csum,			0, 0, 1, 0, 1, 0,	4,	       \
-		NOFF_F | OL3OL4CSUM_F)					       \
-T(noff_ol3ol4csum_l3l4csum,		0, 0, 1, 0, 1, 1,	4,	       \
-		NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)			       \
-T(noff_vlan,				0, 0, 1, 1, 0, 0,	6,	       \
-		NOFF_F | VLAN_F)					       \
-T(noff_vlan_l3l4csum,			0, 0, 1, 1, 0, 1,	6,	       \
-		NOFF_F | VLAN_F | L3L4CSUM_F)				       \
-T(noff_vlan_ol3ol4csum,			0, 0, 1, 1, 1, 0,	6,	       \
-		NOFF_F | VLAN_F | OL3OL4CSUM_F)				       \
-T(noff_vlan_ol3ol4csum_l3l4csum,	0, 0, 1, 1, 1, 1,	6,	       \
-		NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)		       \
-T(tso,					0, 1, 0, 0, 0, 0,	6,	       \
-		TSO_F)							       \
-T(tso_l3l4csum,				0, 1, 0, 0, 0, 1,	6,	       \
-		TSO_F | L3L4CSUM_F)					       \
-T(tso_ol3ol4csum,			0, 1, 0, 0, 1, 0,	6,	       \
-		TSO_F | OL3OL4CSUM_F)					       \
-T(tso_ol3ol4csum_l3l4csum,		0, 1, 0, 0, 1, 1,	6,	       \
-		TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)			       \
-T(tso_vlan,				0, 1, 0, 1, 0, 0,	6,	       \
-		TSO_F | VLAN_F)						       \
-T(tso_vlan_l3l4csum,			0, 1, 0, 1, 0, 1,	6,	       \
-		TSO_F | VLAN_F | L3L4CSUM_F)				       \
-T(tso_vlan_ol3ol4csum,			0, 1, 0, 1, 1, 0,	6,	       \
-		TSO_F | VLAN_F | OL3OL4CSUM_F)				       \
-T(tso_vlan_ol3ol4csum_l3l4csum,		0, 1, 0, 1, 1, 1,	6,	       \
-		TSO_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)		       \
-T(tso_noff,				0, 1, 1, 0, 0, 0,	6,	       \
-		TSO_F | NOFF_F)						       \
-T(tso_noff_l3l4csum,			0, 1, 1, 0, 0, 1,	6,	       \
-		TSO_F | NOFF_F | L3L4CSUM_F)				       \
-T(tso_noff_ol3ol4csum,			0, 1, 1, 0, 1, 0,	6,	       \
-		TSO_F | NOFF_F | OL3OL4CSUM_F)				       \
-T(tso_noff_ol3ol4csum_l3l4csum,		0, 1, 1, 0, 1, 1,	6,	       \
-		TSO_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)		       \
-T(tso_noff_vlan,			0, 1, 1, 1, 0, 0,	6,	       \
-		TSO_F | NOFF_F | VLAN_F)				       \
-T(tso_noff_vlan_l3l4csum,		0, 1, 1, 1, 0, 1,	6,	       \
-		TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)			       \
-T(tso_noff_vlan_ol3ol4csum,		0, 1, 1, 1, 1, 0,	6,	       \
-		TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)			       \
-T(tso_noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 1, 1,	6,	       \
-		TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	       \
-T(ts,					1, 0, 0, 0, 0, 0,	8,	       \
-		TSP_F)							       \
-T(ts_l3l4csum,				1, 0, 0, 0, 0, 1,	8,	       \
-		TSP_F | L3L4CSUM_F)					       \
-T(ts_ol3ol4csum,			1, 0, 0, 0, 1, 0,	8,	       \
-		TSP_F | OL3OL4CSUM_F)					       \
-T(ts_ol3ol4csum_l3l4csum,		1, 0, 0, 0, 1, 1,	8,	       \
-		TSP_F | OL3OL4CSUM_F | L3L4CSUM_F)			       \
-T(ts_vlan,				1, 0, 0, 1, 0, 0,	8,	       \
-		TSP_F | VLAN_F)						       \
-T(ts_vlan_l3l4csum,			1, 0, 0, 1, 0, 1,	8,	       \
-		TSP_F | VLAN_F | L3L4CSUM_F)				       \
-T(ts_vlan_ol3ol4csum,			1, 0, 0, 1, 1, 0,	8,	       \
-		TSP_F | VLAN_F | OL3OL4CSUM_F)				       \
-T(ts_vlan_ol3ol4csum_l3l4csum,		1, 0, 0, 1, 1, 1,	8,	       \
-		TSP_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)		       \
-T(ts_noff,				1, 0, 1, 0, 0, 0,	8,	       \
-		TSP_F | NOFF_F)						       \
-T(ts_noff_l3l4csum,			1, 0, 1, 0, 0, 1,	8,	       \
-		TSP_F | NOFF_F | L3L4CSUM_F)				       \
-T(ts_noff_ol3ol4csum,			1, 0, 1, 0, 1, 0,	8,	       \
-		TSP_F | NOFF_F | OL3OL4CSUM_F)				       \
-T(ts_noff_ol3ol4csum_l3l4csum,		1, 0, 1, 0, 1, 1,	8,	       \
-		TSP_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)		       \
-T(ts_noff_vlan,				1, 0, 1, 1, 0, 0,	8,	       \
-		TSP_F | NOFF_F | VLAN_F)				       \
-T(ts_noff_vlan_l3l4csum,		1, 0, 1, 1, 0, 1,	8,	       \
-		TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)			       \
-T(ts_noff_vlan_ol3ol4csum,		1, 0, 1, 1, 1, 0,	8,	       \
-		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)			       \
-T(ts_noff_vlan_ol3ol4csum_l3l4csum,	1, 0, 1, 1, 1, 1,	8,	       \
-		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	       \
-T(ts_tso,				1, 1, 0, 0, 0, 0,	8,	       \
-		TSP_F | TSO_F)						       \
-T(ts_tso_l3l4csum,			1, 1, 0, 0, 0, 1,	8,	       \
-		TSP_F | TSO_F | L3L4CSUM_F)				       \
-T(ts_tso_ol3ol4csum,			1, 1, 0, 0, 1, 0,	8,	       \
-		TSP_F | TSO_F | OL3OL4CSUM_F)				       \
-T(ts_tso_ol3ol4csum_l3l4csum,		1, 1, 0, 0, 1, 1,	8,	       \
-		TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)		       \
-T(ts_tso_vlan,				1, 1, 0, 1, 0, 0,	8,	       \
-		TSP_F | TSO_F | VLAN_F)					       \
-T(ts_tso_vlan_l3l4csum,			1, 1, 0, 1, 0, 1,	8,	       \
-		TSP_F | TSO_F | VLAN_F | L3L4CSUM_F)			       \
-T(ts_tso_vlan_ol3ol4csum,		1, 1, 0, 1, 1, 0,	8,	       \
-		TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F)			       \
-T(ts_tso_vlan_ol3ol4csum_l3l4csum,	1, 1, 0, 1, 1, 1,	8,	       \
-		TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)	       \
-T(ts_tso_noff,				1, 1, 1, 0, 0, 0,	8,	       \
-		TSP_F | TSO_F | NOFF_F)					       \
-T(ts_tso_noff_l3l4csum,			1, 1, 1, 0, 0, 1,	8,	       \
-		TSP_F | TSO_F | NOFF_F | L3L4CSUM_F)			       \
-T(ts_tso_noff_ol3ol4csum,		1, 1, 1, 0, 1, 0,	8,	       \
-		TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F)			       \
-T(ts_tso_noff_ol3ol4csum_l3l4csum,	1, 1, 1, 0, 1, 1,	8,	       \
-		TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)	       \
-T(ts_tso_noff_vlan,			1, 1, 1, 1, 0, 0,	8,	       \
-		TSP_F | TSO_F | NOFF_F | VLAN_F)			       \
-T(ts_tso_noff_vlan_l3l4csum,		1, 1, 1, 1, 0, 1,	8,	       \
-		TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)		       \
-T(ts_tso_noff_vlan_ol3ol4csum,		1, 1, 1, 1, 1, 0,	8,	       \
-		TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)		       \
-T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum,	1, 1, 1, 1, 1, 1,	8,	       \
-		TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)
+/* [T_SEC_F] [TSP] [TSO] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
+#define NIX_TX_FASTPATH_MODES						\
+T(no_offload,				0, 0, 0, 0, 0, 0, 0,	4,	\
+		NIX_TX_OFFLOAD_NONE)					\
+T(l3l4csum,				0, 0, 0, 0, 0, 0, 1,	4,	\
+		L3L4CSUM_F)						\
+T(ol3ol4csum,				0, 0, 0, 0, 0, 1, 0,	4,	\
+		OL3OL4CSUM_F)						\
+T(ol3ol4csum_l3l4csum,			0, 0, 0, 0, 0, 1, 1,	4,	\
+		OL3OL4CSUM_F | L3L4CSUM_F)				\
+T(vlan,					0, 0, 0, 0, 1, 0, 0,	6,	\
+		VLAN_F)							\
+T(vlan_l3l4csum,			0, 0, 0, 0, 1, 0, 1,	6,	\
+		VLAN_F | L3L4CSUM_F)					\
+T(vlan_ol3ol4csum,			0, 0, 0, 0, 1, 1, 0,	6,	\
+		VLAN_F | OL3OL4CSUM_F)					\
+T(vlan_ol3ol4csum_l3l4csum,		0, 0, 0, 0, 1, 1, 1,	6,	\
+		VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)			\
+T(noff,					0, 0, 0, 1, 0, 0, 0,	4,	\
+		NOFF_F)							\
+T(noff_l3l4csum,			0, 0, 0, 1, 0, 0, 1,	4,	\
+		NOFF_F | L3L4CSUM_F)					\
+T(noff_ol3ol4csum,			0, 0, 0, 1, 0, 1, 0,	4,	\
+		NOFF_F | OL3OL4CSUM_F)					\
+T(noff_ol3ol4csum_l3l4csum,		0, 0, 0, 1, 0, 1, 1,	4,	\
+		NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)			\
+T(noff_vlan,				0, 0, 0, 1, 1, 0, 0,	6,	\
+		NOFF_F | VLAN_F)					\
+T(noff_vlan_l3l4csum,			0, 0, 0, 1, 1, 0, 1,	6,	\
+		NOFF_F | VLAN_F | L3L4CSUM_F)				\
+T(noff_vlan_ol3ol4csum,			0, 0, 0, 1, 1, 1, 0,	6,	\
+		NOFF_F | VLAN_F | OL3OL4CSUM_F)				\
+T(noff_vlan_ol3ol4csum_l3l4csum,	0, 0, 0, 1, 1, 1, 1,	6,	\
+		NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
+T(tso,					0, 0, 1, 0, 0, 0, 0,	6,	\
+		TSO_F)							\
+T(tso_l3l4csum,				0, 0, 1, 0, 0, 0, 1,	6,	\
+		TSO_F | L3L4CSUM_F)					\
+T(tso_ol3ol4csum,			0, 0, 1, 0, 0, 1, 0,	6,	\
+		TSO_F | OL3OL4CSUM_F)					\
+T(tso_ol3ol4csum_l3l4csum,		0, 0, 1, 0, 0, 1, 1,	6,	\
+		TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
+T(tso_vlan,				0, 0, 1, 0, 1, 0, 0,	6,	\
+		TSO_F | VLAN_F)						\
+T(tso_vlan_l3l4csum,			0, 0, 1, 0, 1, 0, 1,	6,	\
+		TSO_F | VLAN_F | L3L4CSUM_F)				\
+T(tso_vlan_ol3ol4csum,			0, 0, 1, 0, 1, 1, 0,	6,	\
+		TSO_F | VLAN_F | OL3OL4CSUM_F)				\
+T(tso_vlan_ol3ol4csum_l3l4csum,		0, 0, 1, 0, 1, 1, 1,	6,	\
+		TSO_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
+T(tso_noff,				0, 0, 1, 1, 0, 0, 0,	6,	\
+		TSO_F | NOFF_F)						\
+T(tso_noff_l3l4csum,			0, 0, 1, 1, 0, 0, 1,	6,	\
+		TSO_F | NOFF_F | L3L4CSUM_F)				\
+T(tso_noff_ol3ol4csum,			0, 0, 1, 1, 0, 1, 0,	6,	\
+		TSO_F | NOFF_F | OL3OL4CSUM_F)				\
+T(tso_noff_ol3ol4csum_l3l4csum,		0, 0, 1, 1, 0, 1, 1,	6,	\
+		TSO_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
+T(tso_noff_vlan,			0, 0, 1, 1, 1, 0, 0,	6,	\
+		TSO_F | NOFF_F | VLAN_F)				\
+T(tso_noff_vlan_l3l4csum,		0, 0, 1, 1, 1, 0, 1,	6,	\
+		TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)			\
+T(tso_noff_vlan_ol3ol4csum,		0, 0, 1, 1, 1, 1, 0,	6,	\
+		TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)			\
+T(tso_noff_vlan_ol3ol4csum_l3l4csum,	0, 0, 1, 1, 1, 1, 1,	6,	\
+		TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(ts,					0, 1, 0, 0, 0, 0, 0,	8,	\
+		TSP_F)							\
+T(ts_l3l4csum,				0, 1, 0, 0, 0, 0, 1,	8,	\
+		TSP_F | L3L4CSUM_F)					\
+T(ts_ol3ol4csum,			0, 1, 0, 0, 0, 1, 0,	8,	\
+		TSP_F | OL3OL4CSUM_F)					\
+T(ts_ol3ol4csum_l3l4csum,		0, 1, 0, 0, 0, 1, 1,	8,	\
+		TSP_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
+T(ts_vlan,				0, 1, 0, 0, 1, 0, 0,	8,	\
+		TSP_F | VLAN_F)						\
+T(ts_vlan_l3l4csum,			0, 1, 0, 0, 1, 0, 1,	8,	\
+		TSP_F | VLAN_F | L3L4CSUM_F)				\
+T(ts_vlan_ol3ol4csum,			0, 1, 0, 0, 1, 1, 0,	8,	\
+		TSP_F | VLAN_F | OL3OL4CSUM_F)				\
+T(ts_vlan_ol3ol4csum_l3l4csum,		0, 1, 0, 0, 1, 1, 1,	8,	\
+		TSP_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
+T(ts_noff,				0, 1, 0, 1, 0, 0, 0,	8,	\
+		TSP_F | NOFF_F)						\
+T(ts_noff_l3l4csum,			0, 1, 0, 1, 0, 0, 1,	8,	\
+		TSP_F | NOFF_F | L3L4CSUM_F)				\
+T(ts_noff_ol3ol4csum,			0, 1, 0, 1, 0, 1, 0,	8,	\
+		TSP_F | NOFF_F | OL3OL4CSUM_F)				\
+T(ts_noff_ol3ol4csum_l3l4csum,		0, 1, 0, 1, 0, 1, 1,	8,	\
+		TSP_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)		\
+T(ts_noff_vlan,				0, 1, 0, 1, 1, 0, 0,	8,	\
+		TSP_F | NOFF_F | VLAN_F)				\
+T(ts_noff_vlan_l3l4csum,		0, 1, 0, 1, 1, 0, 1,	8,	\
+		TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)			\
+T(ts_noff_vlan_ol3ol4csum,		0, 1, 0, 1, 1, 1, 0,	8,	\
+		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)			\
+T(ts_noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 0, 1, 1, 1, 1,	8,	\
+		TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(ts_tso,				0, 1, 1, 0, 0, 0, 0,	8,	\
+		TSP_F | TSO_F)						\
+T(ts_tso_l3l4csum,			0, 1, 1, 0, 0, 0, 1,	8,	\
+		TSP_F | TSO_F | L3L4CSUM_F)				\
+T(ts_tso_ol3ol4csum,			0, 1, 1, 0, 0, 1, 0,	8,	\
+		TSP_F | TSO_F | OL3OL4CSUM_F)				\
+T(ts_tso_ol3ol4csum_l3l4csum,		0, 1, 1, 0, 0, 1, 1,	8,	\
+		TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
+T(ts_tso_vlan,				0, 1, 1, 0, 1, 0, 0,	8,	\
+		TSP_F | TSO_F | VLAN_F)					\
+T(ts_tso_vlan_l3l4csum,			0, 1, 1, 0, 1, 0, 1,	8,	\
+		TSP_F | TSO_F | VLAN_F | L3L4CSUM_F)			\
+T(ts_tso_vlan_ol3ol4csum,		0, 1, 1, 0, 1, 1, 0,	8,	\
+		TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F)			\
+T(ts_tso_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 0, 1, 1, 1,	8,	\
+		TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
+T(ts_tso_noff,				0, 1, 1, 1, 0, 0, 0,	8,	\
+		TSP_F | TSO_F | NOFF_F)					\
+T(ts_tso_noff_l3l4csum,			0, 1, 1, 1, 0, 0, 1,	8,	\
+		TSP_F | TSO_F | NOFF_F | L3L4CSUM_F)			\
+T(ts_tso_noff_ol3ol4csum,		0, 1, 1, 1, 0, 1, 0,	8,	\
+		TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F)			\
+T(ts_tso_noff_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 0, 1, 1,	8,	\
+		TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
+T(ts_tso_noff_vlan,			0, 1, 1, 1, 1, 0, 0,	8,	\
+		TSP_F | TSO_F | NOFF_F | VLAN_F)			\
+T(ts_tso_noff_vlan_l3l4csum,		0, 1, 1, 1, 1, 0, 1,	8,	\
+		TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)		\
+T(ts_tso_noff_vlan_ol3ol4csum,		0, 1, 1, 1, 1, 1, 0,	8,	\
+		TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)		\
+T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum,	0, 1, 1, 1, 1, 1, 1,	8,	\
+		TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec,					1, 0, 0, 0, 0, 0, 0,	4,	\
+		T_SEC_F)						\
+T(sec_l3l4csum,				1, 0, 0, 0, 0, 0, 1,	4,	\
+		T_SEC_F | L3L4CSUM_F)					\
+T(sec_ol3ol4csum,			1, 0, 0, 0, 0, 1, 0,	4,	\
+		T_SEC_F | OL3OL4CSUM_F)					\
+T(sec_ol3ol4csum_l3l4csum,		1, 0, 0, 0, 0, 1, 1,	4,	\
+		T_SEC_F | OL3OL4CSUM_F | L3L4CSUM_F)			\
+T(sec_vlan,				1, 0, 0, 0, 1, 0, 0,	6,	\
+		T_SEC_F | VLAN_F)					\
+T(sec_vlan_l3l4csum,			1, 0, 0, 0, 1, 0, 1,	6,	\
+		T_SEC_F | VLAN_F | L3L4CSUM_F)				\
+T(sec_vlan_ol3ol4csum,			1, 0, 0, 0, 1, 1, 0,	6,	\
+		T_SEC_F | VLAN_F | OL3OL4CSUM_F)			\
+T(sec_vlan_ol3ol4csum_l3l4csum,		1, 0, 0, 0, 1, 1, 1,	6,	\
+		T_SEC_F | VLAN_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
+T(sec_noff,				1, 0, 0, 1, 0, 0, 0,	4,	\
+		T_SEC_F | NOFF_F)					\
+T(sec_noff_l3l4csum,			1, 0, 0, 1, 0, 0, 1,	4,	\
+		T_SEC_F | NOFF_F | L3L4CSUM_F)				\
+T(sec_noff_ol3ol4csum,			1, 0, 0, 1, 0, 1, 0,	4,	\
+		T_SEC_F | NOFF_F | OL3OL4CSUM_F)			\
+T(sec_noff_ol3ol4csum_l3l4csum,		1, 0, 0, 1, 0, 1, 1,	4,	\
+		T_SEC_F | NOFF_F | OL3OL4CSUM_F |	L3L4CSUM_F)	\
+T(sec_noff_vlan,			1, 0, 0, 1, 1, 0, 0,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F)				\
+T(sec_noff_vlan_l3l4csum,		1, 0, 0, 1, 1, 0, 1,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F | L3L4CSUM_F)			\
+T(sec_noff_vlan_ol3ol4csum,		1, 0, 0, 1, 1, 1, 0,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)		\
+T(sec_noff_vlan_ol3ol4csum_l3l4csum,	1, 0, 0, 1, 1, 1, 1,	6,	\
+		T_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_tso,				1, 0, 1, 0, 0, 0, 0,	6,	\
+		T_SEC_F | TSO_F)					\
+T(sec_tso_l3l4csum,			1, 0, 1, 0, 0, 0, 1,	6,	\
+		T_SEC_F | TSO_F | L3L4CSUM_F)				\
+T(sec_tso_ol3ol4csum,			1, 0, 1, 0, 0, 1, 0,	6,	\
+		T_SEC_F | TSO_F | OL3OL4CSUM_F)				\
+T(sec_tso_ol3ol4csum_l3l4csum,		1, 0, 1, 0, 0, 1, 1,	6,	\
+		T_SEC_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
+T(sec_tso_vlan,				1, 0, 1, 0, 1, 0, 0,	6,	\
+		T_SEC_F | TSO_F | VLAN_F)				\
+T(sec_tso_vlan_l3l4csum,		1, 0, 1, 0, 1, 0, 1,	6,	\
+		T_SEC_F | TSO_F | VLAN_F | L3L4CSUM_F)			\
+T(sec_tso_vlan_ol3ol4csum,		1, 0, 1, 0, 1, 1, 0,	6,	\
+		T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F)		\
+T(sec_tso_vlan_ol3ol4csum_l3l4csum,	1, 0, 1, 0, 1, 1, 1,	6,	\
+		T_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_tso_noff,				1, 0, 1, 1, 0, 0, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F)				\
+T(sec_tso_noff_l3l4csum,		1, 0, 1, 1, 0, 0, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | L3L4CSUM_F)			\
+T(sec_tso_noff_ol3ol4csum,		1, 0, 1, 1, 0, 1, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F)		\
+T(sec_tso_noff_ol3ol4csum_l3l4csum,	1, 0, 1, 1, 0, 1, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_tso_noff_vlan,			1, 0, 1, 1, 1, 0, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F)			\
+T(sec_tso_noff_vlan_l3l4csum,		1, 0, 1, 1, 1, 0, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)		\
+T(sec_tso_noff_vlan_ol3ol4csum,		1, 0, 1, 1, 1, 1, 0,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)	\
+T(sec_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 1,	6,	\
+		T_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts,				1, 1, 0, 0, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F)					\
+T(sec_ts_l3l4csum,			1, 1, 0, 0, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | L3L4CSUM_F)				\
+T(sec_ts_ol3ol4csum,			1, 1, 0, 0, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | OL3OL4CSUM_F)				\
+T(sec_ts_ol3ol4csum_l3l4csum,		1, 1, 0, 0, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F)		\
+T(sec_ts_vlan,				1, 1, 0, 0, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | VLAN_F)				\
+T(sec_ts_vlan_l3l4csum,			1, 1, 0, 0, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | VLAN_F | L3L4CSUM_F)			\
+T(sec_ts_vlan_ol3ol4csum,		1, 1, 0, 0, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F)		\
+T(sec_ts_vlan_ol3ol4csum_l3l4csum,	1, 1, 0, 0, 1, 1, 1,	8,	\
+		T_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_ts_noff,				1, 1, 0, 1, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F)				\
+T(sec_ts_noff_l3l4csum,			1, 1, 0, 1, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | L3L4CSUM_F)			\
+T(sec_ts_noff_ol3ol4csum,		1, 1, 0, 1, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F)		\
+T(sec_ts_noff_ol3ol4csum_l3l4csum,	1, 1, 0, 1, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_ts_noff_vlan,			1, 1, 0, 1, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F)			\
+T(sec_ts_noff_vlan_l3l4csum,		1, 1, 0, 1, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)		\
+T(sec_ts_noff_vlan_ol3ol4csum,		1, 1, 0, 1, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)	\
+T(sec_ts_noff_vlan_ol3ol4csum_l3l4csum,	1, 1, 0, 1, 1, 1, 1,	8,	\
+		T_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F)\
+T(sec_ts_tso,				1, 1, 1, 0, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F)				\
+T(sec_ts_tso_l3l4csum,			1, 1, 1, 0, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | L3L4CSUM_F)			\
+T(sec_ts_tso_ol3ol4csum,		1, 1, 1, 0, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F)			\
+T(sec_ts_tso_ol3ol4csum_l3l4csum,	1, 1, 1, 0, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F)	\
+T(sec_ts_tso_vlan,			1, 1, 1, 0, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F)			\
+T(sec_ts_tso_vlan_l3l4csum,		1, 1, 1, 0, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F | L3L4CSUM_F)		\
+T(sec_ts_tso_vlan_ol3ol4csum,		1, 1, 1, 0, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F)	\
+T(sec_ts_tso_vlan_ol3ol4csum_l3l4csum,	1, 1, 1, 0, 1, 1, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_tso_noff,			1, 1, 1, 1, 0, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F)			\
+T(sec_ts_tso_noff_l3l4csum,		1, 1, 1, 1, 0, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | L3L4CSUM_F)		\
+T(sec_ts_tso_noff_ol3ol4csum,		1, 1, 1, 1, 0, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F)	\
+T(sec_ts_tso_noff_ol3ol4csum_l3l4csum,	1, 1, 1, 1, 0, 1, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_tso_noff_vlan,			1, 1, 1, 1, 1, 0, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F)		\
+T(sec_ts_tso_noff_vlan_l3l4csum,	1, 1, 1, 1, 1, 0, 1,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F)	\
+T(sec_ts_tso_noff_vlan_ol3ol4csum,	1, 1, 1, 1, 1, 1, 0,	8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F)\
+T(sec_ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 1, 8,	\
+		T_SEC_F | TSP_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \
+		L3L4CSUM_F)
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name(           \
 		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);     \
 									       \
diff --git a/drivers/net/cnxk/cn9k_tx_mseg.c b/drivers/net/cnxk/cn9k_tx_mseg.c
index f3c427c..37cba78 100644
--- a/drivers/net/cnxk/cn9k_tx_mseg.c
+++ b/drivers/net/cnxk/cn9k_tx_mseg.c
@@ -5,7 +5,7 @@
 #include "cn9k_ethdev.h"
 #include "cn9k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot				       \
 		cn9k_nix_xmit_pkts_mseg_##name(void *tx_queue,                 \
 					       struct rte_mbuf **tx_pkts,      \
diff --git a/drivers/net/cnxk/cn9k_tx_vec.c b/drivers/net/cnxk/cn9k_tx_vec.c
index 56a3e25..b424f95 100644
--- a/drivers/net/cnxk/cn9k_tx_vec.c
+++ b/drivers/net/cnxk/cn9k_tx_vec.c
@@ -5,7 +5,7 @@
 #include "cn9k_ethdev.h"
 #include "cn9k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			       \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			       \
 	uint16_t __rte_noinline __rte_hot				       \
 		cn9k_nix_xmit_pkts_vec_##name(void *tx_queue,                  \
 					      struct rte_mbuf **tx_pkts,       \
diff --git a/drivers/net/cnxk/cn9k_tx_vec_mseg.c b/drivers/net/cnxk/cn9k_tx_vec_mseg.c
index 0256efd..5fdf0a9 100644
--- a/drivers/net/cnxk/cn9k_tx_vec_mseg.c
+++ b/drivers/net/cnxk/cn9k_tx_vec_mseg.c
@@ -5,7 +5,7 @@
 #include "cn9k_ethdev.h"
 #include "cn9k_tx.h"
 
-#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 	uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_vec_mseg_##name(  \
 		void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts)      \
 	{                                                                      \
-- 
2.8.4


Thread overview: 91+ messages
2021-09-02  2:14 [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 01/27] common/cnxk: add security support for cn9k fast path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 02/27] common/cnxk: add helper API to dump cpt parse header Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 03/27] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 04/27] common/cnxk: change nix debug API and queue API interface Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 05/27] common/cnxk: add nix inline device irq API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 06/27] common/cnxk: add nix inline device init and fini Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 07/27] common/cnxk: add nix inline inbound and outbound support API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 08/27] common/cnxk: dump cpt lf registers on error intr Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 09/27] common/cnxk: align cpt lf enable/disable sequence Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 10/27] common/cnxk: restore nix sqb pool limit before destroy Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 11/27] common/cnxk: add cq enable support in nix Tx path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 12/27] common/cnxk: setup aura bp conf based on nix Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 13/27] common/cnxk: add anti-replay check implementation for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 14/27] common/cnxk: add inline IPsec support in rte flow Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 15/27] net/cnxk: add inline security support for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 16/27] net/cnxk: add inline security support for cn10k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 17/27] net/cnxk: add cn9k Rx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 18/27] net/cnxk: add cn9k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 19/27] net/cnxk: add cn10k Rx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 20/27] net/cnxk: add cn10k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 21/27] net/cnxk: add cn9k anti replay " Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 22/27] net/cnxk: add cn10k IPsec transport mode support Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 23/27] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 24/27] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 25/27] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 26/27] net/cnxk: add devargs for configuring channel mask Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 27/27] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-09-29 12:44 ` [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Jerin Jacob
2021-09-30 17:00 ` [dpdk-dev] [PATCH v2 00/28] " Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-01  5:37   ` [dpdk-dev] [PATCH v2 00/28] net/cnxk: support for inline ipsec Jerin Jacob
2021-10-01 13:39 ` [dpdk-dev] [PATCH v3 " Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-10-06 16:21     ` Ferruh Yigit
2021-10-06 16:44       ` Nithin Kumar Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-10-01 13:40   ` Nithin Dabilpuram [this message]
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-02 13:49   ` [dpdk-dev] [PATCH v3 00/28] net/cnxk: support for inline ipsec Jerin Jacob
