From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <gakhil@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>,
	Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>
Cc: <jerinj@marvell.com>, <dev@dpdk.org>
Subject: [PATCH v2 2/3] net/cnxk: support inline ingress out of place session
Date: Thu, 21 Sep 2023 07:45:47 +0530
Message-ID: <20230921021548.1196858-2-ndabilpuram@marvell.com>
In-Reply-To: <20230921021548.1196858-1-ndabilpuram@marvell.com>

Add support for inline ingress sessions with out-of-place
processing.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/event/cnxk/cn10k_worker.h   |  12 +-
 drivers/net/cnxk/cn10k_ethdev.c     |  13 +-
 drivers/net/cnxk/cn10k_ethdev_sec.c |  43 +++++++
 drivers/net/cnxk/cn10k_rx.h         | 181 ++++++++++++++++++++++++----
 drivers/net/cnxk/cn10k_rxtx.h       |   1 +
 drivers/net/cnxk/cnxk_ethdev.h      |   9 ++
 6 files changed, 229 insertions(+), 30 deletions(-)
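
From the application's point of view, out-of-place ingress is requested
through the `ingress_oop` IPsec SA option and the original encrypted
packet is later fetched via the security OOP dynfield, both introduced
in patch 1/3 of this series. A minimal sketch of the intended usage,
with the rest of the session conf (crypto xform, SPI, etc.) abbreviated
and assumed:

  #include <rte_security.h>

  /* Hedged sketch: request out-of-place processing on an inline
   * inbound IPsec session. 'sec_ctx' and the remaining fields of
   * 'conf' are assumed to be set up already.
   */
  static void *
  create_inb_oop_session(void *sec_ctx,
                         struct rte_security_session_conf *conf,
                         struct rte_mempool *sess_mp)
  {
  	conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
  	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
  	/* Deliver the decrypted packet and keep the original
  	 * (encrypted) buffer reachable via the OOP dynfield.
  	 */
  	conf->ipsec.options.ingress_oop = 1;
  	return rte_security_session_create(sec_ctx, conf, sess_mp);
  }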

diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index b4ee023723..46bfa9dd9d 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -59,9 +59,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
 	uint16_t lmt_id, d_off;
 	struct rte_mbuf **wqe;
 	struct rte_mbuf *mbuf;
+	uint64_t sa_base = 0;
 	uintptr_t cpth = 0;
 	uint8_t loff = 0;
-	uint64_t sa_base;
 	int i;
 
 	mbuf_init |= ((uint64_t)port_id) << 48;
@@ -125,6 +125,11 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
 
 			cpth = ((uintptr_t)mbuf + (uint16_t)d_off);
 
+			/* Update mempool pointer for full mode pkt */
+			if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) &&
+			    !((*(uint64_t *)cpth) & BIT(15)))
+				mbuf->pool = mp;
+
 			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr,
 						       &loff, mbuf, d_off,
 						       flags, mbuf_init);
@@ -199,6 +204,11 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
 			meta_aura = mp ? mp->pool_id : m->pool->pool_id;
 
+			/* Update mempool pointer for full mode pkt */
+			if (mp && (flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) &&
+			    !((*(uint64_t *)cpth) & BIT(15)))
+				((struct rte_mbuf *)mbuf)->pool = mp;
+
 			mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
 				cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
 				(struct rte_mbuf *)mbuf, d_off, flags,
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 4c4acc7cf0..f1504a6873 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -355,11 +355,13 @@ cn10k_nix_rx_queue_meta_aura_update(struct rte_eth_dev *eth_dev)
 		rq = &dev->rqs[i];
 		rxq = eth_dev->data->rx_queues[i];
 		rxq->meta_aura = rq->meta_aura_handle;
+		rxq->meta_pool = dev->nix.meta_mempool;
 		/* Assume meta packet from normal aura if meta aura is not setup
 		 */
 		if (!rxq->meta_aura) {
 			rxq_sp = cnxk_eth_rxq_to_sp(rxq);
 			rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
+			rxq->meta_pool = (uintptr_t)rxq_sp->qconf.mp;
 		}
 	}
 	/* Store mempool in lookup mem */
@@ -639,14 +641,17 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 
 	if (!conf->flags) {
 		/* Clear offload flags on disable */
-		dev->rx_offload_flags &= ~NIX_RX_REAS_F;
+		if (!dev->inb.nb_oop)
+			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
+		dev->inb.reass_en = false;
 		return 0;
 	}
 
-	rc = roc_nix_reassembly_configure(conf->timeout_ms,
-				conf->max_frags);
-	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+	rc = roc_nix_reassembly_configure(conf->timeout_ms, conf->max_frags);
+	if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
 		dev->rx_offload_flags |= NIX_RX_REAS_F;
+		dev->inb.reass_en = true;
+	}
 
 	return rc;
 }
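
Note the interplay above: NIX_RX_REAS_F is shared between inline
reassembly and OOP handling, so disabling reassembly only clears the
flag when no OOP sessions remain, and the new `dev->inb.reass_en`
tracks reassembly state separately. At the ethdev level this path is
driven by the standard reassembly configuration API; a minimal sketch
of the disable call this guards:

  #include <rte_ethdev.h>

  /* Sketch: disable inline IP reassembly on a port. With this patch
   * the PMD keeps NIX_RX_REAS_F internally while OOP sessions are
   * active, since the OOP Rx path reuses the same handler.
   */
  static int
  disable_ip_reassembly(uint16_t port_id)
  {
  	struct rte_eth_ip_reassembly_params conf = {0}; /* flags == 0 disables */

  	return rte_eth_ip_reassembly_conf_set(port_id, &conf);
  }
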
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index a7473922af..9a831634da 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -9,6 +9,7 @@
 #include <rte_pmd_cnxk.h>
 
 #include <cn10k_ethdev.h>
+#include <cn10k_rx.h>
 #include <cnxk_ethdev_mcs.h>
 #include <cnxk_security.h>
 #include <roc_priv.h>
@@ -324,6 +325,7 @@ static const struct rte_security_capability cn10k_eth_sec_ipsec_capabilities[] =
 				.l4_csum_enable = 1,
 				.stats = 1,
 				.esn = 1,
+				.ingress_oop = 1,
 			},
 		},
 		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
@@ -373,6 +375,7 @@ static const struct rte_security_capability cn10k_eth_sec_ipsec_capabilities[] =
 				.l4_csum_enable = 1,
 				.stats = 1,
 				.esn = 1,
+				.ingress_oop = 1,
 			},
 		},
 		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
@@ -396,6 +399,7 @@ static const struct rte_security_capability cn10k_eth_sec_ipsec_capabilities[] =
 				.l4_csum_enable = 1,
 				.stats = 1,
 				.esn = 1,
+				.ingress_oop = 1,
 			},
 		},
 		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
@@ -746,6 +750,20 @@ cn10k_eth_sec_session_create(void *device,
 			return -rte_errno;
 	}
 
+	if (conf->ipsec.options.ingress_oop &&
+	    rte_security_oop_dynfield_offset < 0) {
+		/* Register for security OOP dynfield if required */
+		if (rte_security_oop_dynfield_register() < 0)
+			return -rte_errno;
+	}
+
+	/* We cannot support inbound reassembly and OOP together */
+	if (conf->ipsec.options.ip_reassembly_en &&
+	    conf->ipsec.options.ingress_oop) {
+		plt_err("Cannot support inbound reassembly and OOP together");
+		return -ENOTSUP;
+	}
+
 	ipsec = &conf->ipsec;
 	crypto = conf->crypto_xform;
 	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
@@ -832,6 +850,12 @@ cn10k_eth_sec_session_create(void *device,
 			inb_sa_dptr->w0.s.count_mib_bytes = 1;
 			inb_sa_dptr->w0.s.count_mib_pkts = 1;
 		}
+
+		/* Enable out-of-place processing */
+		if (ipsec->options.ingress_oop)
+			inb_sa_dptr->w0.s.pkt_format =
+				ROC_IE_OT_SA_PKT_FMT_FULL;
+
 		/* Prepare session priv */
 		sess_priv.inb_sa = 1;
 		sess_priv.sa_idx = ipsec->spi & spi_mask;
@@ -843,6 +867,7 @@ cn10k_eth_sec_session_create(void *device,
 		eth_sec->spi = ipsec->spi;
 		eth_sec->inl_dev = !!dev->inb.inl_dev;
 		eth_sec->inb = true;
+		eth_sec->inb_oop = !!ipsec->options.ingress_oop;
 
 		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
 		dev->inb.nb_sess++;
@@ -858,6 +883,15 @@ cn10k_eth_sec_session_create(void *device,
 			inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
 		}
 
+		if (ipsec->options.ingress_oop)
+			dev->inb.nb_oop++;
+
+		/* Update function pointer to handle OOP sessions */
+		if (dev->inb.nb_oop &&
+		    !(dev->rx_offload_flags & NIX_RX_REAS_F)) {
+			dev->rx_offload_flags |= NIX_RX_REAS_F;
+			cn10k_eth_set_rx_function(eth_dev);
+		}
 	} else {
 		struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
 		struct cn10k_outb_priv_data *outb_priv;
@@ -1007,6 +1041,15 @@ cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
 				      sizeof(struct roc_ot_ipsec_inb_sa));
 		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
 		dev->inb.nb_sess--;
+		if (eth_sec->inb_oop)
+			dev->inb.nb_oop--;
+
+		/* Clear offload flag if it was set only for OOP */
+		if (!dev->inb.nb_oop && !dev->inb.reass_en &&
+		    dev->rx_offload_flags & NIX_RX_REAS_F) {
+			dev->rx_offload_flags &= ~NIX_RX_REAS_F;
+			cn10k_eth_set_rx_function(eth_dev);
+		}
 	} else {
 		/* Disable SA */
 		sa_dptr = dev->outb.sa_dptr;
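
On the receive side, a session created with `ingress_oop` delivers the
processed packet through the normal Rx burst while the original
encrypted buffer stays reachable through the OOP dynfield registered in
session create above. A hedged consumer sketch (the accessor comes from
patch 1/3 of this series; error handling omitted):

  #include <rte_mbuf.h>
  #include <rte_security.h>

  static void
  consume_oop_pkt(struct rte_mbuf *pkt)
  {
  	/* Original (encrypted) packet preserved by OOP processing */
  	struct rte_mbuf *orig = *rte_security_oop_dynfield(pkt);

  	/* ... application-specific use of 'orig' and 'pkt' ... */

  	rte_pktmbuf_free(orig);
  	rte_pktmbuf_free(pkt);
  }
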
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 3bf89b8c6c..6533804827 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -402,6 +402,41 @@ nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *hea
 	return head;
 }
 
+static inline struct rte_mbuf *
+nix_sec_oop_process(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *mbuf, uint64_t *mbuf_init)
+{
+	uintptr_t wqe = rte_be_to_cpu_64(hdr->wqe_ptr);
+	union nix_rx_parse_u *inner_rx;
+	struct rte_mbuf *inner;
+	uint16_t data_off;
+
+	inner = ((struct rte_mbuf *)wqe) - 1;
+
+	inner_rx = (union nix_rx_parse_u *)(wqe + 8);
+	inner->pkt_len = inner_rx->pkt_lenm1 + 1;
+	inner->data_len = inner_rx->pkt_lenm1 + 1;
+
+	/* Mark inner mbuf as get */
+	RTE_MEMPOOL_CHECK_COOKIES(inner->pool,
+				  (void **)&inner, 1, 1);
+	/* Update rearm data for the full mbuf as it has a
+	 * CPT parse header that needs to be skipped.
+	 *
+	 * Since the meta pool has no private area while the
+	 * ethdev RQ's first skip accounts for one, calculate
+	 * the actual data offset and update it in the meta mbuf.
+	 */
+	data_off = (uintptr_t)hdr - (uintptr_t)mbuf->buf_addr;
+	data_off += sizeof(struct cpt_parse_hdr_s);
+	data_off += hdr->w0.pad_len;
+	*mbuf_init &= ~0xFFFFUL;
+	*mbuf_init |= (uint64_t)data_off;
+
+	*rte_security_oop_dynfield(mbuf) = inner;
+	/* Return outer instead of inner mbuf as inner mbuf would have original encrypted packet */
+	return mbuf;
+}
+
 static __rte_always_inline struct rte_mbuf *
 nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
 			uintptr_t laddr, uint8_t *loff, struct rte_mbuf *mbuf,
@@ -422,14 +457,18 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
 	if (!(cq_w1 & BIT(11)))
 		return mbuf;
 
-	inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
-				    sizeof(struct rte_mbuf));
+	if (flags & NIX_RX_REAS_F && hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) {
+		inner = nix_sec_oop_process(hdr, mbuf, &mbuf_init);
+	} else {
+		inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
+					    sizeof(struct rte_mbuf));
 
-	/* Store meta in lmtline to free
-	 * Assume all meta's from same aura.
-	 */
-	*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
-	*loff = *loff + 1;
+		/* Store meta in lmtline to free.
+		 * Assume all metas are from the same aura.
+		 */
+		*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
+		*loff = *loff + 1;
+	}
 
 	/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
 	w0 = hdr->w0.u64;
@@ -471,11 +510,13 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
 			 & 0xFF) << 1 : RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 	}
 
-	/* Mark meta mbuf as put */
-	RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);
+	if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) {
+		/* Mark meta mbuf as put */
+		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);
 
-	/* Mark inner mbuf as get */
-	RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);
+		/* Mark inner mbuf as get */
+		RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);
+	}
 
 	/* Skip reassembly processing when multi-seg is enabled */
 	if (!(flags & NIX_RX_MULTI_SEG_F) && (flags & NIX_RX_REAS_F) && hdr->w0.num_frags) {
@@ -522,7 +563,9 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
 	*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
 
 	/* Mark inner mbuf as get */
-	RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);
+	if (!(flags & NIX_RX_REAS_F) ||
+	    hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
+		RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);
 
 	if (!(flags & NIX_RX_MULTI_SEG_F) && flags & NIX_RX_REAS_F && hdr->w0.num_frags) {
 		if ((!(hdr->w0.err_sum) || roc_ie_ot_ucc_is_success(hdr->w3.uc_ccode)) &&
@@ -552,6 +595,19 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
 			nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init);
 			*ol_flags |= inner->ol_flags;
 		}
+	} else if (flags & NIX_RX_REAS_F) {
+		/* No fragmentation, but we may still have to handle an OOP session */
+		if (hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) {
+			uint64_t mbuf_init = 0;
+
+			/* The caller is already set up to return the second
+			 * pass mbuf, so "inner" here is actually the outer
+			 * (meta) mbuf. Store the original buffer pointer in
+			 * the dynfield.
+			 */
+			nix_sec_oop_process(hdr, inner, &mbuf_init);
+			/* Clear and update the lower 16 bits of the data offset */
+			*rearm = (*rearm & ~(BIT_ULL(16) - 1)) | mbuf_init;
+		}
 	}
 }
 #endif
@@ -628,6 +684,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 	uint64_t cq_w1;
 	int64_t len;
 	uint64_t sg;
+	uintptr_t p;
 
 	cq_w1 = *(const uint64_t *)rx;
 	if (flags & NIX_RX_REAS_F)
@@ -635,7 +692,9 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 	/* Use inner rx parse for meta pkts sg list */
 	if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) {
 		const uint64_t *wqe = (const uint64_t *)(mbuf + 1);
-		rx = (const union nix_rx_parse_u *)(wqe + 1);
+
+		if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
+			rx = (const union nix_rx_parse_u *)(wqe + 1);
 	}
 
 	sg = *(const uint64_t *)(rx + 1);
@@ -761,6 +820,31 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		num_frags--;
 		frag_i++;
 		goto again;
+	} else if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) && !reas_success &&
+		   hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) {
+		uintptr_t wqe = rte_be_to_cpu_64(hdr->wqe_ptr);
+
+		/* Process OOP packet inner buffer mseg. reas_success flag is used here only
+		 * to avoid looping.
+		 */
+		mbuf = ((struct rte_mbuf *)wqe) - 1;
+		rx = (const union nix_rx_parse_u *)(wqe + 8);
+		eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
+		sg = *(const uint64_t *)(rx + 1);
+		nb_segs = (sg >> 48) & 0x3;
+
+		len = mbuf->pkt_len;
+		p = (uintptr_t)&mbuf->rearm_data;
+		*(uint64_t *)p = rearm;
+		mbuf->data_len = (sg & 0xFFFF) -
+				 (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
+				  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
+		head = mbuf;
+		head->nb_segs = nb_segs;
+		/* Using this flag to avoid looping in case of OOP */
+		reas_success = true;
+		goto again;
 	}
 
 	/* Update for last failure fragment */
@@ -899,6 +983,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	const uint64_t mbuf_init = rxq->mbuf_initializer;
 	const void *lookup_mem = rxq->lookup_mem;
 	const uint64_t data_off = rxq->data_off;
+	struct rte_mempool *meta_pool = NULL;
 	const uintptr_t desc = rxq->desc;
 	const uint64_t wdata = rxq->wdata;
 	const uint32_t qmask = rxq->qmask;
@@ -923,6 +1008,8 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 		laddr = lbase;
 		laddr += 8;
+		if (flags & NIX_RX_REAS_F)
+			meta_pool = (struct rte_mempool *)rxq->meta_pool;
 	}
 
 	while (packets < nb_pkts) {
@@ -943,6 +1030,11 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 
 			cpth = ((uintptr_t)mbuf + (uint16_t)data_off);
 
+			/* Update mempool pointer for full mode pkt */
+			if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) &&
+			    !((*(uint64_t *)cpth) & BIT(15)))
+				mbuf->pool = meta_pool;
+
 			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr,
 						       &loff, mbuf, data_off,
 						       flags, mbuf_init);
@@ -1047,6 +1139,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
 	uint8_t loff = 0, lnum = 0, shft = 0;
+	struct rte_mempool *meta_pool = NULL;
 	uint8x16_t f0, f1, f2, f3;
 	uint16_t lmt_id, d_off;
 	uint64_t lbase, laddr;
@@ -1099,6 +1192,9 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			/* Get SA Base from lookup tbl using port_id */
 			port = mbuf_initializer >> 48;
 			sa_base = cnxk_nix_sa_base_get(port, lookup_mem);
+			if (flags & NIX_RX_REAS_F)
+				meta_pool = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port,
+											lookup_mem);
 
 			lbase = lmt_base;
 		} else {
@@ -1106,6 +1202,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			d_off = rxq->data_off;
 			sa_base = rxq->sa_base;
 			lbase = rxq->lmt_base;
+			if (flags & NIX_RX_REAS_F)
+				meta_pool = (struct rte_mempool *)rxq->meta_pool;
 		}
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
@@ -1510,10 +1608,19 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				uint16_t len = vget_lane_u16(lens, 0);
 
 				cpth0 = (uintptr_t)mbuf0 + d_off;
+
 				/* Free meta to aura */
-				NIX_PUSH_META_TO_FREE(mbuf0, laddr, &loff);
-				mbuf01 = vsetq_lane_u64(wqe, mbuf01, 0);
-				mbuf0 = (struct rte_mbuf *)wqe;
+				if (!(flags & NIX_RX_REAS_F) ||
+				    *(uint64_t *)cpth0 & BIT_ULL(15)) {
+					NIX_PUSH_META_TO_FREE(mbuf0, laddr,
+							      &loff);
+					mbuf01 = vsetq_lane_u64(wqe, mbuf01, 0);
+					mbuf0 = (struct rte_mbuf *)wqe;
+				} else if (flags & NIX_RX_REAS_F) {
+					/* Update meta pool for full mode pkts */
+					mbuf0->pool = meta_pool;
+				}
 
 				/* Update pkt_len and data_len */
 				f0 = vsetq_lane_u16(len, f0, 2);
@@ -1535,10 +1642,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				uint16_t len = vget_lane_u16(lens, 1);
 
 				cpth1 = (uintptr_t)mbuf1 + d_off;
+
 				/* Free meta to aura */
-				NIX_PUSH_META_TO_FREE(mbuf1, laddr, &loff);
-				mbuf01 = vsetq_lane_u64(wqe, mbuf01, 1);
-				mbuf1 = (struct rte_mbuf *)wqe;
+				if (!(flags & NIX_RX_REAS_F) ||
+				    *(uint64_t *)cpth1 & BIT_ULL(15)) {
+					NIX_PUSH_META_TO_FREE(mbuf1, laddr,
+							      &loff);
+					mbuf01 = vsetq_lane_u64(wqe, mbuf01, 1);
+					mbuf1 = (struct rte_mbuf *)wqe;
+				} else if (flags & NIX_RX_REAS_F) {
+					/* Update meta pool for full mode pkts */
+					mbuf1->pool = meta_pool;
+				}
 
 				/* Update pkt_len and data_len */
 				f1 = vsetq_lane_u16(len, f1, 2);
@@ -1559,10 +1674,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				uint16_t len = vget_lane_u16(lens, 2);
 
 				cpth2 = (uintptr_t)mbuf2 + d_off;
+
 				/* Free meta to aura */
-				NIX_PUSH_META_TO_FREE(mbuf2, laddr, &loff);
-				mbuf23 = vsetq_lane_u64(wqe, mbuf23, 0);
-				mbuf2 = (struct rte_mbuf *)wqe;
+				if (!(flags & NIX_RX_REAS_F) ||
+				    *(uint64_t *)cpth2 & BIT_ULL(15)) {
+					NIX_PUSH_META_TO_FREE(mbuf2, laddr,
+							      &loff);
+					mbuf23 = vsetq_lane_u64(wqe, mbuf23, 0);
+					mbuf2 = (struct rte_mbuf *)wqe;
+				} else if (flags & NIX_RX_REAS_F) {
+					/* Update meta pool for full mode pkts */
+					mbuf2->pool = meta_pool;
+				}
 
 				/* Update pkt_len and data_len */
 				f2 = vsetq_lane_u16(len, f2, 2);
@@ -1583,10 +1706,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				uint16_t len = vget_lane_u16(lens, 3);
 
 				cpth3 = (uintptr_t)mbuf3 + d_off;
+
 				/* Free meta to aura */
-				NIX_PUSH_META_TO_FREE(mbuf3, laddr, &loff);
-				mbuf23 = vsetq_lane_u64(wqe, mbuf23, 1);
-				mbuf3 = (struct rte_mbuf *)wqe;
+				if (!(flags & NIX_RX_REAS_F) ||
+				    *(uint64_t *)cpth3 & BIT_ULL(15)) {
+					NIX_PUSH_META_TO_FREE(mbuf3, laddr,
+							      &loff);
+					mbuf23 = vsetq_lane_u64(wqe, mbuf23, 1);
+					mbuf3 = (struct rte_mbuf *)wqe;
+				} else if (flags & NIX_RX_REAS_F) {
+					/* Update meta pool for full mode pkts */
+					mbuf3->pool = meta_pool;
+				}
 
 				/* Update pkt_len and data_len */
 				f3 = vsetq_lane_u16(len, f3, 2);
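
A note on the rearm fix-up used throughout the OOP path in this file:
the low 16 bits of an mbuf's rearm_data word alias data_off, and since
the meta pool carries no private area while the ethdev RQ first-skip
assumes one, the offset is recomputed from the CPT parse header
position. A hedged restatement of the computation in
nix_sec_oop_process():

  /* Sketch of the offset math in nix_sec_oop_process(): point data_off
   * just past the CPT parse header (plus padding) in the meta buffer
   * and splice it into the low 16 bits of the cached rearm template.
   */
  static inline uint64_t
  oop_rearm_fixup(uint64_t rearm, const struct cpt_parse_hdr_s *hdr,
  		const struct rte_mbuf *mbuf)
  {
  	uint16_t data_off = (uintptr_t)hdr - (uintptr_t)mbuf->buf_addr;

  	data_off += sizeof(struct cpt_parse_hdr_s);
  	data_off += hdr->w0.pad_len;
  	return (rearm & ~0xFFFFUL) | data_off;
  }
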
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index b4287e2864..aeffc4ac92 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -78,6 +78,7 @@ struct cn10k_eth_rxq {
 	uint64_t sa_base;
 	uint64_t lmt_base;
 	uint64_t meta_aura;
+	uintptr_t meta_pool;
 	uint16_t rq;
 	struct cnxk_timesync_info *tstamp;
 } __plt_cache_aligned;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index ed531fb277..2b9ff11a6a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -219,6 +219,9 @@ struct cnxk_eth_sec_sess {
 
 	/* Inbound session on inl dev */
 	bool inl_dev;
+
+	/* Out-Of-Place processing */
+	bool inb_oop;
 };
 
 TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
@@ -246,6 +249,12 @@ struct cnxk_eth_dev_sec_inb {
 	/* DPTR for WRITE_SA microcode op */
 	void *sa_dptr;
 
+	/* Number of oop sessions */
+	uint16_t nb_oop;
+
+	/* Reassembly enabled */
+	bool reass_en;
+
 	/* Lock to synchronize sa setup/release */
 	rte_spinlock_t lock;
 };
-- 
2.25.1


Thread overview: 26+ messages
2023-03-09  8:56 [RFC 1/2] security: introduce out of place support for inline ingress Nithin Dabilpuram
2023-03-09  8:56 ` [RFC 2/2] test/security: add unittest for inline ingress oop Nithin Dabilpuram
2023-04-11 10:04 ` [PATCH 1/3] security: introduce out of place support for inline ingress Nithin Dabilpuram
2023-04-11 10:04   ` [PATCH 2/3] net/cnxk: support inline ingress out of place session Nithin Dabilpuram
2023-04-11 10:04   ` [PATCH 3/3] test/security: add unittest for inline ingress oop Nithin Dabilpuram
2023-04-11 18:05   ` [PATCH 1/3] security: introduce out of place support for inline ingress Stephen Hemminger
2023-04-18  8:33     ` Jerin Jacob
2023-04-24 22:41       ` Thomas Monjalon
2023-05-19  8:07         ` Jerin Jacob
2023-05-30  9:23           ` Jerin Jacob
2023-05-30 13:51             ` Thomas Monjalon
2023-05-31  9:26               ` Morten Brørup
2023-07-01  7:15   ` [PATCH] doc: announce addition of new security IPsec SA option Nithin Dabilpuram
2023-07-03 14:35     ` Akhil Goyal
2023-07-04  5:15     ` [PATCH v2] " Nithin Dabilpuram
2023-07-05 14:07       ` Jerin Jacob
2023-07-11  8:55         ` [EXT] " Akhil Goyal
2023-07-06 23:05     ` [PATCH] " Ji, Kai
2023-08-11  8:54 ` [PATCH 1/3] security: introduce out of place support for inline ingress Nithin Dabilpuram
2023-08-11  8:54   ` [PATCH 2/3] net/cnxk: support inline ingress out of place session Nithin Dabilpuram
2023-08-11  8:54   ` [PATCH 3/3] test/security: add unittest for inline ingress oop Nithin Dabilpuram
2023-09-19 19:55   ` [PATCH 1/3] security: introduce out of place support for inline ingress Akhil Goyal
2023-09-21  2:15 ` [PATCH v2 " Nithin Dabilpuram
2023-09-21  2:15   ` Nithin Dabilpuram [this message]
2023-09-21  2:15   ` [PATCH v2 3/3] test/security: add unittest for inline ingress oop Nithin Dabilpuram
2023-09-21 10:44   ` [PATCH v2 1/3] security: introduce out of place support for inline ingress Akhil Goyal
