* [PATCH v1] ip_frag: add IPv4 fragment copy packet API
@ 2022-06-09  2:39 Huichao Cai
  2022-06-09 14:19 ` [PATCH v2] " Huichao Cai
  0 siblings, 1 reply; 26+ messages in thread
From: Huichao Cai @ 2022-06-09  2:39 UTC (permalink / raw)
  To: dev; +Cc: konstantin.ananyev

Some NIC drivers support the MBUF_FAST_FREE offload (the device
supports optimization for fast release of mbufs; when set, the
application must guarantee that all mbufs sent on a queue come from
the same mempool and have refcnt = 1). In order to adapt to this
offload, add this API. Also add some test cases for this API.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/test_ipfrag.c               |   8 +-
 lib/ip_frag/rte_ip_frag.h            |  26 ++++++
 lib/ip_frag/rte_ipv4_fragmentation.c | 165 +++++++++++++++++++++++++++++++++++
 lib/ip_frag/version.map              |   1 +
 4 files changed, 199 insertions(+), 1 deletion(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index 610a86b..e565e86 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -408,10 +408,16 @@ static void ut_teardown(void)
 		}
 
 		if (tests[i].ipv == 4)
-			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+			if (i % 2)
+				len = rte_ipv4_fragment_packet(b, pkts_out,
+						       BURST,
 						       tests[i].mtu_size,
 						       direct_pool,
 						       indirect_pool);
+			else
+				len = rte_ipv4_fragment_copy_packet(b, pkts_out,
+							   BURST,
+							   tests[i].mtu_size);
 		else if (tests[i].ipv == 6)
 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 7d2abe1..767320b 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -179,6 +179,32 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 			struct rte_mempool *pool_indirect);
 
 /**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   The size of the array that stores the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * <errno>.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size);
+
+/**
  * This function implements reassembly of fragmented IPv4 packets.
  * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
  *
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index a562424..1142b5a 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -262,3 +262,168 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 
 	return out_pkt_pos;
 }
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   The size of the array that stores the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * <errno>.
+ */
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) ||
+	    unlikely(pkt_in->pool == NULL) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+	    RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+				    IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate buffer from pkt_in->pool*/
+		out_pkt = rte_pktmbuf_alloc(pkt_in->pool);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			rte_memcpy(
+			    rte_pktmbuf_mtod_offset(
+				    out_pkt, char *, out_pkt->pkt_len),
+			    rte_pktmbuf_mtod_offset(
+				    in_seg, char *, in_seg_data_pos),
+			    len);
+			out_pkt->data_len = (uint16_t)(len +
+				out_pkt->data_len);
+
+			out_pkt->pkt_len = (uint16_t)(len +
+				out_pkt->pkt_len);
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+		    (uint16_t)out_pkt->pkt_len,
+		    flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
index e537224..4aa66bc 100644
--- a/lib/ip_frag/version.map
+++ b/lib/ip_frag/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
 	global:
 
 	rte_ip_frag_table_del_expired_entries;
+	rte_ipv4_fragment_copy_packet;
 };
-- 
1.8.3.1



* [PATCH v2] ip_frag: add IPv4 fragment copy packet API
  2022-06-09  2:39 [PATCH v1] ip_frag: add IPv4 fragment copy packet API Huichao Cai
@ 2022-06-09 14:19 ` Huichao Cai
  2022-07-10 23:35   ` Konstantin Ananyev
  2022-07-22 13:01   ` [PATCH v3] " Huichao Cai
  0 siblings, 2 replies; 26+ messages in thread
From: Huichao Cai @ 2022-06-09 14:19 UTC (permalink / raw)
  To: dev; +Cc: konstantin.ananyev

Some NIC drivers support the MBUF_FAST_FREE offload (the device
supports optimization for fast release of mbufs; when set, the
application must guarantee that all mbufs sent on a queue come from
the same mempool and have refcnt = 1). In order to adapt to this
offload, add this API. Also add some test cases for this API.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/test_ipfrag.c               |   8 +-
 lib/ip_frag/rte_ip_frag.h            |  26 ++++++
 lib/ip_frag/rte_ipv4_fragmentation.c | 165 +++++++++++++++++++++++++++++++++++
 lib/ip_frag/version.map              |   1 +
 4 files changed, 199 insertions(+), 1 deletion(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index 610a86b..e565e86 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -408,10 +408,16 @@ static void ut_teardown(void)
 		}
 
 		if (tests[i].ipv == 4)
-			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+			if (i % 2)
+				len = rte_ipv4_fragment_packet(b, pkts_out,
+						       BURST,
 						       tests[i].mtu_size,
 						       direct_pool,
 						       indirect_pool);
+			else
+				len = rte_ipv4_fragment_copy_packet(b, pkts_out,
+							   BURST,
+							   tests[i].mtu_size);
 		else if (tests[i].ipv == 6)
 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 7d2abe1..3e1a9ac 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -179,6 +179,32 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 			struct rte_mempool *pool_indirect);
 
 /**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   The size of the array that stores the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size);
+
+/**
  * This function implements reassembly of fragmented IPv4 packets.
  * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
  *
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index a562424..9e050cc 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -262,3 +262,168 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 
 	return out_pkt_pos;
 }
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   The size of the array that stores the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) ||
+	    unlikely(pkt_in->pool == NULL) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+	    RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+				    IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate buffer from pkt_in->pool*/
+		out_pkt = rte_pktmbuf_alloc(pkt_in->pool);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			rte_memcpy(
+			    rte_pktmbuf_mtod_offset(
+				    out_pkt, char *, out_pkt->pkt_len),
+			    rte_pktmbuf_mtod_offset(
+				    in_seg, char *, in_seg_data_pos),
+			    len);
+			out_pkt->data_len = (uint16_t)(len +
+				out_pkt->data_len);
+
+			out_pkt->pkt_len = (uint16_t)(len +
+				out_pkt->pkt_len);
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+		    (uint16_t)out_pkt->pkt_len,
+		    flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
index e537224..4aa66bc 100644
--- a/lib/ip_frag/version.map
+++ b/lib/ip_frag/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
 	global:
 
 	rte_ip_frag_table_del_expired_entries;
+	rte_ipv4_fragment_copy_packet;
 };
-- 
1.8.3.1



* Re: [PATCH v2] ip_frag: add IPv4 fragment copy packet API
  2022-06-09 14:19 ` [PATCH v2] " Huichao Cai
@ 2022-07-10 23:35   ` Konstantin Ananyev
  2022-07-11  9:14     ` Konstantin Ananyev
  2022-07-22 13:01   ` [PATCH v3] " Huichao Cai
  1 sibling, 1 reply; 26+ messages in thread
From: Konstantin Ananyev @ 2022-07-10 23:35 UTC (permalink / raw)
  To: chcchc88; +Cc: dev

 > Some NIC drivers support the MBUF_FAST_FREE offload (the device
 > supports optimization for fast release of mbufs; when set, the
 > application must guarantee that all mbufs sent on a queue come from
 > the same mempool and have refcnt = 1). In order to adapt to this
 > offload, add this API. Also add some test cases for this API.
 >
 > Signed-off-by: Huichao Cai <chcchc88@163.com>
 > ---

...

 > --- a/lib/ip_frag/rte_ip_frag.h
 > +++ b/lib/ip_frag/rte_ip_frag.h
 > @@ -179,6 +179,32 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf
 > *pkt_in,
 >               struct rte_mempool *pool_indirect);
 >
 >   /**
 > + * IPv4 fragmentation by copy.
 > + *
 > + * This function implements the fragmentation of IPv4 packets by copy.
 > + *
 > + * @param pkt_in
 > + *   The input packet.
 > + * @param pkts_out
 > + *   Array storing the output fragments.
 > + * @param nb_pkts_out
 > + *   The size of the array that stores the output fragments.
 > + * @param mtu_size
 > + *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing
 > IPv4
 > + *   datagrams. This value includes the size of the IPv4 header.
 > + * @return
 > + *   Upon successful completion - number of output fragments placed
 > + *   in the pkts_out array.
 > + *   Otherwise - (-1) * errno.
 > + */
 > +__rte_experimental
 > +int32_t
 > +rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
 > +    struct rte_mbuf **pkts_out,
 > +    uint16_t nb_pkts_out,
 > +    uint16_t mtu_size);
 > +
 > +/**
 >    * This function implements reassembly of fragmented IPv4 packets.
 >    * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
 >    *
 > diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c
 > b/lib/ip_frag/rte_ipv4_fragmentation.c
 > index a562424..9e050cc 100644
 > --- a/lib/ip_frag/rte_ipv4_fragmentation.c
 > +++ b/lib/ip_frag/rte_ipv4_fragmentation.c
 > @@ -262,3 +262,168 @@ static inline uint16_t
 > __create_ipopt_frag_hdr(uint8_t *iph,
 >
 >       return out_pkt_pos;
 >   }
 > +
 > +/**
 > + * IPv4 fragmentation by copy.
 > + *
 > + * This function implements the fragmentation of IPv4 packets by copy.
 > + *
 > + * @param pkt_in
 > + *   The input packet.
 > + * @param pkts_out
 > + *   Array storing the output fragments.
 > + * @param nb_pkts_out
 > + *   The size of the array that stores the output fragments.
 > + * @param mtu_size
 > + *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing
 > IPv4
 > + *   datagrams. This value includes the size of the IPv4 header.
 > + * @return
 > + *   Upon successful completion - number of output fragments placed
 > + *   in the pkts_out array.
 > + *   Otherwise - (-1) * errno.
 > + */
 > +int32_t
 > +rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
 > +    struct rte_mbuf **pkts_out,
 > +    uint16_t nb_pkts_out,
 > +    uint16_t mtu_size)
 > +{
 > +    struct rte_mbuf *in_seg = NULL;
 > +    struct rte_ipv4_hdr *in_hdr;
 > +    uint32_t out_pkt_pos, in_seg_data_pos;
 > +    uint32_t more_in_segs;
 > +    uint16_t fragment_offset, flag_offset, frag_size, header_len;
 > +    uint16_t frag_bytes_remaining;
 > +    uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
 > +    uint16_t ipopt_len;
 > +
 > +    /*
 > +     * Formal parameter checking.
 > +     */
 > +    if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
 > +        unlikely(nb_pkts_out == 0) ||
 > +        unlikely(pkt_in->pool == NULL) ||
 > +        unlikely(mtu_size < RTE_ETHER_MIN_MTU))
 > +        return -EINVAL;
 > +
 > +    in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
 > +    header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
 > +        RTE_IPV4_IHL_MULTIPLIER;
 > +
 > +    /* Check IP header length */
 > +    if (unlikely(pkt_in->data_len < header_len) ||
 > +        unlikely(mtu_size < header_len))
 > +        return -EINVAL;
 > +
 > +    /*
 > +     * Ensure the IP payload length of all fragments is aligned to a
 > +     * multiple of 8 bytes as per RFC791 section 2.3.
 > +     */
 > +    frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
 > +                    IPV4_HDR_FO_ALIGN);
 > +
 > +    flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
 > +
 > +    /* If Don't Fragment flag is set */
 > +    if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
 > +        return -ENOTSUP;
 > +
 > +    /* Check that pkts_out is big enough to hold all fragments */
 > +    if (unlikely(frag_size * nb_pkts_out <
 > +        (uint16_t)(pkt_in->pkt_len - header_len)))
 > +        return -EINVAL;
 > +
 > +    in_seg = pkt_in;
 > +    in_seg_data_pos = header_len;
 > +    out_pkt_pos = 0;
 > +    fragment_offset = 0;
 > +
 > +    ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
 > +    if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
 > +        return -EINVAL;
 > +
 > +    more_in_segs = 1;
 > +    while (likely(more_in_segs)) {
 > +        struct rte_mbuf *out_pkt = NULL;
 > +        uint32_t more_out_segs;
 > +        struct rte_ipv4_hdr *out_hdr;
 > +
 > +        /* Allocate buffer from pkt_in->pool*/
 > +        out_pkt = rte_pktmbuf_alloc(pkt_in->pool);


Instead of implicitly assuming that output mbufs will be allocated
from the pkt_in pool, it would be better to have an explicit
output_pool parameter for that function,
in the same way we have it for rte_ipv4_fragment_packet().
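
For example, mirroring the pool arguments of rte_ipv4_fragment_packet(),
the prototype might become:

	int32_t
	rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out,
		uint16_t mtu_size,
		struct rte_mempool *pool_direct);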

 > +        if (unlikely(out_pkt == NULL)) {
 > +            __free_fragments(pkts_out, out_pkt_pos);
 > +            return -ENOMEM;
 > +        }
 > +
 > +        /* Reserve space for the IP header that will be built later */
 > +        out_pkt->data_len = header_len;
 > +        out_pkt->pkt_len = header_len;
 > +        frag_bytes_remaining = frag_size;
 > +
 > +        more_out_segs = 1;
 > +        while (likely(more_out_segs && more_in_segs)) {


If I understand correctly, here you assume that out_pkt will always
be big enough to hold an entire fragment, right?
But that cannot always be the case, and we probably shouldn't assume
that for a generic function.
I suppose the safest way would be either to use rte_pktmbuf_copy()
here directly or to do something similar to what that function does
ourselves here.
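
For example, a per-fragment sketch of that approach ("mp", "payload_off",
"frag_size" and "header_len" stand for the values your loop already
tracks) could be:

	struct rte_mbuf *frag;
	struct rte_ipv4_hdr *hdr;

	/* copy one fragment's worth of payload; rte_pktmbuf_copy()
	 * chains extra mbufs from 'mp' by itself when a single buffer
	 * cannot hold the whole fragment */
	frag = rte_pktmbuf_copy(pkt_in, mp, payload_off, frag_size);
	if (frag == NULL)
		return -ENOMEM;	/* after freeing fragments made so far */

	/* use the headroom for this fragment's IPv4 header */
	hdr = (struct rte_ipv4_hdr *)rte_pktmbuf_prepend(frag, header_len);
	if (hdr == NULL) {
		rte_pktmbuf_free(frag);
		return -ENOMEM;
	}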


 > +            uint32_t len;
 > +
 > +            len = frag_bytes_remaining;
 > +            if (len > (in_seg->data_len - in_seg_data_pos))
 > +                len = in_seg->data_len - in_seg_data_pos;
 > +
 > +            rte_memcpy(
 > +                rte_pktmbuf_mtod_offset(
 > +                    out_pkt, char *, out_pkt->pkt_len),
 > +                rte_pktmbuf_mtod_offset(
 > +                    in_seg, char *, in_seg_data_pos),
 > +                len);
 > +            out_pkt->data_len = (uint16_t)(len +
 > +                out_pkt->data_len);
 > +
 > +            out_pkt->pkt_len = (uint16_t)(len +
 > +                out_pkt->pkt_len);
 > +            in_seg_data_pos += len;
 > +            frag_bytes_remaining -= len;
 > +
 > +            /* Current output packet (i.e. fragment) done ? */
 > +            if (unlikely(frag_bytes_remaining == 0))
 > +                more_out_segs = 0;
 > +
 > +            /* Current input segment done ? */
 > +            if (unlikely(in_seg_data_pos == in_seg->data_len)) {
 > +                in_seg = in_seg->next;
 > +                in_seg_data_pos = 0;
 > +
 > +                if (unlikely(in_seg == NULL))
 > +                    more_in_segs = 0;
 > +            }
 > +        }
 > +
 > +        /* Build the IP header */
 > +
 > +        out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
 > +
 > +        __fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
 > +            (uint16_t)out_pkt->pkt_len,
 > +            flag_offset, fragment_offset, more_in_segs);
 > +
 > +        if (unlikely((fragment_offset == 0) && (ipopt_len) &&
 > +                ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
 > +            ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
 > +                ipopt_len, ipopt_frag_hdr);
 > +            fragment_offset = (uint16_t)(fragment_offset +
 > +                out_pkt->pkt_len - header_len);
 > +            out_pkt->l3_len = header_len;
 > +
 > +            header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
 > +            in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
 > +        } else {
 > +            fragment_offset = (uint16_t)(fragment_offset +
 > +                out_pkt->pkt_len - header_len);
 > +            out_pkt->l3_len = header_len;
 > +        }
 > +
 > +        /* Write the fragment to the output list */
 > +        pkts_out[out_pkt_pos] = out_pkt;
 > +        out_pkt_pos++;
 > +    }
 > +
 > +    return out_pkt_pos;
 > +}
 > diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
 > index e537224..4aa66bc 100644
 > --- a/lib/ip_frag/version.map
 > +++ b/lib/ip_frag/version.map
 > @@ -17,4 +17,5 @@ EXPERIMENTAL {
 >       global:
 >
 >       rte_ip_frag_table_del_expired_entries;
 > +    rte_ipv4_fragment_copy_packet;
 >   };


* Re: [PATCH v2] ip_frag: add IPv4 fragment copy packet API
  2022-07-10 23:35   ` Konstantin Ananyev
@ 2022-07-11  9:14     ` Konstantin Ananyev
  2022-07-15  8:05       ` Huichao Cai
  0 siblings, 1 reply; 26+ messages in thread
From: Konstantin Ananyev @ 2022-07-11  9:14 UTC (permalink / raw)
  To: chcchc88; +Cc: dev

On 11/07/2022 00:35, Konstantin Ananyev wrote:
>  > Some NIC drivers support the MBUF_FAST_FREE offload (the device
>  > supports optimization for fast release of mbufs; when set, the
>  > application must guarantee that all mbufs sent on a queue come from
>  > the same mempool and have refcnt = 1). In order to adapt to this
>  > offload, add this API. Also add some test cases for this API.
>  >
>  > Signed-off-by: Huichao Cai <chcchc88@163.com>
>  > ---
> 
> ...
> 
>  > --- a/lib/ip_frag/rte_ip_frag.h
>  > +++ b/lib/ip_frag/rte_ip_frag.h
>  > @@ -179,6 +179,32 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf
>  > *pkt_in,
>  >               struct rte_mempool *pool_indirect);
>  >
>  >   /**
>  > + * IPv4 fragmentation by copy.
>  > + *
>  > + * This function implements the fragmentation of IPv4 packets by copy.
>  > + *
>  > + * @param pkt_in
>  > + *   The input packet.
>  > + * @param pkts_out
>  > + *   Array storing the output fragments.
>  > + * @param nb_pkts_out
>  > + *   The size of the array that stores the output fragments.
>  > + * @param mtu_size
>  > + *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing
>  > IPv4
>  > + *   datagrams. This value includes the size of the IPv4 header.
>  > + * @return
>  > + *   Upon successful completion - number of output fragments placed
>  > + *   in the pkts_out array.
>  > + *   Otherwise - (-1) * errno.
>  > + */
>  > +__rte_experimental
>  > +int32_t
>  > +rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
>  > +    struct rte_mbuf **pkts_out,
>  > +    uint16_t nb_pkts_out,
>  > +    uint16_t mtu_size);
>  > +
>  > +/**
>  >    * This function implements reassembly of fragmented IPv4 packets.
>  >    * Incoming mbufs should have its l2_len/l3_len fields setup 
> correctly.
>  >    *
>  > diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c
>  > b/lib/ip_frag/rte_ipv4_fragmentation.c
>  > index a562424..9e050cc 100644
>  > --- a/lib/ip_frag/rte_ipv4_fragmentation.c
>  > +++ b/lib/ip_frag/rte_ipv4_fragmentation.c
>  > @@ -262,3 +262,168 @@ static inline uint16_t
>  > __create_ipopt_frag_hdr(uint8_t *iph,
>  >
>  >       return out_pkt_pos;
>  >   }
>  > +
>  > +/**
>  > + * IPv4 fragmentation by copy.
>  > + *
>  > + * This function implements the fragmentation of IPv4 packets by copy.
>  > + *
>  > + * @param pkt_in
>  > + *   The input packet.
>  > + * @param pkts_out
>  > + *   Array storing the output fragments.
>  > + * @param nb_pkts_out
>  > + *   The size of the array that stores the output fragments.
>  > + * @param mtu_size
>  > + *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing
>  > IPv4
>  > + *   datagrams. This value includes the size of the IPv4 header.
>  > + * @return
>  > + *   Upon successful completion - number of output fragments placed
>  > + *   in the pkts_out array.
>  > + *   Otherwise - (-1) * errno.
>  > + */
>  > +int32_t
>  > +rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
>  > +    struct rte_mbuf **pkts_out,
>  > +    uint16_t nb_pkts_out,
>  > +    uint16_t mtu_size)


Forgot to mention: the new API has to be experimental.

>  > +{
>  > +    struct rte_mbuf *in_seg = NULL;
>  > +    struct rte_ipv4_hdr *in_hdr;
>  > +    uint32_t out_pkt_pos, in_seg_data_pos;
>  > +    uint32_t more_in_segs;
>  > +    uint16_t fragment_offset, flag_offset, frag_size, header_len;
>  > +    uint16_t frag_bytes_remaining;
>  > +    uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
>  > +    uint16_t ipopt_len;
>  > +
>  > +    /*
>  > +     * Formal parameter checking.
>  > +     */
>  > +    if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
>  > +        unlikely(nb_pkts_out == 0) ||
>  > +        unlikely(pkt_in->pool == NULL) ||
>  > +        unlikely(mtu_size < RTE_ETHER_MIN_MTU))
>  > +        return -EINVAL;
>  > +
>  > +    in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
>  > +    header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
>  > +        RTE_IPV4_IHL_MULTIPLIER;
>  > +
>  > +    /* Check IP header length */
>  > +    if (unlikely(pkt_in->data_len < header_len) ||
>  > +        unlikely(mtu_size < header_len))
>  > +        return -EINVAL;
>  > +
>  > +    /*
>  > +     * Ensure the IP payload length of all fragments is aligned to a
>  > +     * multiple of 8 bytes as per RFC791 section 2.3.
>  > +     */
>  > +    frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
>  > +                    IPV4_HDR_FO_ALIGN);
>  > +
>  > +    flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
>  > +
>  > +    /* If Don't Fragment flag is set */
>  > +    if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
>  > +        return -ENOTSUP;
>  > +
>  > +    /* Check that pkts_out is big enough to hold all fragments */
>  > +    if (unlikely(frag_size * nb_pkts_out <
>  > +        (uint16_t)(pkt_in->pkt_len - header_len)))
>  > +        return -EINVAL;
>  > +
>  > +    in_seg = pkt_in;
>  > +    in_seg_data_pos = header_len;
>  > +    out_pkt_pos = 0;
>  > +    fragment_offset = 0;
>  > +
>  > +    ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
>  > +    if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
>  > +        return -EINVAL;
>  > +
>  > +    more_in_segs = 1;
>  > +    while (likely(more_in_segs)) {
>  > +        struct rte_mbuf *out_pkt = NULL;
>  > +        uint32_t more_out_segs;
>  > +        struct rte_ipv4_hdr *out_hdr;
>  > +
>  > +        /* Allocate buffer from pkt_in->pool*/
>  > +        out_pkt = rte_pktmbuf_alloc(pkt_in->pool);
> 
> 
> Instead of implicitly assuming that output mbufs will be allocated
> from the pkt_in pool, it would be better to have an explicit
> output_pool parameter for that function,
> in the same way we have it for rte_ipv4_fragment_packet().
> 
>  > +        if (unlikely(out_pkt == NULL)) {
>  > +            __free_fragments(pkts_out, out_pkt_pos);
>  > +            return -ENOMEM;
>  > +        }
>  > +
>  > +        /* Reserve space for the IP header that will be built later */
>  > +        out_pkt->data_len = header_len;
>  > +        out_pkt->pkt_len = header_len;
>  > +        frag_bytes_remaining = frag_size;
>  > +
>  > +        more_out_segs = 1;
>  > +        while (likely(more_out_segs && more_in_segs)) {
> 
> 
> If I understand correctly, here you assume that out_pkt will always
> be big enough to hold an entire fragment, right?
> But that cannot always be the case, and we probably shouldn't assume
> that for a generic function.
> I suppose the safest way would be either to use rte_pktmbuf_copy()
> here directly or to do something similar to what that function does
> ourselves here.
> 
> 
>  > +            uint32_t len;
>  > +
>  > +            len = frag_bytes_remaining;
>  > +            if (len > (in_seg->data_len - in_seg_data_pos))
>  > +                len = in_seg->data_len - in_seg_data_pos;
>  > +
>  > +            rte_memcpy(
>  > +                rte_pktmbuf_mtod_offset(
>  > +                    out_pkt, char *, out_pkt->pkt_len),
>  > +                rte_pktmbuf_mtod_offset(
>  > +                    in_seg, char *, in_seg_data_pos),
>  > +                len);
>  > +            out_pkt->data_len = (uint16_t)(len +
>  > +                out_pkt->data_len);
>  > +
>  > +            out_pkt->pkt_len = (uint16_t)(len +
>  > +                out_pkt->pkt_len);
>  > +            in_seg_data_pos += len;
>  > +            frag_bytes_remaining -= len;
>  > +
>  > +            /* Current output packet (i.e. fragment) done ? */
>  > +            if (unlikely(frag_bytes_remaining == 0))
>  > +                more_out_segs = 0;
>  > +
>  > +            /* Current input segment done ? */
>  > +            if (unlikely(in_seg_data_pos == in_seg->data_len)) {
>  > +                in_seg = in_seg->next;
>  > +                in_seg_data_pos = 0;
>  > +
>  > +                if (unlikely(in_seg == NULL))
>  > +                    more_in_segs = 0;
>  > +            }
>  > +        }
>  > +
>  > +        /* Build the IP header */
>  > +
>  > +        out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
>  > +
>  > +        __fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
>  > +            (uint16_t)out_pkt->pkt_len,
>  > +            flag_offset, fragment_offset, more_in_segs);
>  > +
>  > +        if (unlikely((fragment_offset == 0) && (ipopt_len) &&
>  > +                ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
>  > +            ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
>  > +                ipopt_len, ipopt_frag_hdr);
>  > +            fragment_offset = (uint16_t)(fragment_offset +
>  > +                out_pkt->pkt_len - header_len);
>  > +            out_pkt->l3_len = header_len;
>  > +
>  > +            header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
>  > +            in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
>  > +        } else {
>  > +            fragment_offset = (uint16_t)(fragment_offset +
>  > +                out_pkt->pkt_len - header_len);
>  > +            out_pkt->l3_len = header_len;
>  > +        }
>  > +
>  > +        /* Write the fragment to the output list */
>  > +        pkts_out[out_pkt_pos] = out_pkt;
>  > +        out_pkt_pos++;
>  > +    }
>  > +
>  > +    return out_pkt_pos;
>  > +}
>  > diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
>  > index e537224..4aa66bc 100644
>  > --- a/lib/ip_frag/version.map
>  > +++ b/lib/ip_frag/version.map
>  > @@ -17,4 +17,5 @@ EXPERIMENTAL {
>  >       global:
>  >
>  >       rte_ip_frag_table_del_expired_entries;
>  > +    rte_ipv4_fragment_copy_packet;
>  >   };



* Re:Re: [PATCH v2] ip_frag: add IPv4 fragment copy packet API
  2022-07-11  9:14     ` Konstantin Ananyev
@ 2022-07-15  8:05       ` Huichao Cai
  2022-07-19  8:19         ` Konstantin Ananyev
  0 siblings, 1 reply; 26+ messages in thread
From: Huichao Cai @ 2022-07-15  8:05 UTC (permalink / raw)
  To: Konstantin Ananyev; +Cc: dev


Hi Konstantin,


    I've been busy lately; sorry for the late reply.


> Instead of implicitly assuming that output mbufs will be allocated
> from the pkt_in pool, it would be better to have an explicit
> output_pool parameter for that function,
> in the same way we have it for rte_ipv4_fragment_packet().


> If I understand correctly, here you assume that out_pkt will always
> be big enough to hold an entire fragment, right?
> But that cannot always be the case, and we probably shouldn't assume
> that for a generic function.
> I suppose the safest way would be either to use rte_pktmbuf_copy()
> here directly or to do something similar to what that function does
> ourselves here.
reply: Thanks for the reminder; I will use an explicit pool parameter and rte_pktmbuf_copy().
> Forgot to mention: the new API has to be experimental.
reply: Does this mean adding __rte_experimental when declaring the function?


Huichao Cai



* Re: [PATCH v2] ip_frag: add IPv4 fragment copy packet API
  2022-07-15  8:05       ` Huichao Cai
@ 2022-07-19  8:19         ` Konstantin Ananyev
  0 siblings, 0 replies; 26+ messages in thread
From: Konstantin Ananyev @ 2022-07-19  8:19 UTC (permalink / raw)
  To: Huichao Cai; +Cc: dev

Hi Huichao,


> 
>      I've been busy lately; sorry for the late reply.

No worries at all.

> 
>> Instead of implicitly assuming that output mbufs will be allocated
>> from the pkt_in pool, it would be better to have an explicit
>> output_pool parameter for that function,
>> in the same way we have it for rte_ipv4_fragment_packet().
> 
>> If I understand correctly, here you assume that out_pkt will always
>> be big enough to hold an entire fragment, right?
>> But that cannot always be the case, and we probably shouldn't assume
>> that for a generic function.
>> I suppose the safest way would be either to use rte_pktmbuf_copy()
>> here directly or to do something similar to what that function does
>> ourselves here.
> 
> reply: Thanks for the reminder; I will use an explicit pool parameter and rte_pktmbuf_copy().
>> Forgot to mention: the new API has to be experimental.
> 
> reply: Does this mean adding __rte_experimental when declaring the function?
> 

Yes, I meant the __rte_experimental tag.
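
E.g. the function declaration in rte_ip_frag.h carries the tag:

	__rte_experimental
	int32_t
	rte_ipv4_fragment_copy_packet(...);

and the symbol stays in the EXPERIMENTAL block of
lib/ip_frag/version.map:

	EXPERIMENTAL {
		global:

		rte_ipv4_fragment_copy_packet;
	};
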
Thanks
Konstantin


* [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-06-09 14:19 ` [PATCH v2] " Huichao Cai
  2022-07-10 23:35   ` Konstantin Ananyev
@ 2022-07-22 13:01   ` Huichao Cai
  2022-07-22 14:42     ` Morten Brørup
                       ` (2 more replies)
  1 sibling, 3 replies; 26+ messages in thread
From: Huichao Cai @ 2022-07-22 13:01 UTC (permalink / raw)
  To: dev; +Cc: konstantin.v.ananyev

Some NIC drivers support the MBUF_FAST_FREE offload (the device
supports optimization for fast release of mbufs; when set, the
application must guarantee that all mbufs sent on a queue come from
the same mempool and have refcnt = 1). In order to adapt to this
offload, add this API. Also add some test cases for this API.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/test_ipfrag.c               |   8 +-
 lib/ip_frag/rte_ip_frag.h            |  27 +++++
 lib/ip_frag/rte_ipv4_fragmentation.c | 208 +++++++++++++++++++++++++++++++++++
 lib/ip_frag/version.map              |   1 +
 4 files changed, 243 insertions(+), 1 deletion(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index ba0ffd0..bb7c4d3 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -418,10 +418,16 @@ static void ut_teardown(void)
 		}
 
 		if (tests[i].ipv == 4)
-			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+			if (i % 2)
+				len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
 						       direct_pool,
 						       indirect_pool);
+			else
+				len = rte_ipv4_fragment_copy_packet(b, pkts_out,
+						       BURST,
+						       tests[i].mtu_size,
+						       pkt_pool);
 		else if (tests[i].ipv == 6)
 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 7d2abe1..c2a7e1e 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -179,6 +179,33 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 			struct rte_mempool *pool_indirect);
 
 /**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct);
+
+/**
  * This function implements reassembly of fragmented IPv4 packets.
  * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
  *
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index 27a8ad2..cb15781 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -83,6 +83,48 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 	return ipopt_len;
 }
 
+static struct rte_mbuf *
+__copy_to_pktmbuf(char *src, struct rte_mbuf *m_head,
+	struct rte_mbuf *m_tail, struct rte_mempool *mp, uint32_t len)
+{
+	struct rte_mbuf *m_last, **prev;
+
+	m_last = m_tail;
+	prev = &m_last->next;
+	while (len > 0) {
+		uint32_t copy_len;
+
+		/* current buffer is full, chain a new one */
+		if (unlikely(rte_pktmbuf_tailroom(m_last) == 0)) {
+			m_last = rte_pktmbuf_alloc(mp);
+			if (unlikely(m_last == NULL))
+				return NULL;
+
+			++m_head->nb_segs;
+			*prev = m_last;
+			prev = &m_last->next;
+		}
+
+		/*
+		 * copy the min of data in len
+		 * vs space available in output (m_last)
+		 */
+		copy_len = RTE_MIN(rte_pktmbuf_tailroom(m_last), len);
+
+		/* append from seg to m_last */
+		memcpy(rte_pktmbuf_mtod_offset(m_last, char *, m_last->data_len),
+			src, copy_len);
+
+		/* update offsets and lengths */
+		m_last->data_len += copy_len;
+		m_head->pkt_len += copy_len;
+		src += copy_len;
+		len -= copy_len;
+	}
+
+	return m_last;
+}
+
 /**
  * IPv4 fragmentation.
  *
@@ -259,3 +301,169 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 
 	return out_pkt_pos;
 }
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copy.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) ||
+	    unlikely(pool_direct == NULL) ||
+	    unlikely(pool_direct != pkt_in->pool) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+	    RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+				    IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL, *tail_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate direct buffer */
+		out_pkt = rte_pktmbuf_alloc(pool_direct);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+		tail_pkt = out_pkt;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			tail_pkt = __copy_to_pktmbuf(rte_pktmbuf_mtod_offset(
+					in_seg, char *, in_seg_data_pos),
+				out_pkt, tail_pkt, pool_direct, len);
+			if (unlikely(!tail_pkt)) {
+				rte_pktmbuf_free(out_pkt);
+				__free_fragments(pkts_out, out_pkt_pos);
+				return -ENOMEM;
+			}
+
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+		    (uint16_t)out_pkt->pkt_len,
+		    flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
index b9c1cca..122ae8d 100644
--- a/lib/ip_frag/version.map
+++ b/lib/ip_frag/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
 	global:
 
 	rte_ip_frag_table_del_expired_entries;
+	rte_ipv4_fragment_copy_packet;
 };
-- 
1.8.3.1



* RE: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 13:01   ` [PATCH v3] " Huichao Cai
@ 2022-07-22 14:42     ` Morten Brørup
  2022-07-22 14:49     ` Stephen Hemminger
  2022-07-22 14:49     ` [PATCH v4] " Huichao Cai
  2 siblings, 0 replies; 26+ messages in thread
From: Morten Brørup @ 2022-07-22 14:42 UTC (permalink / raw)
  To: Huichao Cai, dev, konstantin.v.ananyev; +Cc: Olivier Matz

> From: Huichao Cai [mailto:chcchc88@163.com]
> Sent: Friday, 22 July 2022 15.02
> To: dev@dpdk.org
> Cc: konstantin.v.ananyev@yandex.ru
> Subject: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
> 
> Some NIC drivers support the MBUF_FAST_FREE offload (the device
> supports optimization for fast release of mbufs; when set, the
> application must guarantee that all mbufs sent on a queue come from
> the same mempool and have refcnt = 1). In order to adapt to this
> offload, add this API. Also add some test cases for this API.
> 
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---
>  app/test/test_ipfrag.c               |   8 +-
>  lib/ip_frag/rte_ip_frag.h            |  27 +++++
>  lib/ip_frag/rte_ipv4_fragmentation.c | 208
> +++++++++++++++++++++++++++++++++++
>  lib/ip_frag/version.map              |   1 +
>  4 files changed, 243 insertions(+), 1 deletion(-)
> 
> diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
> index ba0ffd0..bb7c4d3 100644
> --- a/app/test/test_ipfrag.c
> +++ b/app/test/test_ipfrag.c
> @@ -418,10 +418,16 @@ static void ut_teardown(void)
>  		}
> 
>  		if (tests[i].ipv == 4)
> -			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
> +			if (i % 2)
> +				len = rte_ipv4_fragment_packet(b, pkts_out,
> BURST,
>  						       tests[i].mtu_size,
>  						       direct_pool,
>  						       indirect_pool);
> +			else
> +				len = rte_ipv4_fragment_copy_packet(b,
> pkts_out,
> +						       BURST,
> +						       tests[i].mtu_size,
> +						       pkt_pool);
>  		else if (tests[i].ipv == 6)
>  			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
>  						       tests[i].mtu_size,
> diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
> index 7d2abe1..c2a7e1e 100644
> --- a/lib/ip_frag/rte_ip_frag.h
> +++ b/lib/ip_frag/rte_ip_frag.h
> @@ -179,6 +179,33 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf
> *pkt_in,
>  			struct rte_mempool *pool_indirect);
> 
>  /**
> + * IPv4 fragmentation by copy.
> + *
> + * This function implements the fragmentation of IPv4 packets by copy.
> + *
> + * @param pkt_in
> + *   The input packet.
> + * @param pkts_out
> + *   Array storing the output fragments.
> + * @param mtu_size
> + *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing
> IPv4
> + *   datagrams. This value includes the size of the IPv4 header.
> + * @param pool_direct
> + *   MBUF pool used for allocating direct buffers for the output
> fragments.
> + * @return
> + *   Upon successful completion - number of output fragments placed
> + *   in the pkts_out array.
> + *   Otherwise - (-1) * errno.
> + */
> +__rte_experimental
> +int32_t
> +rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
> +	struct rte_mbuf **pkts_out,
> +	uint16_t nb_pkts_out,
> +	uint16_t mtu_size,
> +	struct rte_mempool *pool_direct);
> +
> +/**
>   * This function implements reassembly of fragmented IPv4 packets.
>   * Incoming mbufs should have its l2_len/l3_len fields setup
> correctly.
>   *
> diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c
> b/lib/ip_frag/rte_ipv4_fragmentation.c
> index 27a8ad2..cb15781 100644
> --- a/lib/ip_frag/rte_ipv4_fragmentation.c
> +++ b/lib/ip_frag/rte_ipv4_fragmentation.c
> @@ -83,6 +83,48 @@ static inline uint16_t
> __create_ipopt_frag_hdr(uint8_t *iph,
>  	return ipopt_len;
>  }
> 
> +static struct rte_mbuf *
> +__copy_to_pktmbuf(char *src, struct rte_mbuf *m_head,
> +	struct rte_mbuf *m_tail, struct rte_mempool *mp, uint32_t len)
> +{
> +	struct rte_mbuf *m_last, **prev;
> +
> +	m_last = m_tail;
> +	prev = &m_last->next;
> +	while (len > 0) {
> +		uint32_t copy_len;
> +
> +		/* current buffer is full, chain a new one */
> +		if (unlikely(rte_pktmbuf_tailroom(m_last) == 0)) {
> +			m_last = rte_pktmbuf_alloc(mp);
> +			if (unlikely(m_last == NULL))
> +				return NULL;
> +
> +			++m_head->nb_segs;
> +			*prev = m_last;
> +			prev = &m_last->next;
> +		}

I think that MBUF_FAST_FREE also requires non-segmented packets, although this requirement is missing in the documentation. I have asked Olivier (as MBUF maintainer) to confirm this requirement [1].

[1] http://inbox.dpdk.org/dev/98CBD80474FA8B44BF855DF32C47DC35D871C5@smartserver.smartshare.dk/
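
If the single-segment requirement is confirmed, a guard along these
lines on the application side (a sketch; "mp" standing for the queue's
mempool) would express the full set of constraints:

	#include <stdbool.h>

	/* Hypothetical helper: true if 'm' meets the MBUF_FAST_FREE
	 * constraints discussed above: single pool, refcnt 1 and (if
	 * confirmed) a single segment. */
	static inline bool
	mbuf_fast_free_safe(const struct rte_mbuf *m,
			const struct rte_mempool *mp)
	{
		return m->pool == mp && m->nb_segs == 1 &&
			rte_mbuf_refcnt_read(m) == 1;
	}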

> +
> +		/*
> +		 * copy the min of data in len
> +		 * vs space available in output (m_last)
> +		 */
> +		copy_len = RTE_MIN(rte_pktmbuf_tailroom(m_last), len);
> +
> +		/* append from seg to m_last */
> +		memcpy(rte_pktmbuf_mtod_offset(m_last, char *, m_last-
> >data_len),
> +			src, copy_len);
> +
> +		/* update offsets and lengths */
> +		m_last->data_len += copy_len;
> +		m_head->pkt_len += copy_len;
> +		src += copy_len;
> +		len -= copy_len;
> +	}
> +
> +	return m_last;
> +}
> +
>  /**
>   * IPv4 fragmentation.
>   *



* Re: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 13:01   ` [PATCH v3] " Huichao Cai
  2022-07-22 14:42     ` Morten Brørup
@ 2022-07-22 14:49     ` Stephen Hemminger
  2022-07-22 15:52       ` Morten Brørup
  2022-07-22 14:49     ` [PATCH v4] " Huichao Cai
  2 siblings, 1 reply; 26+ messages in thread
From: Stephen Hemminger @ 2022-07-22 14:49 UTC (permalink / raw)
  To: Huichao Cai; +Cc: dev, konstantin.v.ananyev

On Fri, 22 Jul 2022 21:01:50 +0800
Huichao Cai <chcchc88@163.com> wrote:

> Some NIC drivers support the MBUF_FAST_FREE offload (the device
> supports optimization for fast release of mbufs; when set, the
> application must guarantee that all mbufs sent on a queue come from
> the same mempool and have refcnt = 1). In order to adapt to this
> offload, add this API. Also add some test cases for this API.
> 
> Signed-off-by: Huichao Cai <chcchc88@163.com>

The code should just be checking that refcnt == 1 directly.

There are cases where the sender passes a cloned mbuf.  This is
independent of the fast-free optimization.

This is similar to what the Linux kernel does with skb_cow().
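
A direct check of that kind (a sketch only; it walks the whole chain,
since a clone can sit on any segment) might be:

	const struct rte_mbuf *m;

	/* refuse shared or indirect input up front, independently of
	 * any TX offload; roughly what skb_cow() guards against */
	for (m = pkt_in; m != NULL; m = m->next)
		if (rte_mbuf_refcnt_read(m) != 1 || !RTE_MBUF_DIRECT(m))
			return -ENOTSUP;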


* [PATCH v4] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 13:01   ` [PATCH v3] " Huichao Cai
  2022-07-22 14:42     ` Morten Brørup
  2022-07-22 14:49     ` Stephen Hemminger
@ 2022-07-22 14:49     ` Huichao Cai
  2022-07-24  4:50       ` [PATCH v5] " Huichao Cai
  2 siblings, 1 reply; 26+ messages in thread
From: Huichao Cai @ 2022-07-22 14:49 UTC (permalink / raw)
  To: dev; +Cc: konstantin.v.ananyev

Some NIC drivers support the MBUF_FAST_FREE offload (the device
supports optimization for fast release of mbufs; when set, the
application must guarantee that all mbufs sent on a queue come from
the same mempool and have refcnt = 1). In order to adapt to this
offload, add this API. Also add some test cases for this API.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/test_ipfrag.c               |   8 +-
 lib/ip_frag/rte_ip_frag.h            |  29 +++++
 lib/ip_frag/rte_ipv4_fragmentation.c | 208 +++++++++++++++++++++++++++++++++++
 lib/ip_frag/version.map              |   1 +
 4 files changed, 245 insertions(+), 1 deletion(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index ba0ffd0..8eddbe9 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -418,10 +418,16 @@ static void ut_teardown(void)
 		}
 
 		if (tests[i].ipv == 4)
-			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+			if (i % 2)
+				len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
 						       direct_pool,
 						       indirect_pool);
+			else
+				len = rte_ipv4_fragment_copy_packet(b, pkts_out,
+						       BURST,
+						       tests[i].mtu_size,
+						       direct_pool);
 		else if (tests[i].ipv == 6)
 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 7d2abe1..a84e5f7 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -179,6 +179,35 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 			struct rte_mempool *pool_indirect);
 
 /**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copying.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   Maximum number of fragments the pkts_out array can hold.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct);
+
+/**
  * This function implements reassembly of fragmented IPv4 packets.
  * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
  *
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index 27a8ad2..326c640 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -83,6 +83,48 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 	return ipopt_len;
 }
 
+static struct rte_mbuf *
+__copy_to_pktmbuf(char *src, struct rte_mbuf *m_head,
+	struct rte_mbuf *m_tail, struct rte_mempool *mp, uint32_t len)
+{
+	struct rte_mbuf *m_last, **prev;
+
+	m_last = m_tail;
+	prev = &m_last->next;
+	while (len > 0) {
+		uint32_t copy_len;
+
+		/* current buffer is full, chain a new one */
+		if (unlikely(rte_pktmbuf_tailroom(m_last) == 0)) {
+			m_last = rte_pktmbuf_alloc(mp);
+			if (unlikely(m_last == NULL))
+				return NULL;
+
+			++m_head->nb_segs;
+			*prev = m_last;
+			prev = &m_last->next;
+		}
+
+		/*
+		 * copy the min of data in len
+		 * vs space available in output (m_last)
+		 */
+		copy_len = RTE_MIN(rte_pktmbuf_tailroom(m_last), len);
+
+		/* append from seg to m_last */
+		memcpy(rte_pktmbuf_mtod_offset(m_last, char *, m_last->data_len),
+			src, copy_len);
+
+		/* update offsets and lengths */
+		m_last->data_len += copy_len;
+		m_head->pkt_len += copy_len;
+		src += copy_len;
+		len -= copy_len;
+	}
+
+	return m_last;
+}
+
 /**
  * IPv4 fragmentation.
  *
@@ -259,3 +301,169 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 
 	return out_pkt_pos;
 }
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copying.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   Maximum number of fragments the pkts_out array can hold.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv4_fragment_copy_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+	    RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+				    IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL, *tail_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate direct buffer */
+		out_pkt = rte_pktmbuf_alloc(pool_direct);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+		tail_pkt = out_pkt;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			tail_pkt = __copy_to_pktmbuf(rte_pktmbuf_mtod_offset(
+					in_seg, char *, in_seg_data_pos),
+				out_pkt, tail_pkt, pool_direct, len);
+			if (unlikely(!tail_pkt)) {
+				rte_pktmbuf_free(out_pkt);
+				__free_fragments(pkts_out, out_pkt_pos);
+				return -ENOMEM;
+			}
+
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+		    (uint16_t)out_pkt->pkt_len,
+		    flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
index b9c1cca..122ae8d 100644
--- a/lib/ip_frag/version.map
+++ b/lib/ip_frag/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
 	global:
 
 	rte_ip_frag_table_del_expired_entries;
+	rte_ipv4_fragment_copy_packet;
 };
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* RE: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 14:49     ` Stephen Hemminger
@ 2022-07-22 15:52       ` Morten Brørup
  2022-07-22 15:58         ` Huichao Cai
  0 siblings, 1 reply; 26+ messages in thread
From: Morten Brørup @ 2022-07-22 15:52 UTC (permalink / raw)
  To: Huichao Cai; +Cc: dev, Stephen Hemminger, konstantin.v.ananyev, Olivier Matz

> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Friday, 22 July 2022 16.49
> 
> On Fri, 22 Jul 2022 21:01:50 +0800
> Huichao Cai <chcchc88@163.com> wrote:
> 
> > Some NIC drivers support MBUF_FAST_FREE(Device supports optimization
> > for fast release of mbufs. When set application must guarantee that
> > per-queue all mbufs comes from the same mempool and has refcnt = 1)
> > offload. In order to adapt to this offload function, add this API.
> > Add some test data for this API.
> >
> > Signed-off-by: Huichao Cai <chcchc88@163.com>
> 
> The code should just be checking that refcnt == 1 directly.
> 
> There are cases where sender passes a cloned mbuf.  This is independent
> of the fast free optimization.
> 
> Similar to what Linux kernel does with skb_cow().

Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs are direct and non-segmented, although these requirements are not yet documented.

This means that you should not generate segmented mbufs with this patch. I don't know exactly what to do instead; probably fail with an appropriate errno.
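
One way to do that (a sketch only; later revisions of this patch add a
similar guard) is to refuse to chain a second mbuf and fail when a whole
fragment cannot fit into a single direct buffer:

	/* Inside the per-fragment loop, right after rte_pktmbuf_alloc():
	 * never chain; if the pool's mbufs cannot hold a whole fragment,
	 * give up and report the error. */
	if (unlikely(rte_pktmbuf_tailroom(out_pkt) < frag_size)) {
		rte_pktmbuf_free(out_pkt);
		__free_fragments(pkts_out, out_pkt_pos);
		return -EINVAL;
	}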


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re:RE: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 15:52       ` Morten Brørup
@ 2022-07-22 15:58         ` Huichao Cai
  2022-07-22 16:14           ` Morten Brørup
  0 siblings, 1 reply; 26+ messages in thread
From: Huichao Cai @ 2022-07-22 15:58 UTC (permalink / raw)
  To: Morten Brørup
  Cc: dev, Stephen Hemminger, konstantin.v.ananyev, Olivier Matz

At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com> wrote:
>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
>> Sent: Friday, 22 July 2022 16.49
>> 
>> On Fri, 22 Jul 2022 21:01:50 +0800
>> Huichao Cai <chcchc88@163.com> wrote:
>> 
>> > Some NIC drivers support MBUF_FAST_FREE(Device supports optimization
>> > for fast release of mbufs. When set application must guarantee that
>> > per-queue all mbufs comes from the same mempool and has refcnt = 1)
>> > offload. In order to adapt to this offload function, add this API.
>> > Add some test data for this API.
>> >
>> > Signed-off-by: Huichao Cai <chcchc88@163.com>
>> 
>> The code should just be checking that refcnt == 1 directly.
>> 
>> There are cases where sender passes a cloned mbuf.  This is independent
>> of the fast free optimization.
>> 
>> Similar to what Linux kernel does with skb_cow().
>
>Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs are direct and non-segmented, although these requirements are not yet documented.
>

>This means that you should not generate segmented mbufs with this patch. I don't know what to do instead; probably fail with an appropriate errno.


When the bnxt driver sends an mbuf, it takes the segment chain apart and attaches each segment to the tx_buf_ring, so there are no chained segments left by the time the mbufs are released. Does this mean that segmented mbufs can be allowed?


^ permalink raw reply	[flat|nested] 26+ messages in thread

* RE: RE: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 15:58         ` Huichao Cai
@ 2022-07-22 16:14           ` Morten Brørup
  2022-07-22 22:35             ` Konstantin Ananyev
  0 siblings, 1 reply; 26+ messages in thread
From: Morten Brørup @ 2022-07-22 16:14 UTC (permalink / raw)
  To: Huichao Cai; +Cc: dev, Stephen Hemminger, konstantin.v.ananyev, Olivier Matz

From: Huichao Cai [mailto:chcchc88@163.com] 
Sent: Friday, 22 July 2022 17.59

> At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com> wrote:
> >> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> >> Sent: Friday, 22 July 2022 16.49
> >> 
> >> On Fri, 22 Jul 2022 21:01:50 +0800
> >> Huichao Cai <chcchc88@163.com> wrote:
> >> 
> >> > Some NIC drivers support MBUF_FAST_FREE(Device supports optimization
> >> > for fast release of mbufs. When set application must guarantee that
> >> > per-queue all mbufs comes from the same mempool and has refcnt = 1)
> >> > offload. In order to adapt to this offload function, add this API.
> >> > Add some test data for this API.
> >> >
> >> > Signed-off-by: Huichao Cai <chcchc88@163.com>
> >> 
> >> The code should just be checking that refcnt == 1 directly.
> >> 
> >> There are cases where sender passes a cloned mbuf.  This is independent
> >> of the fast free optimization.
> >> 
> >> Similar to what Linux kernel does with skb_cow().
> >
> >Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs are direct and non-segmented, although these requirements are not yet documented.
> >
> >This means that you should not generate segmented mbufs with this patch. I don't know what to do instead; probably fail with an appropriate errno.
> 
> When the bnxt driver sends mbuf, it will take the mbuf segments apart and hang it to the tx_buf_ring, so there is no mbuf segments when it is released. Does this mean that there can be mbuf segments?

Only if the bnxt driver also resets the segmentation fields (nb_segs and next) in those mbufs, which I suppose it does, if it supports MBUF_FAST_FREE with segmented packets.

However, other Ethernet drivers don't do that, so a generic library function cannot rely on it. These missing requirements for MBUF_FAST_FREE are a bug, either in the MBUF_FAST_FREE documentation or in the drivers where MBUF_FAST_FREE only works correctly with direct and non-segmented mbufs.


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 16:14           ` Morten Brørup
@ 2022-07-22 22:35             ` Konstantin Ananyev
  2022-07-23  8:24               ` Morten Brørup
  0 siblings, 1 reply; 26+ messages in thread
From: Konstantin Ananyev @ 2022-07-22 22:35 UTC (permalink / raw)
  To: Morten Brørup, Huichao Cai; +Cc: dev, Stephen Hemminger, Olivier Matz

22/07/2022 17:14, Morten Brørup wrote:
> From: Huichao Cai [mailto:chcchc88@163.com]
> Sent: Friday, 22 July 2022 17.59
> 
>> At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com> wrote:
>>>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
>>>> Sent: Friday, 22 July 2022 16.49
>>>>
>>>> On Fri, 22 Jul 2022 21:01:50 +0800
>>>> Huichao Cai <chcchc88@163.com> wrote:
>>>>
>>>>> Some NIC drivers support MBUF_FAST_FREE(Device supports optimization
>>>>> for fast release of mbufs. When set application must guarantee that
>>>>> per-queue all mbufs comes from the same mempool and has refcnt = 1)
>>>>> offload. In order to adapt to this offload function, add this API.
>>>>> Add some test data for this API.
>>>>>
>>>>> Signed-off-by: Huichao Cai <chcchc88@163.com>
>>>>
>>>> The code should just be checking that refcnt == 1 directly.
>>>>
>>>> There are cases where sender passes a cloned mbuf.  This is independent
>>>> of the fast free optimization.
>>>>
>>>> Similar to what Linux kernel does with skb_cow().
>>>
>>> Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs are direct and non-segmented, although these requirements are not yet documented.
>>>
>>> This means that you should not generate segmented mbufs with this patch. I don't know what to do instead; probably fail with an appropriate errno.
>>
>> When the bnxt driver sends mbuf, it will take the mbuf segments apart and hang it to the tx_buf_ring, so there is no mbuf segments when it is released. Does this mean that there can be mbuf segments?
> 
> Only if the bnxt driver also resets the segmentation fields (nb_segs and next) in those mbufs, which I suppose it does, if it supports MBUF_FAST_FREE with segmented packets.
> 
> However, other Ethernet drivers don't do that, so a generic library function cannot rely on it. These missing requirements for MBUF_FAST_FREE is a bug, either in the MBUF_FAST_FREE documentation, or in the drivers where MBUF_FAST_FREE only works correctly with direct and non-segmented mbufs.
> 

I believe multi-segment packets work ok with MBUF_FAST_FREE
(as long as other requirements are met).



^ permalink raw reply	[flat|nested] 26+ messages in thread

* RE: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 22:35             ` Konstantin Ananyev
@ 2022-07-23  8:24               ` Morten Brørup
  2022-07-23 18:25                 ` Konstantin Ananyev
  0 siblings, 1 reply; 26+ messages in thread
From: Morten Brørup @ 2022-07-23  8:24 UTC (permalink / raw)
  To: Konstantin Ananyev, Huichao Cai
  Cc: dev, Stephen Hemminger, Olivier Matz, Yuying Zhang, Beilei Xing,
	Matan Azrad, Viacheslav Ovsiienko

+CC: i40e maintainers
+CC: mlx5 maintainers

> From: Konstantin Ananyev [mailto:konstantin.v.ananyev@yandex.ru]
> Sent: Saturday, 23 July 2022 00.35
> 
> 22/07/2022 17:14, Morten Brørup пишет:
> > From: Huichao Cai [mailto:chcchc88@163.com]
> > Sent: Friday, 22 July 2022 17.59
> >
> >> At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com>
> wrote:
> >>>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> >>>> Sent: Friday, 22 July 2022 16.49
> >>>>
> >>>> On Fri, 22 Jul 2022 21:01:50 +0800
> >>>> Huichao Cai <chcchc88@163.com> wrote:
> >>>>
> >>>>> Some NIC drivers support MBUF_FAST_FREE(Device supports
> optimization
> >>>>> for fast release of mbufs. When set application must guarantee
> that
> >>>>> per-queue all mbufs comes from the same mempool and has refcnt =
> 1)
> >>>>> offload. In order to adapt to this offload function, add this
> API.
> >>>>> Add some test data for this API.
> >>>>>
> >>>>> Signed-off-by: Huichao Cai <chcchc88@163.com>
> >>>>
> >>>> The code should just be checking that refcnt == 1 directly.
> >>>>
> >>>> There are cases where sender passes a cloned mbuf.  This is
> independent
> >>>> of the fast free optimization.
> >>>>
> >>>> Similar to what Linux kernel does with skb_cow().
> >>>
> >>> Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs
> are direct and non-segmented, although these requirements are not yet
> documented.
> >>>
> >>> This means that you should not generate segmented mbufs with this
> patch. I don't know what to do instead; probably fail with an
> appropriate errno.
> >>
> >> When the bnxt driver sends mbuf, it will take the mbuf segments
> apart and hang it to the tx_buf_ring, so there is no mbuf segments when
> it is released. Does this mean that there can be mbuf segments?
> >
> > Only if the bnxt driver also resets the segmentation fields (nb_segs
> and next) in those mbufs, which I suppose it does, if it supports
> MBUF_FAST_FREE with segmented packets.
> >
> > However, other Ethernet drivers don't do that, so a generic library
> function cannot rely on it. These missing requirements for
> MBUF_FAST_FREE is a bug, either in the MBUF_FAST_FREE documentation, or
> in the drivers where MBUF_FAST_FREE only works correctly with direct
> and non-segmented mbufs.
> >
> 
> I believe multi-segment packets work ok with MBUF_FAST_FREE
> (as long as other requirements are met).

Looking at the i40e and mlx5 drivers, they both seem to call rte_mempool_put_bulk() without first calling rte_pktmbuf_prefree_seg(). So segmented packets freed with MBUF_FAST_FREE will be stored in the mbuf pool without m->nb_segs and m->next being reset first.

I don't have deep knowledge of these drivers, so maybe I have overlooked something.

The point of MBUF_FAST_FREE is to bypass a lot of code under certain conditions. So I believe that these two undocumented requirements should remain, so the drivers can bypass this code. Otherwise, don't use MBUF_FAST_FREE.
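
For illustration, a simplified sketch of the two TX completion paths
(not taken from any particular PMD; real drivers batch and inline this):

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Generic path: per-segment refcount handling and field reset. */
static void
tx_free_generic(struct rte_mbuf *m)
{
	while (m != NULL) {
		struct rte_mbuf *next = m->next;
		/* Decrements refcnt, detaches indirect mbufs and resets
		 * m->next/m->nb_segs; returns NULL while still referenced. */
		struct rte_mbuf *r = rte_pktmbuf_prefree_seg(m);

		if (r != NULL)
			rte_mempool_put(r->pool, r);
		m = next;
	}
}

/* MBUF_FAST_FREE path: one bulk put, no per-mbuf work. Only valid when
 * every mbuf is direct, non-segmented, has refcnt = 1 and comes from
 * the same mempool. */
static void
tx_free_fast(struct rte_mempool *mp, struct rte_mbuf **txep, unsigned int n)
{
	rte_mempool_put_bulk(mp, (void **)txep, n);
}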


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-23  8:24               ` Morten Brørup
@ 2022-07-23 18:25                 ` Konstantin Ananyev
  2022-07-23 22:27                   ` Morten Brørup
  0 siblings, 1 reply; 26+ messages in thread
From: Konstantin Ananyev @ 2022-07-23 18:25 UTC (permalink / raw)
  To: Morten Brørup, Huichao Cai
  Cc: dev, Stephen Hemminger, Olivier Matz, Yuying Zhang, Beilei Xing,
	Matan Azrad, Viacheslav Ovsiienko

23/07/2022 09:24, Morten Brørup wrote:
> +CC: i40e maintainers
> +CC: mlx5 maintainers
> 
>> From: Konstantin Ananyev [mailto:konstantin.v.ananyev@yandex.ru]
>> Sent: Saturday, 23 July 2022 00.35
>>
>> 22/07/2022 17:14, Morten Brørup wrote:
>>> From: Huichao Cai [mailto:chcchc88@163.com]
>>> Sent: Friday, 22 July 2022 17.59
>>>
>>>> At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com>
>> wrote:
>>>>>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
>>>>>> Sent: Friday, 22 July 2022 16.49
>>>>>>
>>>>>> On Fri, 22 Jul 2022 21:01:50 +0800
>>>>>> Huichao Cai <chcchc88@163.com> wrote:
>>>>>>
>>>>>>> Some NIC drivers support MBUF_FAST_FREE(Device supports
>> optimization
>>>>>>> for fast release of mbufs. When set application must guarantee
>> that
>>>>>>> per-queue all mbufs comes from the same mempool and has refcnt =
>> 1)
>>>>>>> offload. In order to adapt to this offload function, add this
>> API.
>>>>>>> Add some test data for this API.
>>>>>>>
>>>>>>> Signed-off-by: Huichao Cai <chcchc88@163.com>
>>>>>>
>>>>>> The code should just be checking that refcnt == 1 directly.
>>>>>>
>>>>>> There are cases where sender passes a cloned mbuf.  This is
>> independent
>>>>>> of the fast free optimization.
>>>>>>
>>>>>> Similar to what Linux kernel does with skb_cow().
>>>>>
>>>>> Olivier just confirmed that MBUF_FAST_FREE requires that the mbufs
>> are direct and non-segmented, although these requirements are not yet
>> documented.
>>>>>
>>>>> This means that you should not generate segmented mbufs with this
>> patch. I don't know what to do instead; probably fail with an
>> appropriate errno.
>>>>
>>>> When the bnxt driver sends mbuf, it will take the mbuf segments
>> apart and hang it to the tx_buf_ring, so there is no mbuf segments when
>> it is released. Does this mean that there can be mbuf segments?
>>>
>>> Only if the bnxt driver also resets the segmentation fields (nb_segs
>> and next) in those mbufs, which I suppose it does, if it supports
>> MBUF_FAST_FREE with segmented packets.
>>>
>>> However, other Ethernet drivers don't do that, so a generic library
>> function cannot rely on it. These missing requirements for
>> MBUF_FAST_FREE is a bug, either in the MBUF_FAST_FREE documentation, or
>> in the drivers where MBUF_FAST_FREE only works correctly with direct
>> and non-segmented mbufs.
>>>
>>
>> I believe multi-segment packets work ok with MBUF_FAST_FREE
>> (as long as other requirements are met).
> 
> Looking at the i40e and mlx5 drivers, they both seem to call rte_mempool_put_bulk() without first calling rte_pktmbuf_prefree_seg(). So segmented packets freed with MBUF_FAST_FREE, will be stored in the mbuf pool without m->nb_segs and m->next being reset first.
> 
> I don't have deep knowledge of these drivers, so maybe I have overlooked something.
> 
> The point of MBUF_FAST_FREE is to bypass a lot of code under certain conditions. So I believe that these two undocumented requirements should remain, so the drivers can bypass this code. Otherwise, don't use MBUF_FAST_FREE.
> 

Actually, after another look, I think you and Olivier are right -
multi-seg packets should not be used together with MBUF_FAST_FREE.
I forgot that mbuf_prefree() is responsible for resetting both the 'next'
and 'nb_segs' fields of the mbuf.
It might keep working for some simple forwarding apps (like l3fwd),
as most PMDs reset these fields in the RX path anyway, but that's just a
coincidence we shouldn't rely on.
We probably need to update l3fwd (and other examples) to disallow
MBUF_FAST_FREE when TX_OFFLOAD_MULTI_SEGS is selected.
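
In the examples that could be as small as a guard at port-configuration
time (a sketch, assuming a local struct rte_eth_conf port_conf as used
in l3fwd):

	/* The two offloads are mutually exclusive: if the application
	 * needs multi-segment TX, drop the fast-free request. */
	if (port_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
		port_conf.txmode.offloads &=
			~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;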

Konstantin

^ permalink raw reply	[flat|nested] 26+ messages in thread

* RE: [PATCH v3] ip_frag: add IPv4 fragment copy packet API
  2022-07-23 18:25                 ` Konstantin Ananyev
@ 2022-07-23 22:27                   ` Morten Brørup
  0 siblings, 0 replies; 26+ messages in thread
From: Morten Brørup @ 2022-07-23 22:27 UTC (permalink / raw)
  To: Konstantin Ananyev, Huichao Cai
  Cc: dev, Stephen Hemminger, Olivier Matz, Yuying Zhang, Beilei Xing,
	Matan Azrad, Viacheslav Ovsiienko

> From: Konstantin Ananyev [mailto:konstantin.v.ananyev@yandex.ru]
> Sent: Saturday, 23 July 2022 20.25
> 
> 23/07/2022 09:24, Morten Brørup wrote:
> > +CC: i40e maintainers
> > +CC: mlx5 maintainers
> >
> >> From: Konstantin Ananyev [mailto:konstantin.v.ananyev@yandex.ru]
> >> Sent: Saturday, 23 July 2022 00.35
> >>
> >> 22/07/2022 17:14, Morten Brørup пишет:
> >>> From: Huichao Cai [mailto:chcchc88@163.com]
> >>> Sent: Friday, 22 July 2022 17.59
> >>>
> >>>> At 2022-07-22 23:52:28, "Morten Brørup" <mb@smartsharesystems.com>
> >> wrote:
> >>>>>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> >>>>>> Sent: Friday, 22 July 2022 16.49
> >>>>>>
> >>>>>> On Fri, 22 Jul 2022 21:01:50 +0800
> >>>>>> Huichao Cai <chcchc88@163.com> wrote:
> >>>>>>
> >>>>>>> Some NIC drivers support MBUF_FAST_FREE(Device supports
> >> optimization
> >>>>>>> for fast release of mbufs. When set application must guarantee
> >> that
> >>>>>>> per-queue all mbufs comes from the same mempool and has refcnt
> =
> >> 1)
> >>>>>>> offload. In order to adapt to this offload function, add this
> >> API.
> >>>>>>> Add some test data for this API.
> >>>>>>>
> >>>>>>> Signed-off-by: Huichao Cai <chcchc88@163.com>
> >>>>>>
> >>>>>> The code should just be checking that refcnt == 1 directly.
> >>>>>>
> >>>>>> There are cases where sender passes a cloned mbuf.  This is
> >> independent
> >>>>>> of the fast free optimization.
> >>>>>>
> >>>>>> Similar to what Linux kernel does with skb_cow().
> >>>>>
> >>>>> Olivier just confirmed that MBUF_FAST_FREE requires that the
> mbufs
> >> are direct and non-segmented, although these requirements are not
> yet
> >> documented.
> >>>>>
> >>>>> This means that you should not generate segmented mbufs with this
> >> patch. I don't know what to do instead; probably fail with an
> >> appropriate errno.
> >>>>
> >>>> When the bnxt driver sends mbuf, it will take the mbuf segments
> >> apart and hang it to the tx_buf_ring, so there is no mbuf segments
> when
> >> it is released. Does this mean that there can be mbuf segments?
> >>>
> >>> Only if the bnxt driver also resets the segmentation fields
> (nb_segs
> >> and next) in those mbufs, which I suppose it does, if it supports
> >> MBUF_FAST_FREE with segmented packets.
> >>>
> >>> However, other Ethernet drivers don't do that, so a generic library
> >> function cannot rely on it. These missing requirements for
> >> MBUF_FAST_FREE is a bug, either in the MBUF_FAST_FREE documentation,
> or
> >> in the drivers where MBUF_FAST_FREE only works correctly with direct
> >> and non-segmented mbufs.
> >>>
> >>
> >> I believe multi-segment packets work ok with MBUF_FAST_FREE
> >> (as long as other requirements are met).
> >
> > Looking at the i40e and mlx5 drivers, they both seem to call
> rte_mempool_put_bulk() without first calling rte_pktmbuf_prefree_seg().
> So segmented packets freed with MBUF_FAST_FREE, will be stored in the
> mbuf pool without m->nb_segs and m->next being reset first.
> >
> > I don't have deep knowledge of these drivers, so maybe I have
> overlooked something.
> >
> > The point of MBUF_FAST_FREE is to bypass a lot of code under certain
> conditions. So I believe that these two undocumented requirements
> should remain, so the drivers can bypass this code. Otherwise, don't
> use MBUF_FAST_FREE.
> >
> 
> Actually, after another look, I think you and Olivier are right -
> multi-seg packets should not be used together with MBUF_FAST_FREE.
> I forgot that mbuf_prefree() is responsible to reset both 'next'
> and 'nb_segs' fields of the mbuf.
> It might keep working for some simple forwarding app (like l3fwd),
> as most PMDs reset these fields at RX path anyway, but that's just a
> coincidence we shouldn't rely on.

I hope the PMDs don't reset these fields in their RX path, unless they are creating multi-seg packets and therefore must. It might cause an extra cache miss per packet, if the PMD unnecessarily sets m->next, which is in the second cache line of the mbuf.

Or perhaps everyone has forgotten about this RX/TX split of the first/second cache line of the mbufs, because all tests are based on run-to-completion, where the second cache line will be written shortly afterwards anyway. :-(

> We probably need to update l3fwd (and other examples) to dis-allow
> MBUF_FAST_FREE when TX_OFFLOAD_MULTI_SEGS is selected.

+1

> 
> Konstantin


^ permalink raw reply	[flat|nested] 26+ messages in thread

* [PATCH v5] ip_frag: add IPv4 fragment copy packet API
  2022-07-22 14:49     ` [PATCH v4] " Huichao Cai
@ 2022-07-24  4:50       ` Huichao Cai
  2022-07-24  8:10         ` [PATCH v6] " Huichao Cai
  0 siblings, 1 reply; 26+ messages in thread
From: Huichao Cai @ 2022-07-24  4:50 UTC (permalink / raw)
  To: dev; +Cc: konstantin.v.ananyev

Some NIC drivers support the MBUF_FAST_FREE offload (the device supports
an optimization for fast release of mbufs; when set, the application must
guarantee that, per queue, all mbufs come from the same mempool, have
refcnt = 1, and are direct and non-segmented). In order to adapt to this
offload, add this API. Add some test data for this API.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/test_ipfrag.c               |   9 +-
 lib/ip_frag/rte_ip_frag.h            |  34 +++++++
 lib/ip_frag/rte_ipv4_fragmentation.c | 174 +++++++++++++++++++++++++++++++++++
 lib/ip_frag/version.map              |   1 +
 4 files changed, 217 insertions(+), 1 deletion(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index ba0ffd0..88cc4cd 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -418,10 +418,17 @@ static void ut_teardown(void)
 		}
 
 		if (tests[i].ipv == 4)
-			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+			if (i % 2)
+				len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
 						       direct_pool,
 						       indirect_pool);
+			else
+				len = rte_ipv4_fragment_copy_nonseg_packet(b,
+						       pkts_out,
+						       BURST,
+						       tests[i].mtu_size,
+						       direct_pool);
 		else if (tests[i].ipv == 6)
 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 7d2abe1..4a2b150 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -179,6 +179,40 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 			struct rte_mempool *pool_indirect);
 
 /**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements fragmentation of IPv4 packets by copying
+ * them into new, non-segmented mbufs.
+ * It is mainly intended for use with the TX MBUF_FAST_FREE offload.
+ * MBUF_FAST_FREE: the device supports an optimization for fast release of
+ * mbufs. When set, the application must guarantee that, per queue, all mbufs
+ * come from the same mempool, have refcnt = 1, and are direct and non-segmented.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   Maximum number of fragments the pkts_out array can hold.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct);
+
+/**
  * This function implements reassembly of fragmented IPv4 packets.
  * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
  *
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index 27a8ad2..4009d26 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -259,3 +259,177 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 
 	return out_pkt_pos;
 }
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements fragmentation of IPv4 packets by copying
+ * them into new, non-segmented mbufs.
+ * It is mainly intended for use with the TX MBUF_FAST_FREE offload.
+ * MBUF_FAST_FREE: the device supports an optimization for fast release of
+ * mbufs. When set, the application must guarantee that, per queue, all mbufs
+ * come from the same mempool, have refcnt = 1, and are direct and non-segmented.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   Maximum number of fragments the pkts_out array can hold.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+	    RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+				    IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate direct buffer */
+		out_pkt = rte_pktmbuf_alloc(pool_direct);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+		if (unlikely(out_pkt->buf_len < frag_size)) {
+			rte_pktmbuf_free(out_pkt);
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -EINVAL;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			memcpy(rte_pktmbuf_mtod_offset(out_pkt, char *,
+					out_pkt->data_len),
+				rte_pktmbuf_mtod_offset(in_seg, char *,
+					in_seg_data_pos),
+				len);
+
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+			out_pkt->data_len += len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_pkt->pkt_len = out_pkt->data_len;
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+		    (uint16_t)out_pkt->pkt_len,
+		    flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
index b9c1cca..8aad839 100644
--- a/lib/ip_frag/version.map
+++ b/lib/ip_frag/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
 	global:
 
 	rte_ip_frag_table_del_expired_entries;
+	rte_ipv4_fragment_copy_nonseg_packet;
 };
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* [PATCH v6] ip_frag: add IPv4 fragment copy packet API
  2022-07-24  4:50       ` [PATCH v5] " Huichao Cai
@ 2022-07-24  8:10         ` Huichao Cai
  2022-07-25 15:42           ` Stephen Hemminger
                             ` (2 more replies)
  0 siblings, 3 replies; 26+ messages in thread
From: Huichao Cai @ 2022-07-24  8:10 UTC (permalink / raw)
  To: dev; +Cc: konstantin.v.ananyev

Some NIC drivers support the MBUF_FAST_FREE offload (the device supports
an optimization for fast release of mbufs; when set, the application must
guarantee that, per queue, all mbufs come from the same mempool, have
refcnt = 1, and are direct and non-segmented). In order to adapt to this
offload, add this API. Add some test data for this API.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/test_ipfrag.c               |   9 +-
 lib/ip_frag/rte_ip_frag.h            |  34 +++++++
 lib/ip_frag/rte_ipv4_fragmentation.c | 175 +++++++++++++++++++++++++++++++++++
 lib/ip_frag/version.map              |   1 +
 4 files changed, 218 insertions(+), 1 deletion(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index ba0ffd0..88cc4cd 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -418,10 +418,17 @@ static void ut_teardown(void)
 		}
 
 		if (tests[i].ipv == 4)
-			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+			if (i % 2)
+				len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
 						       direct_pool,
 						       indirect_pool);
+			else
+				len = rte_ipv4_fragment_copy_nonseg_packet(b,
+						       pkts_out,
+						       BURST,
+						       tests[i].mtu_size,
+						       direct_pool);
 		else if (tests[i].ipv == 6)
 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 7d2abe1..4a2b150 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -179,6 +179,40 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 			struct rte_mempool *pool_indirect);
 
 /**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements fragmentation of IPv4 packets by copying
+ * them into new, non-segmented mbufs.
+ * It is mainly intended for use with the TX MBUF_FAST_FREE offload.
+ * MBUF_FAST_FREE: the device supports an optimization for fast release of
+ * mbufs. When set, the application must guarantee that, per queue, all mbufs
+ * come from the same mempool, have refcnt = 1, and are direct and non-segmented.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   Maximum number of fragments the pkts_out array can hold.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct);
+
+/**
  * This function implements reassembly of fragmented IPv4 packets.
  * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
  *
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index 27a8ad2..e6ec408 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -259,3 +259,178 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 
 	return out_pkt_pos;
 }
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements fragmentation of IPv4 packets by copying
+ * them into new, non-segmented mbufs.
+ * It is mainly intended for use with the TX MBUF_FAST_FREE offload.
+ * MBUF_FAST_FREE: the device supports an optimization for fast release of
+ * mbufs. When set, the application must guarantee that, per queue, all mbufs
+ * come from the same mempool, have refcnt = 1, and are direct and non-segmented.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   Maximum number of fragments the pkts_out array can hold.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+	    RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+				    IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate direct buffer */
+		out_pkt = rte_pktmbuf_alloc(pool_direct);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+		if (unlikely(out_pkt->buf_len - rte_pktmbuf_headroom(out_pkt) <
+				frag_size)) {
+			rte_pktmbuf_free(out_pkt);
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -EINVAL;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			memcpy(rte_pktmbuf_mtod_offset(out_pkt, char *,
+					out_pkt->data_len),
+				rte_pktmbuf_mtod_offset(in_seg, char *,
+					in_seg_data_pos),
+				len);
+
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+			out_pkt->data_len += len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_pkt->pkt_len = out_pkt->data_len;
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+		    (uint16_t)out_pkt->pkt_len,
+		    flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
index b9c1cca..8aad839 100644
--- a/lib/ip_frag/version.map
+++ b/lib/ip_frag/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
 	global:
 
 	rte_ip_frag_table_del_expired_entries;
+	rte_ipv4_fragment_copy_nonseg_packet;
 };
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 26+ messages in thread

* Re: [PATCH v6] ip_frag: add IPv4 fragment copy packet API
  2022-07-24  8:10         ` [PATCH v6] " Huichao Cai
@ 2022-07-25 15:42           ` Stephen Hemminger
  2022-07-26  1:22             ` Huichao Cai
  2022-08-07 11:45           ` Konstantin Ananyev
  2022-08-08  1:48           ` [PATCH v7] " Huichao Cai
  2 siblings, 1 reply; 26+ messages in thread
From: Stephen Hemminger @ 2022-07-25 15:42 UTC (permalink / raw)
  To: Huichao Cai; +Cc: dev, konstantin.v.ananyev

On Sun, 24 Jul 2022 16:10:03 +0800
Huichao Cai <chcchc88@163.com> wrote:

> +
> +	/*
> +	 * Formal parameter checking.
> +	 */
> +	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
> +	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
> +	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
> +		return -EINVAL;
> +
> +	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
> +	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
> +	    RTE_IPV4_IHL_MULTIPLIER;
> +
> +	/* Check IP header length */
> +	if (unlikely(pkt_in->data_len < header_len) ||
> +	    unlikely(mtu_size < header_len))
> +		return -EINVAL;
> +

My suspicion is that all this input parameter checking probably costs more
than any performance gain from having a non-segmented fast path.

^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re:Re: [PATCH v6] ip_frag: add IPv4 fragment copy packet API
  2022-07-25 15:42           ` Stephen Hemminger
@ 2022-07-26  1:22             ` Huichao Cai
  2022-08-07 11:49               ` Konstantin Ananyev
  0 siblings, 1 reply; 26+ messages in thread
From: Huichao Cai @ 2022-07-26  1:22 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: dev, konstantin.v.ananyev

At 2022-07-25 23:42:06, "Stephen Hemminger" <stephen@networkplumber.org> wrote:
>On Sun, 24 Jul 2022 16:10:03 +0800
>Huichao Cai <chcchc88@163.com> wrote:
>
>> +
>> +	/*
>> +	 * Formal parameter checking.
>> +	 */
>> +	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
>> +	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
>> +	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
>> +		return -EINVAL;
>> +
>> +	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
>> +	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
>> +	    RTE_IPV4_IHL_MULTIPLIER;
>> +
>> +	/* Check IP header length */
>> +	if (unlikely(pkt_in->data_len < header_len) ||
>> +	    unlikely(mtu_size < header_len))
>> +		return -EINVAL;
>> +
>
>My suspicions are all this input parameter checking probably costs more

>than any performance gain of having a non-segmented fast path.
These checks are consistent with the rte_ipv4_fragment_packet function,
so I assume their performance impact has already been tested. If these checks
do affect performance, perhaps the validity of the arguments is better
guaranteed by the caller.


^ permalink raw reply	[flat|nested] 26+ messages in thread

* Re: [PATCH v6] ip_frag: add IPv4 fragment copy packet API
  2022-07-24  8:10         ` [PATCH v6] " Huichao Cai
  2022-07-25 15:42           ` Stephen Hemminger
@ 2022-08-07 11:45           ` Konstantin Ananyev
  2022-08-08  1:48           ` [PATCH v7] " Huichao Cai
  2 siblings, 0 replies; 26+ messages in thread
From: Konstantin Ananyev @ 2022-08-07 11:45 UTC (permalink / raw)
  To: Huichao Cai, dev

24/07/2022 09:10, Huichao Cai wrote:
> Some NIC drivers support MBUF_FAST_FREE(Device supports optimization
> for fast release of mbufs. When set application must guarantee that
> per-queue all mbufs comes from the same mempool,has refcnt = 1,direct
> and non-segmented.)offload. In order to adapt to this offload function,
> add this API. Add some test data for this API.
> 
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---
>   app/test/test_ipfrag.c               |   9 +-
>   lib/ip_frag/rte_ip_frag.h            |  34 +++++++
>   lib/ip_frag/rte_ipv4_fragmentation.c | 175 +++++++++++++++++++++++++++++++++++
>   lib/ip_frag/version.map              |   1 +
>   4 files changed, 218 insertions(+), 1 deletion(-)
> 
> diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
> index ba0ffd0..88cc4cd 100644
> --- a/app/test/test_ipfrag.c
> +++ b/app/test/test_ipfrag.c
> @@ -418,10 +418,17 @@ static void ut_teardown(void)
>   		}
>   
>   		if (tests[i].ipv == 4)
> -			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
> +			if (i % 2)
> +				len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
>   						       tests[i].mtu_size,
>   						       direct_pool,
>   						       indirect_pool);
> +			else
> +				len = rte_ipv4_fragment_copy_nonseg_packet(b,
> +						       pkts_out,
> +						       BURST,
> +						       tests[i].mtu_size,
> +						       direct_pool);
>   		else if (tests[i].ipv == 6)
>   			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
>   						       tests[i].mtu_size,
> diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
> index 7d2abe1..4a2b150 100644
> --- a/lib/ip_frag/rte_ip_frag.h
> +++ b/lib/ip_frag/rte_ip_frag.h
> @@ -179,6 +179,40 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
>   			struct rte_mempool *pool_indirect);
>   
>   /**
> + * IPv4 fragmentation by copy.
> + *
> + * This function implements the fragmentation of IPv4 packets by copy
> + * non-segmented mbuf.
> + * This function is mainly used to adapt TX MBUF_FAST_FREE offload.
> + * MBUF_FAST_FREE: Device supports optimization for fast release of mbufs.
> + * When set application must guarantee that per-queue all mbufs comes from
> + * the same mempool,has refcnt = 1,direct and non-segmented.
> + *
> + * @param pkt_in
> + *   The input packet.
> + * @param pkts_out
> + *   Array storing the output fragments.
> + * @param nb_pkts_out
> + *   Number of fragments.
> + * @param mtu_size
> + *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
> + *   datagrams. This value includes the size of the IPv4 header.
> + * @param pool_direct
> + *   MBUF pool used for allocating direct buffers for the output fragments.
> + * @return
> + *   Upon successful completion - number of output fragments placed
> + *   in the pkts_out array.
> + *   Otherwise - (-1) * errno.
> + */
> +__rte_experimental
> +int32_t
> +rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
> +	struct rte_mbuf **pkts_out,
> +	uint16_t nb_pkts_out,
> +	uint16_t mtu_size,
> +	struct rte_mempool *pool_direct);
> +
> +/**
>    * This function implements reassembly of fragmented IPv4 packets.
>    * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
>    *
> diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
> index 27a8ad2..e6ec408 100644
> --- a/lib/ip_frag/rte_ipv4_fragmentation.c
> +++ b/lib/ip_frag/rte_ipv4_fragmentation.c
> @@ -259,3 +259,178 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
>   
>   	return out_pkt_pos;
>   }
> +
> +/**
> + * IPv4 fragmentation by copy.
> + *
> + * This function implements the fragmentation of IPv4 packets by copy
> + * non-segmented mbuf.
> + * This function is mainly used to adapt TX MBUF_FAST_FREE offload.
> + * MBUF_FAST_FREE: Device supports optimization for fast release of mbufs.
> + * When set application must guarantee that per-queue all mbufs comes from
> + * the same mempool,has refcnt = 1,direct and non-segmented.
> + *
> + * @param pkt_in
> + *   The input packet.
> + * @param pkts_out
> + *   Array storing the output fragments.
> + * @param nb_pkts_out
> + *   Number of fragments.
> + * @param mtu_size
> + *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
> + *   datagrams. This value includes the size of the IPv4 header.
> + * @param pool_direct
> + *   MBUF pool used for allocating direct buffers for the output fragments.
> + * @return
> + *   Upon successful completion - number of output fragments placed
> + *   in the pkts_out array.
> + *   Otherwise - (-1) * errno.
> + */
> +int32_t
> +rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
> +	struct rte_mbuf **pkts_out,
> +	uint16_t nb_pkts_out,
> +	uint16_t mtu_size,
> +	struct rte_mempool *pool_direct)
> +{
> +	struct rte_mbuf *in_seg = NULL;
> +	struct rte_ipv4_hdr *in_hdr;
> +	uint32_t out_pkt_pos, in_seg_data_pos;
> +	uint32_t more_in_segs;
> +	uint16_t fragment_offset, flag_offset, frag_size, header_len;
> +	uint16_t frag_bytes_remaining;
> +	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
> +	uint16_t ipopt_len;
> +
> +	/*
> +	 * Formal parameter checking.
> +	 */
> +	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
> +	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
> +	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
> +		return -EINVAL;
> +
> +	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
> +	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
> +	    RTE_IPV4_IHL_MULTIPLIER;
> +
> +	/* Check IP header length */
> +	if (unlikely(pkt_in->data_len < header_len) ||
> +	    unlikely(mtu_size < header_len))
> +		return -EINVAL;
> +
> +	/*
> +	 * Ensure the IP payload length of all fragments is aligned to a
> +	 * multiple of 8 bytes as per RFC791 section 2.3.
> +	 */
> +	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
> +				    IPV4_HDR_FO_ALIGN);
> +
> +	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
> +
> +	/* If Don't Fragment flag is set */
> +	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
> +		return -ENOTSUP;
> +
> +	/* Check that pkts_out is big enough to hold all fragments */
> +	if (unlikely(frag_size * nb_pkts_out <
> +	    (uint16_t)(pkt_in->pkt_len - header_len)))
> +		return -EINVAL;
> +
> +	in_seg = pkt_in;
> +	in_seg_data_pos = header_len;
> +	out_pkt_pos = 0;
> +	fragment_offset = 0;
> +
> +	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
> +	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
> +		return -EINVAL;
> +
> +	more_in_segs = 1;
> +	while (likely(more_in_segs)) {
> +		struct rte_mbuf *out_pkt = NULL;
> +		uint32_t more_out_segs;
> +		struct rte_ipv4_hdr *out_hdr;
> +
> +		/* Allocate direct buffer */
> +		out_pkt = rte_pktmbuf_alloc(pool_direct);
> +		if (unlikely(out_pkt == NULL)) {
> +			__free_fragments(pkts_out, out_pkt_pos);
> +			return -ENOMEM;
> +		}
> +		if (unlikely(out_pkt->buf_len - rte_pktmbuf_headroom(out_pkt) <
> +				frag_size)) {

As a nit, this might be better:
if (rte_pktmbuf_tailroom(out_pkt) < frag_size) {...}
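
For reference, a sketch of why the two checks agree at this point: right
after rte_pktmbuf_alloc() the mbuf's data_len is 0, so

	/* rte_pktmbuf_tailroom(m) evaluates to
	 *     m->buf_len - rte_pktmbuf_headroom(m) - m->data_len
	 * and with data_len == 0 that is buf_len - headroom, the exact
	 * condition the patch spells out. */
	if (rte_pktmbuf_tailroom(out_pkt) < frag_size) {
		rte_pktmbuf_free(out_pkt);
		__free_fragments(pkts_out, out_pkt_pos);
		return -EINVAL;
	}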

Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>

> +			rte_pktmbuf_free(out_pkt);
> +			__free_fragments(pkts_out, out_pkt_pos);
> +			return -EINVAL;
> +		}
> +
> +		/* Reserve space for the IP header that will be built later */
> +		out_pkt->data_len = header_len;
> +		out_pkt->pkt_len = header_len;
> +		frag_bytes_remaining = frag_size;
> +
> +		more_out_segs = 1;
> +		while (likely(more_out_segs && more_in_segs)) {
> +			uint32_t len;
> +
> +			len = frag_bytes_remaining;
> +			if (len > (in_seg->data_len - in_seg_data_pos))
> +				len = in_seg->data_len - in_seg_data_pos;
> +
> +			memcpy(rte_pktmbuf_mtod_offset(out_pkt, char *,
> +					out_pkt->data_len),
> +				rte_pktmbuf_mtod_offset(in_seg, char *,
> +					in_seg_data_pos),
> +				len);
> +
> +			in_seg_data_pos += len;
> +			frag_bytes_remaining -= len;
> +			out_pkt->data_len += len;
> +
> +			/* Current output packet (i.e. fragment) done ? */
> +			if (unlikely(frag_bytes_remaining == 0))
> +				more_out_segs = 0;
> +
> +			/* Current input segment done ? */
> +			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
> +				in_seg = in_seg->next;
> +				in_seg_data_pos = 0;
> +
> +				if (unlikely(in_seg == NULL))
> +					more_in_segs = 0;
> +			}
> +		}
> +
> +		/* Build the IP header */
> +
> +		out_pkt->pkt_len = out_pkt->data_len;
> +		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
> +
> +		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
> +		    (uint16_t)out_pkt->pkt_len,
> +		    flag_offset, fragment_offset, more_in_segs);
> +
> +		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
> +			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
> +			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
> +				ipopt_len, ipopt_frag_hdr);
> +			fragment_offset = (uint16_t)(fragment_offset +
> +				out_pkt->pkt_len - header_len);
> +			out_pkt->l3_len = header_len;
> +
> +			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
> +			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
> +		} else {
> +			fragment_offset = (uint16_t)(fragment_offset +
> +				out_pkt->pkt_len - header_len);
> +			out_pkt->l3_len = header_len;
> +		}
> +
> +		/* Write the fragment to the output list */
> +		pkts_out[out_pkt_pos] = out_pkt;
> +		out_pkt_pos++;
> +	}
> +
> +	return out_pkt_pos;
> +}
> diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
> index b9c1cca..8aad839 100644
> --- a/lib/ip_frag/version.map
> +++ b/lib/ip_frag/version.map
> @@ -17,4 +17,5 @@ EXPERIMENTAL {
>   	global:
>   
>   	rte_ip_frag_table_del_expired_entries;
> +	rte_ipv4_fragment_copy_nonseg_packet;
>   };
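
Note the symbol goes into the EXPERIMENTAL section of version.map, so an
out-of-tree caller has to opt in to the experimental API before using it;
a minimal sketch (the macro may equally be passed as a compiler flag,
-DALLOW_EXPERIMENTAL_API):

	/* Opt in to DPDK experimental APIs before pulling in the header. */
	#define ALLOW_EXPERIMENTAL_API
	#include <rte_ip_frag.h>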



* Re: [PATCH v6] ip_frag: add IPv4 fragment copy packet API
  2022-07-26  1:22             ` Huichao Cai
@ 2022-08-07 11:49               ` Konstantin Ananyev
  0 siblings, 0 replies; 26+ messages in thread
From: Konstantin Ananyev @ 2022-08-07 11:49 UTC (permalink / raw)
  To: Huichao Cai, Stephen Hemminger; +Cc: dev


> 
> At 2022-07-25 23:42:06, "Stephen Hemminger" <stephen@networkplumber.org> wrote:
>>On Sun, 24 Jul 2022 16:10:03 +0800
>>Huichao Cai <chcchc88@163.com> wrote:
>>
>>> +
>>> +	/*
>>> +	 * Formal parameter checking.
>>> +	 */
>>> +	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
>>> +	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
>>> +	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
>>> +		return -EINVAL;
>>> +
>>> +	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
>>> +	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
>>> +	    RTE_IPV4_IHL_MULTIPLIER;
>>> +
>>> +	/* Check IP header length */
>>> +	if (unlikely(pkt_in->data_len < header_len) ||
>>> +	    unlikely(mtu_size < header_len))
>>> +		return -EINVAL;
>>> +
>>
>>My suspicion is that all this input parameter checking probably costs more
>>than any performance gain of having a non-segmented fast path.

I think the checks are not that expensive.
My guess is that the actual copying will be the main cycle eater here.
Though if the percentage of packets that need to be fragmented is tiny,
maybe it is still worth it.
Though yes, I still think it would be better not to use MBUF_FAST_FREE at
all, but we are where we are.


> These checks are consistent with the rte_ipv4_fragment_packet function.
> I think these have been tested for performance. If these checks do affect
> performance, perhaps the validity of the parameters is better guaranteed
> by the caller.
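
As an illustration of moving the checks to the caller, a hypothetical
wrapper (all names below are invented for the sketch) that validates once
per burst, so every packet handed to the fragment path already satisfies
the API's parameter checks:

	#include <rte_ether.h>
	#include <rte_ip_frag.h>
	#include <rte_mbuf.h>

	#define FRAGS_MAX 8	/* assumed per-packet fragment bound */

	/* Assumes mbuf data starts at the IPv4 header, as the API requires. */
	static void
	tx_fragment_burst(struct rte_mbuf **pkts, uint16_t n,
			  struct rte_mempool *pool, uint16_t mtu)
	{
		struct rte_mbuf *frags[FRAGS_MAX];
		int32_t nb;
		uint16_t i;

		/* Validate the burst-invariant parameters once; per-packet
		 * conditions (e.g. the DF bit) are still reported via the
		 * API's return value below. */
		if (pkts == NULL || pool == NULL || mtu < RTE_ETHER_MIN_MTU)
			return;

		for (i = 0; i != n; i++) {
			if (pkts[i]->pkt_len <= mtu)
				continue;	/* fits, nothing to do */
			nb = rte_ipv4_fragment_copy_nonseg_packet(pkts[i],
					frags, FRAGS_MAX, mtu, pool);
			if (nb < 0) {
				rte_pktmbuf_free(pkts[i]);	/* drop on error */
				continue;
			}
			/* ... enqueue frags[0..nb - 1] for Tx and free
			 * the original packet ... */
		}
	}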



* [PATCH v7] ip_frag: add IPv4 fragment copy packet API
  2022-07-24  8:10         ` [PATCH v6] " Huichao Cai
  2022-07-25 15:42           ` Stephen Hemminger
  2022-08-07 11:45           ` Konstantin Ananyev
@ 2022-08-08  1:48           ` Huichao Cai
  2022-08-08 22:29             ` Konstantin Ananyev
  2 siblings, 1 reply; 26+ messages in thread
From: Huichao Cai @ 2022-08-08  1:48 UTC (permalink / raw)
  To: dev; +Cc: konstantin.v.ananyev

Some NIC drivers support the MBUF_FAST_FREE offload (the device supports
an optimization for fast release of mbufs; when set, the application must
guarantee that, per queue, all mbufs come from the same mempool, have
refcnt = 1, and are direct and non-segmented). In order to adapt to this
offload function, add this API. Add some test data for this API.

Signed-off-by: Huichao Cai <chcchc88@163.com>
---
 app/test/test_ipfrag.c               |   9 +-
 lib/ip_frag/rte_ip_frag.h            |  34 +++++++
 lib/ip_frag/rte_ipv4_fragmentation.c | 174 +++++++++++++++++++++++++++++++++++
 lib/ip_frag/version.map              |   1 +
 4 files changed, 217 insertions(+), 1 deletion(-)

diff --git a/app/test/test_ipfrag.c b/app/test/test_ipfrag.c
index ba0ffd0..88cc4cd 100644
--- a/app/test/test_ipfrag.c
+++ b/app/test/test_ipfrag.c
@@ -418,10 +418,17 @@ static void ut_teardown(void)
 		}
 
 		if (tests[i].ipv == 4)
-			len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
+			if (i % 2)
+				len = rte_ipv4_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
 						       direct_pool,
 						       indirect_pool);
+			else
+				len = rte_ipv4_fragment_copy_nonseg_packet(b,
+						       pkts_out,
+						       BURST,
+						       tests[i].mtu_size,
+						       direct_pool);
 		else if (tests[i].ipv == 6)
 			len = rte_ipv6_fragment_packet(b, pkts_out, BURST,
 						       tests[i].mtu_size,
diff --git a/lib/ip_frag/rte_ip_frag.h b/lib/ip_frag/rte_ip_frag.h
index 7d2abe1..4a2b150 100644
--- a/lib/ip_frag/rte_ip_frag.h
+++ b/lib/ip_frag/rte_ip_frag.h
@@ -179,6 +179,40 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 			struct rte_mempool *pool_indirect);
 
 /**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copying
+ * them into newly allocated, non-segmented mbufs. It is mainly intended
+ * to suit the Tx MBUF_FAST_FREE offload. MBUF_FAST_FREE: the device
+ * supports an optimization for fast release of mbufs. When set, the
+ * application must guarantee that, per queue, all mbufs come from the
+ * same mempool, have refcnt = 1, and are direct and non-segmented.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   The size of the array that stores the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+__rte_experimental
+int32_t
+rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct);
+
+/**
  * This function implements reassembly of fragmented IPv4 packets.
  * Incoming mbufs should have its l2_len/l3_len fields setup correctly.
  *
diff --git a/lib/ip_frag/rte_ipv4_fragmentation.c b/lib/ip_frag/rte_ipv4_fragmentation.c
index 27a8ad2..ef5e1c5 100644
--- a/lib/ip_frag/rte_ipv4_fragmentation.c
+++ b/lib/ip_frag/rte_ipv4_fragmentation.c
@@ -259,3 +259,177 @@ static inline uint16_t __create_ipopt_frag_hdr(uint8_t *iph,
 
 	return out_pkt_pos;
 }
+
+/**
+ * IPv4 fragmentation by copy.
+ *
+ * This function implements the fragmentation of IPv4 packets by copying
+ * them into newly allocated, non-segmented mbufs. It is mainly intended
+ * to suit the Tx MBUF_FAST_FREE offload. MBUF_FAST_FREE: the device
+ * supports an optimization for fast release of mbufs. When set, the
+ * application must guarantee that, per queue, all mbufs come from the
+ * same mempool, have refcnt = 1, and are direct and non-segmented.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param nb_pkts_out
+ *   The size of the array that stores the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * errno.
+ */
+int32_t
+rte_ipv4_fragment_copy_nonseg_packet(struct rte_mbuf *pkt_in,
+	struct rte_mbuf **pkts_out,
+	uint16_t nb_pkts_out,
+	uint16_t mtu_size,
+	struct rte_mempool *pool_direct)
+{
+	struct rte_mbuf *in_seg = NULL;
+	struct rte_ipv4_hdr *in_hdr;
+	uint32_t out_pkt_pos, in_seg_data_pos;
+	uint32_t more_in_segs;
+	uint16_t fragment_offset, flag_offset, frag_size, header_len;
+	uint16_t frag_bytes_remaining;
+	uint8_t ipopt_frag_hdr[IPV4_HDR_MAX_LEN];
+	uint16_t ipopt_len;
+
+	/*
+	 * Formal parameter checking.
+	 */
+	if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+	    unlikely(nb_pkts_out == 0) || unlikely(pool_direct == NULL) ||
+	    unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+		return -EINVAL;
+
+	in_hdr = rte_pktmbuf_mtod(pkt_in, struct rte_ipv4_hdr *);
+	header_len = (in_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+	    RTE_IPV4_IHL_MULTIPLIER;
+
+	/* Check IP header length */
+	if (unlikely(pkt_in->data_len < header_len) ||
+	    unlikely(mtu_size < header_len))
+		return -EINVAL;
+
+	/*
+	 * Ensure the IP payload length of all fragments is aligned to a
+	 * multiple of 8 bytes as per RFC791 section 2.3.
+	 */
+	frag_size = RTE_ALIGN_FLOOR((mtu_size - header_len),
+				    IPV4_HDR_FO_ALIGN);
+
+	flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+	/* If Don't Fragment flag is set */
+	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
+		return -ENOTSUP;
+
+	/* Check that pkts_out is big enough to hold all fragments */
+	if (unlikely(frag_size * nb_pkts_out <
+	    (uint16_t)(pkt_in->pkt_len - header_len)))
+		return -EINVAL;
+
+	in_seg = pkt_in;
+	in_seg_data_pos = header_len;
+	out_pkt_pos = 0;
+	fragment_offset = 0;
+
+	ipopt_len = header_len - sizeof(struct rte_ipv4_hdr);
+	if (unlikely(ipopt_len > RTE_IPV4_HDR_OPT_MAX_LEN))
+		return -EINVAL;
+
+	more_in_segs = 1;
+	while (likely(more_in_segs)) {
+		struct rte_mbuf *out_pkt = NULL;
+		uint32_t more_out_segs;
+		struct rte_ipv4_hdr *out_hdr;
+
+		/* Allocate direct buffer */
+		out_pkt = rte_pktmbuf_alloc(pool_direct);
+		if (unlikely(out_pkt == NULL)) {
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -ENOMEM;
+		}
+		if (unlikely(rte_pktmbuf_tailroom(out_pkt) < frag_size)) {
+			rte_pktmbuf_free(out_pkt);
+			__free_fragments(pkts_out, out_pkt_pos);
+			return -EINVAL;
+		}
+
+		/* Reserve space for the IP header that will be built later */
+		out_pkt->data_len = header_len;
+		out_pkt->pkt_len = header_len;
+		frag_bytes_remaining = frag_size;
+
+		more_out_segs = 1;
+		while (likely(more_out_segs && more_in_segs)) {
+			uint32_t len;
+
+			len = frag_bytes_remaining;
+			if (len > (in_seg->data_len - in_seg_data_pos))
+				len = in_seg->data_len - in_seg_data_pos;
+
+			memcpy(rte_pktmbuf_mtod_offset(out_pkt, char *,
+					out_pkt->data_len),
+				rte_pktmbuf_mtod_offset(in_seg, char *,
+					in_seg_data_pos),
+				len);
+
+			in_seg_data_pos += len;
+			frag_bytes_remaining -= len;
+			out_pkt->data_len += len;
+
+			/* Current output packet (i.e. fragment) done ? */
+			if (unlikely(frag_bytes_remaining == 0))
+				more_out_segs = 0;
+
+			/* Current input segment done ? */
+			if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+				in_seg = in_seg->next;
+				in_seg_data_pos = 0;
+
+				if (unlikely(in_seg == NULL))
+					more_in_segs = 0;
+			}
+		}
+
+		/* Build the IP header */
+
+		out_pkt->pkt_len = out_pkt->data_len;
+		out_hdr = rte_pktmbuf_mtod(out_pkt, struct rte_ipv4_hdr *);
+
+		__fill_ipv4hdr_frag(out_hdr, in_hdr, header_len,
+		    (uint16_t)out_pkt->pkt_len,
+		    flag_offset, fragment_offset, more_in_segs);
+
+		if (unlikely((fragment_offset == 0) && (ipopt_len) &&
+			    ((flag_offset & RTE_IPV4_HDR_OFFSET_MASK) == 0))) {
+			ipopt_len = __create_ipopt_frag_hdr((uint8_t *)in_hdr,
+				ipopt_len, ipopt_frag_hdr);
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+
+			header_len = sizeof(struct rte_ipv4_hdr) + ipopt_len;
+			in_hdr = (struct rte_ipv4_hdr *)ipopt_frag_hdr;
+		} else {
+			fragment_offset = (uint16_t)(fragment_offset +
+				out_pkt->pkt_len - header_len);
+			out_pkt->l3_len = header_len;
+		}
+
+		/* Write the fragment to the output list */
+		pkts_out[out_pkt_pos] = out_pkt;
+		out_pkt_pos++;
+	}
+
+	return out_pkt_pos;
+}
diff --git a/lib/ip_frag/version.map b/lib/ip_frag/version.map
index b9c1cca..8aad839 100644
--- a/lib/ip_frag/version.map
+++ b/lib/ip_frag/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
 	global:
 
 	rte_ip_frag_table_del_expired_entries;
+	rte_ipv4_fragment_copy_nonseg_packet;
 };
-- 
1.8.3.1
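
A minimal usage sketch for the new API (the names pkt, frags and
direct_pool are assumptions; the mbuf data is assumed to start at the
IPv4 header):

	/* With MTU 1500 and a 20-byte IPv4 header the payload budget is
	 * 1480 bytes, already a multiple of 8, so RTE_ALIGN_FLOOR() keeps
	 * it; with 4 bytes of IP options (24-byte header) the 1476-byte
	 * budget would be floored to 1472, per RFC 791's 8-byte
	 * fragment-offset granularity. */
	struct rte_mbuf *frags[4];
	int32_t nb = rte_ipv4_fragment_copy_nonseg_packet(pkt, frags,
			RTE_DIM(frags), 1500, direct_pool);
	if (nb < 0)
		printf("fragmentation failed: %s\n", rte_strerror(-nb));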



* Re: [PATCH v7] ip_frag: add IPv4 fragment copy packet API
  2022-08-08  1:48           ` [PATCH v7] " Huichao Cai
@ 2022-08-08 22:29             ` Konstantin Ananyev
  2022-08-29 14:22               ` Thomas Monjalon
  0 siblings, 1 reply; 26+ messages in thread
From: Konstantin Ananyev @ 2022-08-08 22:29 UTC (permalink / raw)
  To: Huichao Cai, dev

08/08/2022 02:48, Huichao Cai writes:
> Some NIC drivers support the MBUF_FAST_FREE offload (the device supports
> an optimization for fast release of mbufs; when set, the application must
> guarantee that, per queue, all mbufs come from the same mempool, have
> refcnt = 1, and are direct and non-segmented). In order to adapt to this
> offload function, add this API. Add some test data for this API.
> 
> Signed-off-by: Huichao Cai <chcchc88@163.com>
> ---


Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>



* Re: [PATCH v7] ip_frag: add IPv4 fragment copy packet API
  2022-08-08 22:29             ` Konstantin Ananyev
@ 2022-08-29 14:22               ` Thomas Monjalon
  0 siblings, 0 replies; 26+ messages in thread
From: Thomas Monjalon @ 2022-08-29 14:22 UTC (permalink / raw)
  To: Huichao Cai; +Cc: dev, Konstantin Ananyev

09/08/2022 00:29, Konstantin Ananyev:
> 08/08/2022 02:48, Huichao Cai пишет:
> > Some NIC drivers support the MBUF_FAST_FREE offload (the device supports
> > an optimization for fast release of mbufs; when set, the application must
> > guarantee that, per queue, all mbufs come from the same mempool, have
> > refcnt = 1, and are direct and non-segmented). In order to adapt to this
> > offload function, add this API. Add some test data for this API.
> > 
> > Signed-off-by: Huichao Cai <chcchc88@163.com>
> Acked-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>

Applied, thanks.




Thread overview: 26+ messages
2022-06-09  2:39 [PATCH v1] ip_frag: add IPv4 fragment copy packet API Huichao Cai
2022-06-09 14:19 ` [PATCH v2] " Huichao Cai
2022-07-10 23:35   ` Konstantin Ananyev
2022-07-11  9:14     ` Konstantin Ananyev
2022-07-15  8:05       ` Huichao Cai
2022-07-19  8:19         ` Konstantin Ananyev
2022-07-22 13:01   ` [PATCH v3] " Huichao Cai
2022-07-22 14:42     ` Morten Brørup
2022-07-22 14:49     ` Stephen Hemminger
2022-07-22 15:52       ` Morten Brørup
2022-07-22 15:58         ` Huichao Cai
2022-07-22 16:14           ` Morten Brørup
2022-07-22 22:35             ` Konstantin Ananyev
2022-07-23  8:24               ` Morten Brørup
2022-07-23 18:25                 ` Konstantin Ananyev
2022-07-23 22:27                   ` Morten Brørup
2022-07-22 14:49     ` [PATCH v4] " Huichao Cai
2022-07-24  4:50       ` [PATCH v5] " Huichao Cai
2022-07-24  8:10         ` [PATCH v6] " Huichao Cai
2022-07-25 15:42           ` Stephen Hemminger
2022-07-26  1:22             ` Huichao Cai
2022-08-07 11:49               ` Konstantin Ananyev
2022-08-07 11:45           ` Konstantin Ananyev
2022-08-08  1:48           ` [PATCH v7] " Huichao Cai
2022-08-08 22:29             ` Konstantin Ananyev
2022-08-29 14:22               ` Thomas Monjalon
