All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] kni: use bulk functions to allocate and free mbufs
@ 2016-12-29 21:50 Sergey Vyazmitinov
  2017-01-11 10:39 ` Ananyev, Konstantin
  2017-01-11 16:17 ` Stephen Hemminger
  0 siblings, 2 replies; 13+ messages in thread
From: Sergey Vyazmitinov @ 2016-12-29 21:50 UTC (permalink / raw)
  To: olivier.matz; +Cc: ferruh.yigit, dev, Sergey Vyazmitinov

Optimized kni_allocate_mbufs and kni_free_mbufs by using mbuf bulk
functions. This can improve performance more than two times.

Signed-off-by: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
---
 lib/librte_kni/rte_kni.c         | 44 +++++++++++++++++-----------------------
 lib/librte_kni/rte_kni_fifo.h    | 18 ++++++++++++++++
 lib/librte_mbuf/rte_mbuf.h       | 32 +++++++++++++++++++++++++++++
 lib/librte_mempool/rte_mempool.h |  6 ++++++
 4 files changed, 75 insertions(+), 25 deletions(-)

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index a80cefd..cb4cfa6 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -590,22 +590,21 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 static void
 kni_free_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
+	unsigned freeing;
 	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 
-	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
-	if (likely(ret > 0)) {
-		for (i = 0; i < ret; i++)
-			rte_pktmbuf_free(pkts[i]);
+	freeing = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+	if (likely(freeing > 0)) {
+		rte_pktmbuf_free_bulk(kni->pktmbuf_pool, pkts, freeing);
 	}
 }
 
 static void
 kni_allocate_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
-	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-	void *phys[MAX_MBUF_BURST_NUM];
+	unsigned count, allocated, put;
+	struct rte_mbuf *pkts[KNI_FIFO_COUNT_MAX];
+	void *phys[KNI_FIFO_COUNT_MAX];
 
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
 			 offsetof(struct rte_kni_mbuf, pool));
@@ -628,28 +627,23 @@ kni_allocate_mbufs(struct rte_kni *kni)
 		return;
 	}
 
-	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
-		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
-		if (unlikely(pkts[i] == NULL)) {
-			/* Out of memory */
-			RTE_LOG(ERR, KNI, "Out of memory\n");
-			break;
-		}
-		phys[i] = va2pa(pkts[i]);
-	}
+	/* Calculate alloc queue free space */
+	count = kni_fifo_free_count(kni->alloc_q);
 
-	/* No pkt mbuf alocated */
-	if (i <= 0)
-		return;
+	/* Get buffers from mempool */
+	allocated = rte_pktmbuf_alloc_bulk(kni->pktmbuf_pool, pkts, count);
+	for (unsigned i = 0; i < allocated; i++)
+		phys[i] = va2pa(pkts[i]);
 
-	ret = kni_fifo_put(kni->alloc_q, phys, i);
+	/* Put buffers into alloc queue */
+	put = kni_fifo_put(kni->alloc_q, (void **)phys, allocated);
 
 	/* Check if any mbufs not put into alloc_q, and then free them */
-	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
-		int j;
-
-		for (j = ret; j < i; j++)
+	if (unlikely(put < allocated)) {
+		for (unsigned j = put; j < allocated; j++) {
+			RTE_LOG(ERR, KNI, "Free allocated buffer\n");
 			rte_pktmbuf_free(pkts[j]);
+		}
 	}
 }
 
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index 8cb8587..361ddb0 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -91,3 +91,21 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
 	fifo->read = new_read;
 	return i;
 }
+
+/**
+ * Get the num of elements in the fifo
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the num of available elements in the fifo
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75..707c300 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -1261,6 +1261,38 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 }
 
 /**
+ * Free n packets mbuf back into its original mempool.
+ *
+ * Free each mbuf, and all its segments in case of chained buffers. Each
+ * segment is added back into its original mempool.
+ *
+ * @param mp
+ *   The packets mempool.
+ * @param mbufs
+ *   The packets mbufs array to be freed.
+ * @param n
+ *   Number of packets.
+ */
+static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
+		struct rte_mbuf **mbufs, unsigned n)
+{
+	struct rte_mbuf *mbuf, *m_next;
+	unsigned i;
+	for (i = 0; i < n; ++i) {
+		mbuf = mbufs[i];
+		__rte_mbuf_sanity_check(mbuf, 1);
+
+		mbuf = mbuf->next;
+		while (mbuf != NULL) {
+			m_next = mbuf->next;
+			rte_pktmbuf_free_seg(mbuf);
+			mbuf = m_next;
+		}
+	}
+	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
+}
+
+/**
  * Creates a "clone" of the given packet mbuf.
  *
  * Walks through all segments of the given packet mbuf, and for each of them:
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index d315d42..e612a0a 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1497,6 +1497,12 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 	return rte_mempool_get_bulk(mp, obj_p, 1);
 }
 
+static inline int __attribute__((always_inline))
+rte_mempool_get_n(struct rte_mempool *mp, void **obj_p, int n)
+{
+	return rte_mempool_get_bulk(mp, obj_p, n);
+}
+
 /**
  * Return the number of entries in the mempool.
  *
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2016-12-29 21:50 [PATCH] kni: use bulk functions to allocate and free mbufs Sergey Vyazmitinov
@ 2017-01-11 10:39 ` Ananyev, Konstantin
  2017-01-11 16:17 ` Stephen Hemminger
  1 sibling, 0 replies; 13+ messages in thread
From: Ananyev, Konstantin @ 2017-01-11 10:39 UTC (permalink / raw)
  To: Sergey Vyazmitinov, olivier.matz; +Cc: Yigit, Ferruh, dev

Hi Sergey,

...
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 4476d75..707c300 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -1261,6 +1261,38 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
>  }
> 
>  /**
> + * Free n packets mbuf back into its original mempool.
> + *
> + * Free each mbuf, and all its segments in case of chained buffers. Each
> + * segment is added back into its original mempool.
> + *
> + * @param mp
> + *   The packets mempool.
> + * @param mbufs
> + *   The packets mbufs array to be freed.
> + * @param n
> + *   Number of packets.
> + */
> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> +		struct rte_mbuf **mbufs, unsigned n)
> +{
> +	struct rte_mbuf *mbuf, *m_next;
> +	unsigned i;
> +	for (i = 0; i < n; ++i) {
> +		mbuf = mbufs[i];
> +		__rte_mbuf_sanity_check(mbuf, 1);
> +
> +		mbuf = mbuf->next;
> +		while (mbuf != NULL) {
> +			m_next = mbuf->next;
> +			rte_pktmbuf_free_seg(mbuf);
> +			mbuf = m_next;
> +		}

I think you forgot to call __rte_pktmbuf_prefree_seg(mbufs[i]); somewhere here.
Konstantin

> +	}
> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> +}
> +
> +/**
>   * Creates a "clone" of the given packet mbuf.
>   *
>   * Walks through all segments of the given packet mbuf, and for each of them:
> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index d315d42..e612a0a 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -1497,6 +1497,12 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
>  	return rte_mempool_get_bulk(mp, obj_p, 1);
>  }
> 
> +static inline int __attribute__((always_inline))
> +rte_mempool_get_n(struct rte_mempool *mp, void **obj_p, int n)
> +{
> +	return rte_mempool_get_bulk(mp, obj_p, n);
> +}
> +
>  /**
>   * Return the number of entries in the mempool.
>   *
> --
> 2.7.4

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2016-12-29 21:50 [PATCH] kni: use bulk functions to allocate and free mbufs Sergey Vyazmitinov
  2017-01-11 10:39 ` Ananyev, Konstantin
@ 2017-01-11 16:17 ` Stephen Hemminger
  2017-01-11 16:38   ` Olivier MATZ
                     ` (2 more replies)
  1 sibling, 3 replies; 13+ messages in thread
From: Stephen Hemminger @ 2017-01-11 16:17 UTC (permalink / raw)
  To: Sergey Vyazmitinov; +Cc: olivier.matz, ferruh.yigit, dev

On Fri, 30 Dec 2016 04:50:16 +0700
Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:

>  /**
> + * Free n packets mbuf back into its original mempool.
> + *
> + * Free each mbuf, and all its segments in case of chained buffers. Each
> + * segment is added back into its original mempool.
> + *
> + * @param mp
> + *   The packets mempool.
> + * @param mbufs
> + *   The packets mbufs array to be freed.
> + * @param n
> + *   Number of packets.
> + */
> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> +		struct rte_mbuf **mbufs, unsigned n)
> +{
> +	struct rte_mbuf *mbuf, *m_next;
> +	unsigned i;
> +	for (i = 0; i < n; ++i) {
> +		mbuf = mbufs[i];
> +		__rte_mbuf_sanity_check(mbuf, 1);
> +
> +		mbuf = mbuf->next;
> +		while (mbuf != NULL) {
> +			m_next = mbuf->next;
> +			rte_pktmbuf_free_seg(mbuf);
> +			mbuf = m_next;
> +		}
> +	}
> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> +}

The mbufs may come from different pools. You need to handle that.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 16:17 ` Stephen Hemminger
@ 2017-01-11 16:38   ` Olivier MATZ
  2017-01-11 17:00   ` Ferruh Yigit
  2017-01-11 17:28   ` Ananyev, Konstantin
  2 siblings, 0 replies; 13+ messages in thread
From: Olivier MATZ @ 2017-01-11 16:38 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: Sergey Vyazmitinov, olivier.matz, ferruh.yigit, dev

On Wed, 11 Jan 2017 08:17:59 -0800, Stephen Hemminger
<stephen@networkplumber.org> wrote:
> On Fri, 30 Dec 2016 04:50:16 +0700
> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> 
> >  /**
> > + * Free n packets mbuf back into its original mempool.
> > + *
> > + * Free each mbuf, and all its segments in case of chained
> > buffers. Each
> > + * segment is added back into its original mempool.
> > + *
> > + * @param mp
> > + *   The packets mempool.
> > + * @param mbufs
> > + *   The packets mbufs array to be freed.
> > + * @param n
> > + *   Number of packets.
> > + */
> > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > +		struct rte_mbuf **mbufs, unsigned n)
> > +{
> > +	struct rte_mbuf *mbuf, *m_next;
> > +	unsigned i;
> > +	for (i = 0; i < n; ++i) {
> > +		mbuf = mbufs[i];
> > +		__rte_mbuf_sanity_check(mbuf, 1);
> > +
> > +		mbuf = mbuf->next;
> > +		while (mbuf != NULL) {
> > +			m_next = mbuf->next;
> > +			rte_pktmbuf_free_seg(mbuf);
> > +			mbuf = m_next;
> > +		}
> > +	}
> > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > +}  
> 
> The mbufs may come from different pools. You need to handle that.

I have an implementation for that in an endless-work-in-progress
patchset:

 /**
+ * Free several mbufs segments.
+ *
+ * This function frees a table of mbufs, ensuring that each mbuf is
+ * returned into its original pool. It is the equivalent of calling
+ * rte_pktmbuf_free_seg() on all mbuf of the table.
+ *
+ * @param mbufs
+ *    Array of mbuf pointers.
+ * @param n
+ *    Array size.
+ */
+static inline void
+rte_pktmbuf_free_seg_bulk(struct rte_mbuf * const *m_tab, unsigned n)
+{
+       struct rte_mbuf *m;
+       struct rte_mbuf * const *start = NULL;
+       unsigned n_free = 0, i;
+       struct rte_mempool *free_pool = NULL;
+
+       for (i = 0; i < n; i++) {
+               m = m_tab[i];
+
+               if (__rte_pktmbuf_prefree_seg(m) == NULL) {
+                       if (n_free != 0)
+                               rte_mempool_put_bulk(free_pool,
+                                       (void * const *)start, n_free);
+
+                       free_pool = NULL;
+                       n_free = 0;
+                       continue;
+               }
+
+               if (unlikely(m->pool != free_pool)) {
+                       if (n_free != 0)
+                               rte_mempool_put_bulk(free_pool,
+                                       (void * const *)start, n_free);
+
+                       free_pool = m->pool;
+                       start = &m_tab[i];
+                       n_free = 1;
+               } else {
+                       n_free++;
+               }
+       }
+
+       if (n_free != 0)
+               rte_mempool_put_bulk(free_pool,
+                       (void * const *)start, n_free);
+}


In the same patch, I also remove the tx_free_bulk_mbuf() functions
that does almost the same job in specific drivers. Unfortunately,
this patch needs to be rebased and better tested, so it's not ready yet.


Regards,
Olivier

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 16:17 ` Stephen Hemminger
  2017-01-11 16:38   ` Olivier MATZ
@ 2017-01-11 17:00   ` Ferruh Yigit
  2017-01-11 17:28   ` Ananyev, Konstantin
  2 siblings, 0 replies; 13+ messages in thread
From: Ferruh Yigit @ 2017-01-11 17:00 UTC (permalink / raw)
  To: Stephen Hemminger, Sergey Vyazmitinov; +Cc: olivier.matz, dev

Hi Sergey,

On 1/11/2017 4:17 PM, Stephen Hemminger wrote:
> On Fri, 30 Dec 2016 04:50:16 +0700
> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> 
>>  /**
>> + * Free n packets mbuf back into its original mempool.
>> + *
>> + * Free each mbuf, and all its segments in case of chained buffers. Each
>> + * segment is added back into its original mempool.
>> + *
>> + * @param mp
>> + *   The packets mempool.
>> + * @param mbufs
>> + *   The packets mbufs array to be freed.
>> + * @param n
>> + *   Number of packets.
>> + */
>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
>> +		struct rte_mbuf **mbufs, unsigned n)
>> +{
>> +	struct rte_mbuf *mbuf, *m_next;
>> +	unsigned i;
>> +	for (i = 0; i < n; ++i) {
>> +		mbuf = mbufs[i];
>> +		__rte_mbuf_sanity_check(mbuf, 1);
>> +
>> +		mbuf = mbuf->next;
>> +		while (mbuf != NULL) {
>> +			m_next = mbuf->next;
>> +			rte_pktmbuf_free_seg(mbuf);
>> +			mbuf = m_next;
>> +		}
>> +	}
>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
>> +}
> 
> The mbufs may come from different pools. You need to handle that.
> 

As Stephen pointed mbufs can be from different mempools. But still can
benefit from bulk allocation part of your patch, would you mind
separating patches for alloc and free?

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 16:17 ` Stephen Hemminger
  2017-01-11 16:38   ` Olivier MATZ
  2017-01-11 17:00   ` Ferruh Yigit
@ 2017-01-11 17:28   ` Ananyev, Konstantin
  2017-01-11 17:35     ` Stephen Hemminger
  2 siblings, 1 reply; 13+ messages in thread
From: Ananyev, Konstantin @ 2017-01-11 17:28 UTC (permalink / raw)
  To: Stephen Hemminger, Sergey Vyazmitinov; +Cc: olivier.matz, Yigit, Ferruh, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> Sent: Wednesday, January 11, 2017 4:18 PM
> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> 
> On Fri, 30 Dec 2016 04:50:16 +0700
> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> 
> >  /**
> > + * Free n packets mbuf back into its original mempool.
> > + *
> > + * Free each mbuf, and all its segments in case of chained buffers. Each
> > + * segment is added back into its original mempool.
> > + *
> > + * @param mp
> > + *   The packets mempool.
> > + * @param mbufs
> > + *   The packets mbufs array to be freed.
> > + * @param n
> > + *   Number of packets.
> > + */
> > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > +		struct rte_mbuf **mbufs, unsigned n)
> > +{
> > +	struct rte_mbuf *mbuf, *m_next;
> > +	unsigned i;
> > +	for (i = 0; i < n; ++i) {
> > +		mbuf = mbufs[i];
> > +		__rte_mbuf_sanity_check(mbuf, 1);
> > +
> > +		mbuf = mbuf->next;
> > +		while (mbuf != NULL) {
> > +			m_next = mbuf->next;
> > +			rte_pktmbuf_free_seg(mbuf);
> > +			mbuf = m_next;
> > +		}
> > +	}
> > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > +}
> 
> The mbufs may come from different pools. You need to handle that.

I suppose both situations are possible:
1) user knows off-hand that all mbufs in the group are from the same mempool
2) user can't guarantee that all mbufs in the group are from same mempool.

As I understand that patch is for case 1) only.
For 2) it could be a separate function and separate patch.

Konstantin

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 17:28   ` Ananyev, Konstantin
@ 2017-01-11 17:35     ` Stephen Hemminger
  2017-01-11 17:43       ` Ananyev, Konstantin
  0 siblings, 1 reply; 13+ messages in thread
From: Stephen Hemminger @ 2017-01-11 17:35 UTC (permalink / raw)
  To: Ananyev, Konstantin; +Cc: Sergey Vyazmitinov, olivier.matz, Yigit, Ferruh, dev

On Wed, 11 Jan 2017 17:28:21 +0000
"Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:

> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> > Sent: Wednesday, January 11, 2017 4:18 PM
> > To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> > Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> > 
> > On Fri, 30 Dec 2016 04:50:16 +0700
> > Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> >   
> > >  /**
> > > + * Free n packets mbuf back into its original mempool.
> > > + *
> > > + * Free each mbuf, and all its segments in case of chained buffers. Each
> > > + * segment is added back into its original mempool.
> > > + *
> > > + * @param mp
> > > + *   The packets mempool.
> > > + * @param mbufs
> > > + *   The packets mbufs array to be freed.
> > > + * @param n
> > > + *   Number of packets.
> > > + */
> > > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > > +		struct rte_mbuf **mbufs, unsigned n)
> > > +{
> > > +	struct rte_mbuf *mbuf, *m_next;
> > > +	unsigned i;
> > > +	for (i = 0; i < n; ++i) {
> > > +		mbuf = mbufs[i];
> > > +		__rte_mbuf_sanity_check(mbuf, 1);
> > > +
> > > +		mbuf = mbuf->next;
> > > +		while (mbuf != NULL) {
> > > +			m_next = mbuf->next;
> > > +			rte_pktmbuf_free_seg(mbuf);
> > > +			mbuf = m_next;
> > > +		}
> > > +	}
> > > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > > +}  
> > 
> > The mbufs may come from different pools. You need to handle that.  
> 
> I suppose both stituations are possible:
> 1) user knows off-hand that all mbufs in the group are from the same mempool
> 2) user can't guarantee that all mbufs in the group are from same mempool.
> 
> As I understand that patch is for case 1) only.
> For 2) it could be a separate function and separate patch.
> 
> Konstantin
> 
> 

Please don't make unnecessary assumptions in pursuit of minor optimizations.
It is trivial to write a correct free bulk that handles pool changing.
Also the free_seg could be bulked as well.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 17:35     ` Stephen Hemminger
@ 2017-01-11 17:43       ` Ananyev, Konstantin
  2017-01-11 17:47         ` Ferruh Yigit
  0 siblings, 1 reply; 13+ messages in thread
From: Ananyev, Konstantin @ 2017-01-11 17:43 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: Sergey Vyazmitinov, olivier.matz, Yigit, Ferruh, dev



> -----Original Message-----
> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Wednesday, January 11, 2017 5:36 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> 
> On Wed, 11 Jan 2017 17:28:21 +0000
> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
> 
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> > > Sent: Wednesday, January 11, 2017 4:18 PM
> > > To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> > > Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> > >
> > > On Fri, 30 Dec 2016 04:50:16 +0700
> > > Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> > >
> > > >  /**
> > > > + * Free n packets mbuf back into its original mempool.
> > > > + *
> > > > + * Free each mbuf, and all its segments in case of chained buffers. Each
> > > > + * segment is added back into its original mempool.
> > > > + *
> > > > + * @param mp
> > > > + *   The packets mempool.
> > > > + * @param mbufs
> > > > + *   The packets mbufs array to be freed.
> > > > + * @param n
> > > > + *   Number of packets.
> > > > + */
> > > > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > > > +		struct rte_mbuf **mbufs, unsigned n)
> > > > +{
> > > > +	struct rte_mbuf *mbuf, *m_next;
> > > > +	unsigned i;
> > > > +	for (i = 0; i < n; ++i) {
> > > > +		mbuf = mbufs[i];
> > > > +		__rte_mbuf_sanity_check(mbuf, 1);
> > > > +
> > > > +		mbuf = mbuf->next;
> > > > +		while (mbuf != NULL) {
> > > > +			m_next = mbuf->next;
> > > > +			rte_pktmbuf_free_seg(mbuf);
> > > > +			mbuf = m_next;
> > > > +		}
> > > > +	}
> > > > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > > > +}
> > >
> > > The mbufs may come from different pools. You need to handle that.
> >
> > I suppose both stituations are possible:
> > 1) user knows off-hand that all mbufs in the group are from the same mempool
> > 2) user can't guarantee that all mbufs in the group are from same mempool.
> >
> > As I understand that patch is for case 1) only.
> > For 2) it could be a separate function and separate patch.
> >
> > Konstantin
> >
> >
> 
> Please don't make unnecessary assumptions in pursuit of minor optimizations.

I don't suggest to make *any* assumptions.
What I am saying is we can have 2 functions for two different cases.
Obviously we'll have to document it properly.
Konstantin

> It is trivial to write a correct free bulk that handles pool changing.
> Also the free_seg could be bulked as well.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 17:43       ` Ananyev, Konstantin
@ 2017-01-11 17:47         ` Ferruh Yigit
  2017-01-11 18:25           ` Ananyev, Konstantin
  0 siblings, 1 reply; 13+ messages in thread
From: Ferruh Yigit @ 2017-01-11 17:47 UTC (permalink / raw)
  To: Ananyev, Konstantin, Stephen Hemminger
  Cc: Sergey Vyazmitinov, olivier.matz, dev

On 1/11/2017 5:43 PM, Ananyev, Konstantin wrote:
> 
> 
>> -----Original Message-----
>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
>> Sent: Wednesday, January 11, 2017 5:36 PM
>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
>> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>
>> On Wed, 11 Jan 2017 17:28:21 +0000
>> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
>>
>>>> -----Original Message-----
>>>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
>>>> Sent: Wednesday, January 11, 2017 4:18 PM
>>>> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
>>>> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
>>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>>>
>>>> On Fri, 30 Dec 2016 04:50:16 +0700
>>>> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
>>>>
>>>>>  /**
>>>>> + * Free n packets mbuf back into its original mempool.
>>>>> + *
>>>>> + * Free each mbuf, and all its segments in case of chained buffers. Each
>>>>> + * segment is added back into its original mempool.
>>>>> + *
>>>>> + * @param mp
>>>>> + *   The packets mempool.
>>>>> + * @param mbufs
>>>>> + *   The packets mbufs array to be freed.
>>>>> + * @param n
>>>>> + *   Number of packets.
>>>>> + */
>>>>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
>>>>> +		struct rte_mbuf **mbufs, unsigned n)
>>>>> +{
>>>>> +	struct rte_mbuf *mbuf, *m_next;
>>>>> +	unsigned i;
>>>>> +	for (i = 0; i < n; ++i) {
>>>>> +		mbuf = mbufs[i];
>>>>> +		__rte_mbuf_sanity_check(mbuf, 1);
>>>>> +
>>>>> +		mbuf = mbuf->next;
>>>>> +		while (mbuf != NULL) {
>>>>> +			m_next = mbuf->next;
>>>>> +			rte_pktmbuf_free_seg(mbuf);
>>>>> +			mbuf = m_next;
>>>>> +		}
>>>>> +	}
>>>>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
>>>>> +}
>>>>
>>>> The mbufs may come from different pools. You need to handle that.
>>>
>>> I suppose both stituations are possible:
>>> 1) user knows off-hand that all mbufs in the group are from the same mempool
>>> 2) user can't guarantee that all mbufs in the group are from same mempool.
>>>
>>> As I understand that patch is for case 1) only.
>>> For 2) it could be a separate function and separate patch.
>>>
>>> Konstantin
>>>
>>>
>>
>> Please don't make unnecessary assumptions in pursuit of minor optimizations.
> 
> I don't suggest to make *any* assumptions.
> What I am saying we  can have 2 functions for two different cases.

kni_free_mbufs() is a static function. Even if the user knows whether all
mbufs are from the same pool or not, they can't pass this information to the free function.

Of course this information can be passed via a new API, or as an update to
the existing API, but I think it is better to update the free function to cover
both cases instead of getting this information from the user.

> Obviously we'll have to document it properly.
> Konstantin
> 
>> It is trivial to write a correct free bulk that handles pool changing.
>> Also the free_seg could be bulked as well.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 17:47         ` Ferruh Yigit
@ 2017-01-11 18:25           ` Ananyev, Konstantin
  2017-01-11 18:41             ` Ferruh Yigit
  0 siblings, 1 reply; 13+ messages in thread
From: Ananyev, Konstantin @ 2017-01-11 18:25 UTC (permalink / raw)
  To: Yigit, Ferruh, Stephen Hemminger; +Cc: Sergey Vyazmitinov, olivier.matz, dev



> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, January 11, 2017 5:48 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Stephen Hemminger <stephen@networkplumber.org>
> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> 
> On 1/11/2017 5:43 PM, Ananyev, Konstantin wrote:
> >
> >
> >> -----Original Message-----
> >> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> >> Sent: Wednesday, January 11, 2017 5:36 PM
> >> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> >> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>;
> dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> >>
> >> On Wed, 11 Jan 2017 17:28:21 +0000
> >> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
> >>
> >>>> -----Original Message-----
> >>>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> >>>> Sent: Wednesday, January 11, 2017 4:18 PM
> >>>> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> >>>> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> >>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> >>>>
> >>>> On Fri, 30 Dec 2016 04:50:16 +0700
> >>>> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> >>>>
> >>>>>  /**
> >>>>> + * Free n packets mbuf back into its original mempool.
> >>>>> + *
> >>>>> + * Free each mbuf, and all its segments in case of chained buffers. Each
> >>>>> + * segment is added back into its original mempool.
> >>>>> + *
> >>>>> + * @param mp
> >>>>> + *   The packets mempool.
> >>>>> + * @param mbufs
> >>>>> + *   The packets mbufs array to be freed.
> >>>>> + * @param n
> >>>>> + *   Number of packets.
> >>>>> + */
> >>>>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> >>>>> +		struct rte_mbuf **mbufs, unsigned n)
> >>>>> +{
> >>>>> +	struct rte_mbuf *mbuf, *m_next;
> >>>>> +	unsigned i;
> >>>>> +	for (i = 0; i < n; ++i) {
> >>>>> +		mbuf = mbufs[i];
> >>>>> +		__rte_mbuf_sanity_check(mbuf, 1);
> >>>>> +
> >>>>> +		mbuf = mbuf->next;
> >>>>> +		while (mbuf != NULL) {
> >>>>> +			m_next = mbuf->next;
> >>>>> +			rte_pktmbuf_free_seg(mbuf);
> >>>>> +			mbuf = m_next;
> >>>>> +		}
> >>>>> +	}
> >>>>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> >>>>> +}
> >>>>
> >>>> The mbufs may come from different pools. You need to handle that.
> >>>
> >>> I suppose both stituations are possible:
> >>> 1) user knows off-hand that all mbufs in the group are from the same mempool
> >>> 2) user can't guarantee that all mbufs in the group are from same mempool.
> >>>
> >>> As I understand that patch is for case 1) only.
> >>> For 2) it could be a separate function and separate patch.
> >>>
> >>> Konstantin
> >>>
> >>>
> >>
> >> Please don't make unnecessary assumptions in pursuit of minor optimizations.
> >
> > I don't suggest to make *any* assumptions.
> > What I am saying we  can have 2 functions for two different cases.
> 
> kni_free_mbufs() is static function. Even user knows if all mbufs are
> some same pool or not, can't pass this information to the free function.
> 
> Of course this information can be passed via new API, or as an update to
> exiting API, but I think it is better to update free function to cover
> both cases instead of getting this information from user.

I suppose misunderstanding came from the fact that kni_free_mbufs()
is modified to use rte_pktmbuf_free_bulk(mp, mbufs, n).
I am not talking about kni part of the patch
(to be honest I didn't pay much attention to it).
What I am saying is that there are many situations when the user knows off-hand
that all mbufs in that group are from the same mempool, and such a
function will be useful too.
BTW, for my own curiosity, how it could happen with KNI that 
kni_fifo_get() would return mbufs not from kni->pktmbuf_pool
(I am not really familiar with KNI and its use-cases)?
Konstantin

> 
> > Obviously we'll have to document it properly.
> > Konstantin
> >
> >> It is trivial to write a correct free bulk that handles pool changing.
> >> Also the free_seg could be bulked as well.

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 18:25           ` Ananyev, Konstantin
@ 2017-01-11 18:41             ` Ferruh Yigit
  2017-01-11 18:56               ` Stephen Hemminger
  0 siblings, 1 reply; 13+ messages in thread
From: Ferruh Yigit @ 2017-01-11 18:41 UTC (permalink / raw)
  To: Ananyev, Konstantin, Stephen Hemminger
  Cc: Sergey Vyazmitinov, olivier.matz, dev

On 1/11/2017 6:25 PM, Ananyev, Konstantin wrote:
> 
> 
>> -----Original Message-----
>> From: Yigit, Ferruh
>> Sent: Wednesday, January 11, 2017 5:48 PM
>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Stephen Hemminger <stephen@networkplumber.org>
>> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>
>> On 1/11/2017 5:43 PM, Ananyev, Konstantin wrote:
>>>
>>>
>>>> -----Original Message-----
>>>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
>>>> Sent: Wednesday, January 11, 2017 5:36 PM
>>>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
>>>> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>;
>> dev@dpdk.org
>>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>>>
>>>> On Wed, 11 Jan 2017 17:28:21 +0000
>>>> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
>>>>
>>>>>> -----Original Message-----
>>>>>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
>>>>>> Sent: Wednesday, January 11, 2017 4:18 PM
>>>>>> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
>>>>>> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
>>>>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>>>>>
>>>>>> On Fri, 30 Dec 2016 04:50:16 +0700
>>>>>> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
>>>>>>
>>>>>>>  /**
>>>>>>> + * Free n packets mbuf back into its original mempool.
>>>>>>> + *
>>>>>>> + * Free each mbuf, and all its segments in case of chained buffers. Each
>>>>>>> + * segment is added back into its original mempool.
>>>>>>> + *
>>>>>>> + * @param mp
>>>>>>> + *   The packets mempool.
>>>>>>> + * @param mbufs
>>>>>>> + *   The packets mbufs array to be freed.
>>>>>>> + * @param n
>>>>>>> + *   Number of packets.
>>>>>>> + */
>>>>>>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
>>>>>>> +		struct rte_mbuf **mbufs, unsigned n)
>>>>>>> +{
>>>>>>> +	struct rte_mbuf *mbuf, *m_next;
>>>>>>> +	unsigned i;
>>>>>>> +	for (i = 0; i < n; ++i) {
>>>>>>> +		mbuf = mbufs[i];
>>>>>>> +		__rte_mbuf_sanity_check(mbuf, 1);
>>>>>>> +
>>>>>>> +		mbuf = mbuf->next;
>>>>>>> +		while (mbuf != NULL) {
>>>>>>> +			m_next = mbuf->next;
>>>>>>> +			rte_pktmbuf_free_seg(mbuf);
>>>>>>> +			mbuf = m_next;
>>>>>>> +		}
>>>>>>> +	}
>>>>>>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
>>>>>>> +}
>>>>>>
>>>>>> The mbufs may come from different pools. You need to handle that.
>>>>>
>>>>> I suppose both situations are possible:
>>>>> 1) user knows off-hand that all mbufs in the group are from the same mempool
>>>>> 2) user can't guarantee that all mbufs in the group are from same mempool.
>>>>>
>>>>> As I understand that patch is for case 1) only.
>>>>> For 2) it could be a separate function and separate patch.
>>>>>
>>>>> Konstantin
>>>>>
>>>>>
>>>>
>>>> Please don't make unnecessary assumptions in pursuit of minor optimizations.
>>>
>>> I don't suggest to make *any* assumptions.
>>> What I am saying is we can have 2 functions for two different cases.
>>
>> kni_free_mbufs() is static function. Even user knows if all mbufs are
>> some same pool or not, can't pass this information to the free function.
>>
>> Of course this information can be passed via new API, or as an update to
>> existing API, but I think it is better to update free function to cover
>> both cases instead of getting this information from user.
> 
> I suppose misunderstanding came from the fact that kni_free_mbufs()
> is modified to use rte_pktmbuf_free_bulk(mp, mbufs, n).
> I am not talking about kni part of the patch
> (to be honest I didn't pay much attention to it).
> What I am saying there are many situations when user knows off-hand
> that all  mbufs in that group are from the same mempool and such
> function will be useful too.

> BTW, for my own curiosity, how it could happen with KNI that 
> kni_fifo_get() would return mbufs not from kni->pktmbuf_pool
> (I am not really familiar with KNI and its use-cases)?

It gets packets from free queue:
kni_fifo_get(kni->free_q, ...)

DPDK app may send a mbuf (from any pool, like another port's mempool) to
kernel, kernel puts buf back to kni->free_q when done with it.

> Konstantin
> 
>>
>>> Obviously we'll have to document it properly.
>>> Konstantin
>>>
>>>> It is trivial to write a correct free bulk that handles pool changing.
>>>> Also the free_seg could be bulked as well.
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 18:41             ` Ferruh Yigit
@ 2017-01-11 18:56               ` Stephen Hemminger
  2017-01-16  7:39                 ` Yuanhan Liu
  0 siblings, 1 reply; 13+ messages in thread
From: Stephen Hemminger @ 2017-01-11 18:56 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: Ananyev, Konstantin, Sergey Vyazmitinov, olivier.matz, dev

On Wed, 11 Jan 2017 18:41:28 +0000
Ferruh Yigit <ferruh.yigit@intel.com> wrote:

> On 1/11/2017 6:25 PM, Ananyev, Konstantin wrote:
> > 
> >   
> >> -----Original Message-----
> >> From: Yigit, Ferruh
> >> Sent: Wednesday, January 11, 2017 5:48 PM
> >> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Stephen Hemminger <stephen@networkplumber.org>
> >> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> >>
> >> On 1/11/2017 5:43 PM, Ananyev, Konstantin wrote:  
> >>>
> >>>  
> >>>> -----Original Message-----
> >>>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> >>>> Sent: Wednesday, January 11, 2017 5:36 PM
> >>>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> >>>> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>;  
> >> dev@dpdk.org  
> >>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> >>>>
> >>>> On Wed, 11 Jan 2017 17:28:21 +0000
> >>>> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
> >>>>  
> >>>>>> -----Original Message-----
> >>>>>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> >>>>>> Sent: Wednesday, January 11, 2017 4:18 PM
> >>>>>> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> >>>>>> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> >>>>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> >>>>>>
> >>>>>> On Fri, 30 Dec 2016 04:50:16 +0700
> >>>>>> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> >>>>>>  
> >>>>>>>  /**
> >>>>>>> + * Free n packets mbuf back into its original mempool.
> >>>>>>> + *
> >>>>>>> + * Free each mbuf, and all its segments in case of chained buffers. Each
> >>>>>>> + * segment is added back into its original mempool.
> >>>>>>> + *
> >>>>>>> + * @param mp
> >>>>>>> + *   The packets mempool.
> >>>>>>> + * @param mbufs
> >>>>>>> + *   The packets mbufs array to be freed.
> >>>>>>> + * @param n
> >>>>>>> + *   Number of packets.
> >>>>>>> + */
> >>>>>>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> >>>>>>> +		struct rte_mbuf **mbufs, unsigned n)
> >>>>>>> +{
> >>>>>>> +	struct rte_mbuf *mbuf, *m_next;
> >>>>>>> +	unsigned i;
> >>>>>>> +	for (i = 0; i < n; ++i) {
> >>>>>>> +		mbuf = mbufs[i];
> >>>>>>> +		__rte_mbuf_sanity_check(mbuf, 1);
> >>>>>>> +
> >>>>>>> +		mbuf = mbuf->next;
> >>>>>>> +		while (mbuf != NULL) {
> >>>>>>> +			m_next = mbuf->next;
> >>>>>>> +			rte_pktmbuf_free_seg(mbuf);
> >>>>>>> +			mbuf = m_next;
> >>>>>>> +		}
> >>>>>>> +	}
> >>>>>>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> >>>>>>> +}  
> >>>>>>
> >>>>>> The mbufs may come from different pools. You need to handle that.  
> >>>>>
> >>>>> I suppose both situations are possible:
> >>>>> 1) user knows off-hand that all mbufs in the group are from the same mempool
> >>>>> 2) user can't guarantee that all mbufs in the group are from same mempool.
> >>>>>
> >>>>> As I understand that patch is for case 1) only.
> >>>>> For 2) it could be a separate function and separate patch.
> >>>>>
> >>>>> Konstantin
> >>>>>
> >>>>>  
> >>>>
> >>>> Please don't make unnecessary assumptions in pursuit of minor optimizations.  
> >>>
> >>> I don't suggest to make *any* assumptions.
> >>> What I am saying is we can have 2 functions for two different cases.  
> >>
> >> kni_free_mbufs() is static function. Even user knows if all mbufs are
> >> some same pool or not, can't pass this information to the free function.
> >>
> >> Of course this information can be passed via new API, or as an update to
> >> existing API, but I think it is better to update free function to cover
> >> both cases instead of getting this information from user.  
> > 
> > I suppose misunderstanding came from the fact that kni_free_mbufs()
> > is modified to use rte_pktmbuf_free_bulk(mp, mbufs, n).
> > I am not talking about kni part of the patch
> > (to be honest I didn't pay much attention to it).
> > What I am saying there are many situations when user knows off-hand
> > that all  mbufs in that group are from the same mempool and such
> > function will be useful too.  
> 
> > BTW, for my own curiosity, how it could happen with KNI that 
> > kni_fifo_get() would return mbufs not from kni->pktmbuf_pool
> > (I am not really familiar with KNI and its use-cases)?  
> 
> It gets packets from free queue:
> kni_fifo_get(kni->free_q, ...)
> 
> DPDK app may send a mbuf (from any pool, like another port's mempool) to
> kernel, kernel puts buf back to kni->free_q when done with it.
> 
> > Konstantin
> >   
> >>  
> >>> Obviously we'll have to document it properly.
> >>> Konstantin
> >>>  
> >>>> It is trivial to write a correct free bulk that handles pool changing.
> >>>> Also the free_seg could be bulked as well.  
> >   
> 

Please write generic code. Something like the following (untested).

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75379fd..b7a743ec5c87 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -306,6 +306,9 @@ extern "C" {
 /** Alignment constraint of mbuf private area. */
 #define RTE_MBUF_PRIV_ALIGN 8
 
+/** Maximum number of mbufs freed in bulk. */
+#define RTE_MBUF_BULK_FREE 64
+
 /**
  * Get the name of a RX offload flag
  *
@@ -1261,6 +1264,50 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 }
 
 /**
+ * Free n packets mbuf back into its original mempool.
+ *
+ * Free each mbuf, and all its segments in case of chained buffers. Each
+ * segment is added back into its original mempool.
+ *
+ * @param mbufs
+ *   The packets mbufs array to be freed.
+ * @param n
+ *   Number of packets.
+ */
+static inline void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs,
+					 unsigned n)
+{
+	struct rte_mbuf *tofree[RTE_MBUF_BULK_FREE];
+	struct rte_mempool *mp;
+	unsigned i, count = 0;
+
+	for (i = 0; i < n; i++) {
+		struct rte_mbuf *m, *m_next;
+
+		for (m = mbufs[i]; m; m = m_next) {
+			m_next = m->next;
+			
+			if (count > 0 &&
+			    (unlikely(m->pool != mp || count == RTE_MBUF_BULK_FREE))) {
+				rte_mempool_put_bulk(mp, tofree, count);
+				count = 0;
+			}
+
+			mp = m->pool;
+
+			if (likely(__rte_pktmbuf_prefree_seg(m))) {
+				m->next = NULL;
+				tofree[count++] = m;
+			}
+		}
+	}
+
+	if (likely(count > 0))
+		rte_mempool_put_bulk(mp, tofree, count);
+}
+
+	
+/**
  * Creates a "clone" of the given packet mbuf.
  *
  * Walks through all segments of the given packet mbuf, and for each of them:


This handles multiple pools and multi-segment and indirect mbufs.

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH] kni: use bulk functions to allocate and free mbufs
  2017-01-11 18:56               ` Stephen Hemminger
@ 2017-01-16  7:39                 ` Yuanhan Liu
  0 siblings, 0 replies; 13+ messages in thread
From: Yuanhan Liu @ 2017-01-16  7:39 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: Ferruh Yigit, Ananyev, Konstantin, Sergey Vyazmitinov, olivier.matz, dev

On Wed, Jan 11, 2017 at 10:56:20AM -0800, Stephen Hemminger wrote:
> Please write generic code. Something like the following (untested).

Despite there are few compile errors (which are easy to fix), it works
fine.

More importantly, it saves near 2000 cycles if I apply the bulk free
to virtio_xmit_cleanup(). Only one mp is being used.

	--yliu

> 
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 4476d75379fd..b7a743ec5c87 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -306,6 +306,9 @@ extern "C" {
>  /** Alignment constraint of mbuf private area. */
>  #define RTE_MBUF_PRIV_ALIGN 8
>  
> +/** Maximum number of mbufs freed in bulk. */
> +#define RTE_MBUF_BULK_FREE 64
> +
>  /**
>   * Get the name of a RX offload flag
>   *
> @@ -1261,6 +1264,50 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
>  }
>  
>  /**
> + * Free n packets mbuf back into its original mempool.
> + *
> + * Free each mbuf, and all its segments in case of chained buffers. Each
> + * segment is added back into its original mempool.
> + *
> + * @param mbufs
> + *   The packets mbufs array to be freed.
> + * @param n
> + *   Number of packets.
> + */
> +static inline void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs,
> +					 unsigned n)
> +{
> +	struct rte_mbuf *tofree[RTE_MBUF_BULK_FREE];
> +	struct rte_mempool *mp;
> +	unsigned i, count = 0;
> +
> +	for (i = 0; i < n; i++) {
> +		struct rte_mbuf *m, *m_next;
> +
> +		for (m = mbufs[i]; m; m = m_next) {
> +			m_next = m->next;
> +			
> +			if (count > 0 &&
> +			    (unlikely(m->pool != mp || count == RTE_MBUF_BULK_FREE))) {
> +				rte_mempool_put_bulk(mp, tofree, count);
> +				count = 0;
> +			}
> +
> +			mp = m->pool;
> +
> +			if (likely(__rte_pktmbuf_prefree_seg(m))) {
> +				m->next = NULL;
> +				tofree[count++] = m;
> +			}
> +		}
> +	}
> +
> +	if (likely(count > 0))
> +		rte_mempool_put_bulk(mp, tofree, count);
> +}
> +
> +	
> +/**
>   * Creates a "clone" of the given packet mbuf.
>   *
>   * Walks through all segments of the given packet mbuf, and for each of them:
> 
> 
> This handles multiple pools and multi-segment and indirect mbufs.
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2017-01-16  7:37 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-29 21:50 [PATCH] kni: use bulk functions to allocate and free mbufs Sergey Vyazmitinov
2017-01-11 10:39 ` Ananyev, Konstantin
2017-01-11 16:17 ` Stephen Hemminger
2017-01-11 16:38   ` Olivier MATZ
2017-01-11 17:00   ` Ferruh Yigit
2017-01-11 17:28   ` Ananyev, Konstantin
2017-01-11 17:35     ` Stephen Hemminger
2017-01-11 17:43       ` Ananyev, Konstantin
2017-01-11 17:47         ` Ferruh Yigit
2017-01-11 18:25           ` Ananyev, Konstantin
2017-01-11 18:41             ` Ferruh Yigit
2017-01-11 18:56               ` Stephen Hemminger
2017-01-16  7:39                 ` Yuanhan Liu

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.