Subject: [PATCH v2] kni: use bulk functions to allocate and free mbufs
From: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
Date: 2016-12-29 22:08 UTC
To: olivier.matz
Cc: ferruh.yigit, dev, Sergey Vyazmitinov

Optimize kni_allocate_mbufs() and kni_free_mbufs() by using the mbuf
bulk functions, replacing one mempool operation per packet with a
single bulk operation per burst. This can improve performance by more
than a factor of two.

Signed-off-by: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
---
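Notes (not part of the commit message):

The gain comes from amortizing the cost of the mempool accesses across
a whole burst instead of paying it per packet. Below is a minimal,
self-contained sketch of the two call patterns; the pool name, pool
sizing, and burst size are hypothetical, chosen only for illustration:

#include <stdlib.h>

#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

#define BURST 32        /* hypothetical burst size */

int
main(int argc, char **argv)
{
        struct rte_mbuf *pkts[BURST];
        struct rte_mempool *mp;
        int i;

        if (rte_eal_init(argc, argv) < 0)
                rte_exit(EXIT_FAILURE, "EAL init failed\n");

        mp = rte_pktmbuf_pool_create("sketch_pool", 8191, 256, 0,
                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        if (mp == NULL)
                rte_exit(EXIT_FAILURE, "pool: %s\n", rte_strerror(rte_errno));

        /* Old pattern: one mempool operation per packet. */
        for (i = 0; i < BURST; i++) {
                pkts[i] = rte_pktmbuf_alloc(mp);
                if (pkts[i] == NULL)
                        break;
        }
        while (i-- > 0)
                rte_pktmbuf_free(pkts[i]);

        /* New pattern: one bulk operation per burst. Note that
         * rte_pktmbuf_alloc_bulk() is all-or-nothing: it returns 0
         * only if all BURST mbufs were allocated. */
        if (rte_pktmbuf_alloc_bulk(mp, pkts, BURST) == 0) {
                for (i = 0; i < BURST; i++)
                        rte_pktmbuf_free(pkts[i]);
        }

        return 0;
}

With this patch applied, the free loop in the second pattern can
likewise collapse into a single rte_pktmbuf_free_bulk(mp, pkts, BURST)
call, under the constraints documented on that function (all heads
allocated from mp, reference count of 1).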
 lib/librte_kni/rte_kni.c      | 44 +++++++++++++++++++------------------------
 lib/librte_kni/rte_kni_fifo.h | 18 ++++++++++++++++++
 lib/librte_mbuf/rte_mbuf.h    | 32 +++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+), 25 deletions(-)
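
A note on the FIFO accounting: the kni_fifo_count() and
kni_fifo_free_count() helpers added to rte_kni_fifo.h below rely on
the KNI FIFO length being a power of two, so the read/write index
difference can be reduced with a mask instead of a modulo, and one
slot is always left empty to distinguish a full ring from an empty
one. A self-contained sketch of the same arithmetic (the struct here
is a simplified stand-in for struct rte_kni_fifo, for illustration
only):

#include <assert.h>
#include <stdio.h>

/* Simplified stand-in: only the fields the count helpers touch. */
struct fifo {
        unsigned int write;     /* next slot to be written */
        unsigned int read;      /* next slot to be read */
        unsigned int len;       /* ring size; must be a power of two */
};

static unsigned int
fifo_count(const struct fifo *f)
{
        return (f->len + f->write - f->read) & (f->len - 1);
}

static unsigned int
fifo_free_count(const struct fifo *f)
{
        /* One slot stays empty so that full != empty. */
        return (f->read - f->write - 1) & (f->len - 1);
}

int
main(void)
{
        /* len = 8 and the writer has wrapped around: slots 6, 7, 0
         * and 1 are in use, so count = 4 and free = len - 1 - 4 = 3. */
        struct fifo f = { .write = 2, .read = 6, .len = 8 };

        assert(fifo_count(&f) == 4);
        assert(fifo_free_count(&f) == 3);
        printf("count=%u free=%u\n", fifo_count(&f), fifo_free_count(&f));
        return 0;
}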

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index a80cefd..8b0b2be 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -590,22 +590,21 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 static void
 kni_free_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
+	unsigned int freeing;
 	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 
-	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
-	if (likely(ret > 0)) {
-		for (i = 0; i < ret; i++)
-			rte_pktmbuf_free(pkts[i]);
+	freeing = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+	if (likely(freeing > 0)) {
+		rte_pktmbuf_free_bulk(kni->pktmbuf_pool, pkts, freeing);
 	}
 }
 
 static void
 kni_allocate_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
-	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-	void *phys[MAX_MBUF_BURST_NUM];
+	int ret;
+	unsigned int count, allocated, put;
+	struct rte_mbuf *pkts[KNI_FIFO_COUNT_MAX];
+	void *phys[KNI_FIFO_COUNT_MAX];
 
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
 			 offsetof(struct rte_kni_mbuf, pool));
@@ -628,28 +627,23 @@ kni_allocate_mbufs(struct rte_kni *kni)
 		return;
 	}
 
-	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
-		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
-		if (unlikely(pkts[i] == NULL)) {
-			/* Out of memory */
-			RTE_LOG(ERR, KNI, "Out of memory\n");
-			break;
-		}
-		phys[i] = va2pa(pkts[i]);
-	}
+	/* Calculate alloc queue free space */
+	count = kni_fifo_free_count(kni->alloc_q);
 
-	/* No pkt mbuf alocated */
-	if (i <= 0)
-		return;
+	/* Get buffers from mempool. rte_pktmbuf_alloc_bulk() is
+	 * all-or-nothing: it returns 0 on success and a negative
+	 * errno on failure, not a count of allocated mbufs. */
+	ret = rte_pktmbuf_alloc_bulk(kni->pktmbuf_pool, pkts, count);
+	if (unlikely(ret != 0)) {
+		RTE_LOG(ERR, KNI, "Out of memory\n");
+		return;
+	}
+	allocated = count;
+	for (unsigned int i = 0; i < allocated; i++)
+		phys[i] = va2pa(pkts[i]);
 
-	ret = kni_fifo_put(kni->alloc_q, phys, i);
+	/* Put buffers into alloc queue */
+	put = kni_fifo_put(kni->alloc_q, (void **)phys, allocated);
 
 	/* Check if any mbufs not put into alloc_q, and then free them */
-	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
-		int j;
-
-		for (j = ret; j < i; j++)
+	if (unlikely(put < allocated)) {
+		RTE_LOG(ERR, KNI,
+			"Freeing %u mbufs not put into alloc_q\n",
+			allocated - put);
+		for (unsigned int j = put; j < allocated; j++)
 			rte_pktmbuf_free(pkts[j]);
 	}
 }
 
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index 8cb8587..361ddb0 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -91,3 +91,21 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
 	fifo->read = new_read;
 	return i;
 }
+
+/**
+ * Get the number of elements in the FIFO
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the number of free slots in the FIFO
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75..56e9ef7 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -1261,6 +1261,38 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 }
 
 /**
+ * Free a bulk of n packet mbufs back into their original mempool.
+ *
+ * Free each mbuf and, in case of chained buffers, all its segments.
+ * The head segments are returned to @p mp in a single bulk operation,
+ * so the caller must guarantee that every head was allocated from
+ * @p mp and has a reference count of 1; chained segments are freed
+ * through the regular per-segment path into their own mempools.
+ *
+ * @param mp
+ *   The mempool the head segments were allocated from.
+ * @param mbufs
+ *   Array of packet mbufs to be freed.
+ * @param n
+ *   Number of packets in the array.
+ */
+static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
+		struct rte_mbuf **mbufs, unsigned int n)
+{
+	struct rte_mbuf *mbuf, *m_next;
+	unsigned int i;
+
+	for (i = 0; i < n; ++i) {
+		mbuf = mbufs[i];
+		__rte_mbuf_sanity_check(mbuf, 1);
+
+		/* Free chained segments through the per-segment path,
+		 * which honours each segment's own mempool and
+		 * reference count. */
+		mbuf = mbuf->next;
+		while (mbuf != NULL) {
+			m_next = mbuf->next;
+			rte_pktmbuf_free_seg(mbuf);
+			mbuf = m_next;
+		}
+
+		/* Restore the invariants expected of mbufs sitting in
+		 * a pool before the head is recycled below. */
+		mbufs[i]->next = NULL;
+		rte_mbuf_refcnt_set(mbufs[i], 0);
+	}
+	/* Return all head segments to mp in a single bulk operation. */
+	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
+}
+
+/**
  * Creates a "clone" of the given packet mbuf.
  *
  * Walks through all segments of the given packet mbuf, and for each of them:
-- 
2.7.4
