From: Olivier Matz <olivier.matz@6wind.com>
To: dev@dpdk.org
Cc: Anatoly Burakov <anatoly.burakov@intel.com>,
	Andrew Rybchenko <arybchenko@solarflare.com>,
	Ferruh Yigit <ferruh.yigit@linux.intel.com>,
	"Giridharan, Ganesan" <ggiridharan@rbbn.com>,
	Jerin Jacob Kollanukkaran <jerinj@marvell.com>,
	"Kiran Kumar Kokkilagadda" <kirankumark@marvell.com>,
	Stephen Hemminger <sthemmin@microsoft.com>,
	Thomas Monjalon <thomas@monjalon.net>,
	Vamsi Krishna Attunuru <vattunuru@marvell.com>,
	Hemant Agrawal <hemant.agrawal@nxp.com>,
	Nipun Gupta <nipun.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH v3 6/7] mempool: prevent objects from being across pages
Date: Mon,  4 Nov 2019 16:12:53 +0100
Message-ID: <20191104151254.6354-7-olivier.matz@6wind.com>
In-Reply-To: <20191104151254.6354-1-olivier.matz@6wind.com>

When populating a mempool, ensure that objects are not located across
several pages, unless the user did not request IOVA-contiguous objects.
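
For illustration only (the pool name, sizes and counts below are
placeholders, not part of this patch): an application that does not need
IOVA-contiguous objects can opt out with MEMPOOL_F_NO_IOVA_CONTIG at
creation time, in which case objects may still be placed across page
boundaries:

    #include <rte_mempool.h>
    #include <rte_lcore.h>   /* SOCKET_ID_ANY */

    static struct rte_mempool *
    create_noncontig_pool(void)
    {
            /* No IOVA contiguity requested: the populate code is then
             * free to place objects across page boundaries. */
            return rte_mempool_create("example_pool",
                                      8192, 2048,   /* nb objs, obj size */
                                      256, 0,       /* cache, priv size */
                                      NULL, NULL,   /* pool init cb/arg */
                                      NULL, NULL,   /* obj init cb/arg */
                                      SOCKET_ID_ANY,
                                      MEMPOOL_F_NO_IOVA_CONTIG);
    }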

Signed-off-by: Vamsi Krishna Attunuru <vattunuru@marvell.com>
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/mempool/bucket/rte_mempool_bucket.c   |  2 +-
 drivers/mempool/dpaa/dpaa_mempool.c           |  4 +-
 drivers/mempool/dpaa2/dpaa2_hw_mempool.c      |  4 +-
 .../mempool/octeontx/rte_mempool_octeontx.c   | 21 +++---
 drivers/mempool/octeontx2/Makefile            |  3 +
 drivers/mempool/octeontx2/meson.build         |  3 +
 drivers/mempool/octeontx2/otx2_mempool_ops.c  | 21 +++---
 lib/librte_mempool/rte_mempool.c              | 23 ++-----
 lib/librte_mempool/rte_mempool.h              | 24 +++++--
 lib/librte_mempool/rte_mempool_ops_default.c  | 66 +++++++++++++++----
 10 files changed, 115 insertions(+), 56 deletions(-)
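
Illustrative sketch only (the "example_" name is hypothetical, not part of
the diff below): with this change, a driver populate callback passes a
flags word as the second argument of the helper. 0 keeps the default
behavior, while RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ additionally aligns
object headers on a multiple of total_elt_sz, as done for the
octeontx/octeontx2 drivers:

    static int
    example_populate(struct rte_mempool *mp, unsigned int max_objs,
                     void *vaddr, rte_iova_t iova, size_t len,
                     rte_mempool_populate_obj_cb_t *obj_cb,
                     void *obj_cb_arg)
    {
            /* Objects are sliced one by one and never cross a page
             * boundary, unless the mempool was created with
             * MEMPOOL_F_NO_IOVA_CONTIG. */
            return rte_mempool_op_populate_helper(mp, 0, max_objs,
                                                  vaddr, iova, len,
                                                  obj_cb, obj_cb_arg);
    }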

diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
index dfeaf4e45..b978fd220 100644
--- a/drivers/mempool/bucket/rte_mempool_bucket.c
+++ b/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -585,7 +585,7 @@ bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
 
 		hdr->fill_cnt = 0;
 		hdr->lcore_id = LCORE_ID_ANY;
-		rc = rte_mempool_op_populate_helper(mp,
+		rc = rte_mempool_op_populate_helper(mp, 0,
 						     RTE_MIN(bd->obj_per_bucket,
 							     max_objs - n_objs),
 						     iter + bucket_header_sz,
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index 27736e6c2..3a2528331 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -341,8 +341,8 @@ dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
 	 */
 	TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
 
-	return rte_mempool_op_populate_helper(mp, max_objs, vaddr, paddr, len,
-					       obj_cb, obj_cb_arg);
+	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+					       len, obj_cb, obj_cb_arg);
 }
 
 static const struct rte_mempool_ops dpaa_mpool_ops = {
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 8f8dbeada..36c93decf 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -421,8 +421,8 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
 	/* Insert entry into the PA->VA Table */
 	dpaax_iova_table_update(paddr, vaddr, len);
 
-	return rte_mempool_op_populate_helper(mp, max_objs, vaddr, paddr, len,
-					       obj_cb, obj_cb_arg);
+	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, paddr,
+					       len, obj_cb, obj_cb_arg);
 }
 
 static const struct rte_mempool_ops dpaa2_mpool_ops = {
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index fff33e5c6..bd0070020 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -132,14 +132,15 @@ octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
 			     size_t *min_chunk_size, size_t *align)
 {
 	ssize_t mem_size;
+	size_t total_elt_sz;
 
-	/*
-	 * Simply need space for one more object to be able to
-	 * fulfil alignment requirements.
+	/* Need space for one more obj on each chunk to fulfill
+	 * alignment requirements.
 	 */
-	mem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num + 1,
-							pg_shift,
-							min_chunk_size, align);
+	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+	mem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+						total_elt_sz, min_chunk_size,
+						align);
 	if (mem_size >= 0) {
 		/*
 		 * Memory area which contains objects must be physically
@@ -168,7 +169,7 @@ octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
 	/* align object start address to a multiple of total_elt_sz */
-	off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+	off = total_elt_sz - ((((uintptr_t)vaddr - 1) % total_elt_sz) + 1);
 
 	if (len < off)
 		return -EINVAL;
@@ -184,8 +185,10 @@ octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
 	if (ret < 0)
 		return ret;
 
-	return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova, len,
-					       obj_cb, obj_cb_arg);
+	return rte_mempool_op_populate_helper(mp,
+					RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ,
+					max_objs, vaddr, iova, len,
+					obj_cb, obj_cb_arg);
 }
 
 static struct rte_mempool_ops octeontx_fpavf_ops = {
diff --git a/drivers/mempool/octeontx2/Makefile b/drivers/mempool/octeontx2/Makefile
index 8f55305c5..62e90f277 100644
--- a/drivers/mempool/octeontx2/Makefile
+++ b/drivers/mempool/octeontx2/Makefile
@@ -30,6 +30,9 @@ EXPORT_MAP := rte_mempool_octeontx2_version.map
 
 LIBABIVER := 1
 
+# for rte_mempool_get_page_size
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
 #
 # all source are stored in SRCS-y
 #
diff --git a/drivers/mempool/octeontx2/meson.build b/drivers/mempool/octeontx2/meson.build
index 5f93bb495..883b643da 100644
--- a/drivers/mempool/octeontx2/meson.build
+++ b/drivers/mempool/octeontx2/meson.build
@@ -24,3 +24,6 @@ foreach flag: extra_flags
 endforeach
 
 deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_octeontx2', 'mempool']
+
+# for rte_mempool_get_page_size
+allow_experimental_apis = true
diff --git a/drivers/mempool/octeontx2/otx2_mempool_ops.c b/drivers/mempool/octeontx2/otx2_mempool_ops.c
index 3aea92a01..ea4b1c45d 100644
--- a/drivers/mempool/octeontx2/otx2_mempool_ops.c
+++ b/drivers/mempool/octeontx2/otx2_mempool_ops.c
@@ -713,12 +713,15 @@ static ssize_t
 otx2_npa_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
 		       uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
 {
-	/*
-	 * Simply need space for one more object to be able to
-	 * fulfill alignment requirements.
+	size_t total_elt_sz;
+
+	/* Need space for one more obj on each chunk to fulfill
+	 * alignment requirements.
 	 */
-	return rte_mempool_op_calc_mem_size_helper(mp, obj_num + 1, pg_shift,
-						    min_chunk_size, align);
+	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+	return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
+						total_elt_sz, min_chunk_size,
+						align);
 }
 
 static int
@@ -735,7 +738,7 @@ otx2_npa_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
 	/* Align object start address to a multiple of total_elt_sz */
-	off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+	off = total_elt_sz - ((((uintptr_t)vaddr - 1) % total_elt_sz) + 1);
 
 	if (len < off)
 		return -EINVAL;
@@ -749,8 +752,10 @@ otx2_npa_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
 	if (npa_lf_aura_range_update_check(mp->pool_id) < 0)
 		return -EBUSY;
 
-	return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova, len,
-					       obj_cb, obj_cb_arg);
+	return rte_mempool_op_populate_helper(mp,
+					RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ,
+					max_objs, vaddr, iova, len,
+					obj_cb, obj_cb_arg);
 }
 
 static struct rte_mempool_ops otx2_npa_ops = {
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 758c5410b..d3db9273d 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -431,8 +431,6 @@ rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
 
 	if (!need_iova_contig_obj)
 		*pg_sz = 0;
-	else if (!alloc_in_ext_mem && rte_eal_iova_mode() == RTE_IOVA_VA)
-		*pg_sz = 0;
 	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
 		*pg_sz = get_min_page_size(mp->socket_id);
 	else
@@ -481,17 +479,15 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 * then just set page shift and page size to 0, because the user has
 	 * indicated that there's no need to care about anything.
 	 *
-	 * if we do need contiguous objects, there is also an option to reserve
-	 * the entire mempool memory as one contiguous block of memory, in
-	 * which case the page shift and alignment wouldn't matter as well.
+	 * if we do need contiguous objects (if a mempool driver has its
+	 * own calc_size() method returning min_chunk_size = mem_size),
+	 * there is also an option to reserve the entire mempool memory
+	 * as one contiguous block of memory.
 	 *
 	 * if we require contiguous objects, but not necessarily the entire
-	 * mempool reserved space to be contiguous, then there are two options.
-	 *
-	 * if our IO addresses are virtual, not actual physical (IOVA as VA
-	 * case), then no page shift needed - our memory allocation will give us
-	 * contiguous IO memory as far as the hardware is concerned, so
-	 * act as if we're getting contiguous memory.
+	 * mempool reserved space to be contiguous, pg_sz will be != 0,
+	 * and the default ops->populate() will take care of not placing
+	 * objects across pages.
 	 *
 	 * if our IO addresses are physical, we may get memory from bigger
 	 * pages, or we might get memory from smaller pages, and how much of it
@@ -504,11 +500,6 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	 *
 	 * If we fail to get enough contiguous memory, then we'll go and
 	 * reserve space in smaller chunks.
-	 *
-	 * We also have to take into account the fact that memory that we're
-	 * going to allocate from can belong to an externally allocated memory
-	 * area, in which case the assumption of IOVA as VA mode being
-	 * synonymous with IOVA contiguousness will not hold.
 	 */
 
 	need_iova_contig_obj = !(mp->flags & MEMPOOL_F_NO_IOVA_CONTIG);
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 26a98af30..f1cba3521 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -473,6 +473,10 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
  * Otherwise, it is a number of pages required to store given number of
  * objects without crossing page boundary.
  *
+ * The chunk_reserve argument is the amount of memory that must be
+ * reserved at the beginning of each page, or at the beginning of the
+ * memory area if pg_shift is 0.
+ *
  * Note that if object size is bigger than page size, then it assumes
  * that pages are grouped in subsets of physically continuous pages big
  * enough to store at least one object.
@@ -482,7 +486,7 @@ typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
  */
 __rte_experimental
 ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
-		uint32_t obj_num, uint32_t pg_shift,
+		uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve,
 		size_t *min_chunk_size, size_t *align);
 
 /**
@@ -490,7 +494,7 @@ ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
  * objects.
  *
  * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
- * min_chunk_size, align).
+ * 0, min_chunk_size, align).
  */
 ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
 		uint32_t obj_num, uint32_t pg_shift,
@@ -544,20 +548,30 @@ typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
 		void *vaddr, rte_iova_t iova, size_t len,
 		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
 
+/**
+ * Align objects on addresses multiple of total_elt_sz.
+ */
+#define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001
+
 /**
  * Helper to populate memory pool object using provided memory
- * chunk: just slice objects one by one.
+ * chunk: just slice objects one by one, taking care of not
+ * crossing page boundaries.
+ *
+ * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses
+ * of object headers will be aligned on a multiple of total_elt_sz.
+ * This feature is used by octeontx hardware.
  */
 __rte_experimental
 int rte_mempool_op_populate_helper(struct rte_mempool *mp,
-		unsigned int max_objs,
+		unsigned int flags, unsigned int max_objs,
 		void *vaddr, rte_iova_t iova, size_t len,
 		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
 
 /**
  * Default way to populate memory pool object using provided memory chunk.
  *
- * Equivalent to rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
+ * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
  * len, obj_cb, obj_cb_arg).
  */
 int rte_mempool_op_populate_default(struct rte_mempool *mp,
diff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c
index 0bfc63497..e6be7152b 100644
--- a/lib/librte_mempool/rte_mempool_ops_default.c
+++ b/lib/librte_mempool/rte_mempool_ops_default.c
@@ -9,6 +9,7 @@
 ssize_t
 rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
 				uint32_t obj_num, uint32_t pg_shift,
+				size_t chunk_reserve,
 				size_t *min_chunk_size, size_t *align)
 {
 	size_t total_elt_sz;
@@ -19,10 +20,12 @@ rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
 	if (total_elt_sz == 0) {
 		mem_size = 0;
 	} else if (pg_shift == 0) {
-		mem_size = total_elt_sz * obj_num;
+		mem_size = total_elt_sz * obj_num + chunk_reserve;
 	} else {
 		pg_sz = (size_t)1 << pg_shift;
-		obj_per_page = pg_sz / total_elt_sz;
+		if (chunk_reserve >= pg_sz)
+			return -EINVAL;
+		obj_per_page = (pg_sz - chunk_reserve) / total_elt_sz;
 		if (obj_per_page == 0) {
 			/*
 			 * Note that if object size is bigger than page size,
@@ -30,8 +33,8 @@ rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
 			 * of physically continuous pages big enough to store
 			 * at least one object.
 			 */
-			mem_size =
-				RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
+			mem_size = RTE_ALIGN_CEIL(total_elt_sz + chunk_reserve,
+						pg_sz) * obj_num;
 		} else {
 			/* In the best case, the allocator will return a
 			 * page-aligned address. For example, with 5 objs,
@@ -42,7 +45,8 @@ rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp,
 			 */
 			objs_in_last_page = ((obj_num - 1) % obj_per_page) + 1;
 			/* room required for the last page */
-			mem_size = objs_in_last_page * total_elt_sz;
+			mem_size = objs_in_last_page * total_elt_sz +
+				chunk_reserve;
 			/* room required for other pages */
 			mem_size += ((obj_num - objs_in_last_page) /
 				obj_per_page) << pg_shift;
@@ -67,24 +71,60 @@ rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
 				size_t *min_chunk_size, size_t *align)
 {
 	return rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
-						min_chunk_size, align);
+						0, min_chunk_size, align);
+}
+
+/* Returns -1 if object crosses a page boundary, else returns 0 */
+static int
+check_obj_bounds(char *obj, size_t pg_sz, size_t elt_sz)
+{
+	if (pg_sz == 0)
+		return 0;
+	if (elt_sz > pg_sz)
+		return 0;
+	if (RTE_PTR_ALIGN(obj, pg_sz) != RTE_PTR_ALIGN(obj + elt_sz - 1, pg_sz))
+		return -1;
+	return 0;
 }
 
 int
-rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int max_objs,
-			void *vaddr, rte_iova_t iova, size_t len,
-			rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+rte_mempool_op_populate_helper(struct rte_mempool *mp, unsigned int flags,
+			unsigned int max_objs, void *vaddr, rte_iova_t iova,
+			size_t len, rte_mempool_populate_obj_cb_t *obj_cb,
+			void *obj_cb_arg)
 {
-	size_t total_elt_sz;
+	char *va = vaddr;
+	size_t total_elt_sz, pg_sz;
 	size_t off;
 	unsigned int i;
 	void *obj;
+	int ret;
+
+	ret = rte_mempool_get_page_size(mp, &pg_sz);
+	if (ret < 0)
+		return ret;
 
 	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
 
-	for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
+	if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
+		off = total_elt_sz - (((uintptr_t)(va - 1) % total_elt_sz) + 1);
+	else
+		off = 0;
+	for (i = 0; i < max_objs; i++) {
+		/* avoid objects to cross page boundaries */
+		if (check_obj_bounds(va + off, pg_sz, total_elt_sz) < 0) {
+			off += RTE_PTR_ALIGN_CEIL(va + off, pg_sz) - (va + off);
+			if (flags & RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ)
+				off += total_elt_sz -
+					(((uintptr_t)(va + off - 1) %
+						total_elt_sz) + 1);
+		}
+
+		if (off + total_elt_sz > len)
+			break;
+
 		off += mp->header_size;
-		obj = (char *)vaddr + off;
+		obj = va + off;
 		obj_cb(mp, obj_cb_arg, obj,
 		       (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
 		rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
@@ -100,6 +140,6 @@ rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
 				rte_mempool_populate_obj_cb_t *obj_cb,
 				void *obj_cb_arg)
 {
-	return rte_mempool_op_populate_helper(mp, max_objs, vaddr, iova,
+	return rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova,
 					len, obj_cb, obj_cb_arg);
 }
-- 
2.20.1


Thread overview: 251+ messages
2018-09-27 10:49 [PATCH] kni: add IOVA va support for kni Kiran Kumar
2018-09-27 10:58 ` Burakov, Anatoly
2018-10-02 17:05 ` Ferruh Yigit
2019-04-01 17:30   ` Jerin Jacob Kollanukkaran
2019-04-01 18:20     ` Ferruh Yigit
2019-04-01  9:51 ` [PATCH v2] " Kiran Kumar Kokkilagadda
2019-04-03 16:29   ` Ferruh Yigit
2019-04-04  5:03     ` [dpdk-dev] [EXT] " Kiran Kumar Kokkilagadda
2019-04-04 11:20       ` Ferruh Yigit
2019-04-04 13:29         ` Burakov, Anatoly
2019-04-04  9:57     ` Burakov, Anatoly
2019-04-04 11:21       ` Ferruh Yigit
2019-04-16  4:55   ` [dpdk-dev] [PATCH v3] " kirankumark
2019-04-19 10:38     ` Thomas Monjalon
2019-04-22  4:39     ` [dpdk-dev] [PATCH v4] " kirankumark
2019-04-22  6:15       ` [dpdk-dev] [PATCH v5] " kirankumark
2019-04-26  9:11         ` Burakov, Anatoly
2019-06-25  3:56         ` [dpdk-dev] [PATCH v6 0/4] add IOVA = VA support in KNI vattunuru
2019-06-25  3:56           ` [dpdk-dev] [PATCH v6 1/4] lib/mempool: skip populating mempool objs that falls on page boundaries vattunuru
2019-06-25  3:56           ` [dpdk-dev] [PATCH v6 2/4] lib/kni: add PCI related information vattunuru
2019-06-25 17:41             ` Stephen Hemminger
2019-06-26  3:48               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-06-26 14:58                 ` Stephen Hemminger
2019-06-27  9:43                   ` Vamsi Krishna Attunuru
2019-07-11 16:22             ` [dpdk-dev] " Ferruh Yigit
2019-07-12 11:02               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-07-12 11:11                 ` Ferruh Yigit
2019-06-25  3:56           ` [dpdk-dev] [PATCH v6 3/4] example/kni: add IOVA support for kni application vattunuru
2019-07-11 16:23             ` Ferruh Yigit
2019-06-25  3:57           ` [dpdk-dev] [PATCH v6 4/4] kernel/linux/kni: add IOVA support in kni module vattunuru
2019-07-11 16:30             ` Ferruh Yigit
2019-07-12 10:38               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-07-12 11:10                 ` Ferruh Yigit
2019-07-12 12:27                   ` Vamsi Krishna Attunuru
2019-07-12 16:29                   ` Vamsi Krishna Attunuru
2019-07-15 11:26                     ` Ferruh Yigit
2019-07-15 13:06                       ` Vamsi Krishna Attunuru
2019-07-11 16:43             ` [dpdk-dev] " Stephen Hemminger
2019-06-25 10:00           ` [dpdk-dev] [PATCH v6 0/4] add IOVA = VA support in KNI Burakov, Anatoly
2019-06-25 11:15             ` Jerin Jacob Kollanukkaran
2019-06-25 11:30               ` Burakov, Anatoly
2019-06-25 13:38                 ` Burakov, Anatoly
2019-06-27  9:34                   ` Jerin Jacob Kollanukkaran
2019-07-01 13:51                     ` Vamsi Krishna Attunuru
2019-07-04  6:42                       ` Vamsi Krishna Attunuru
2019-07-04  9:48                         ` Jerin Jacob Kollanukkaran
2019-07-11 16:21                           ` Ferruh Yigit
2019-07-17  9:04           ` [dpdk-dev] [PATCH v7 0/4] kni: add IOVA=VA support vattunuru
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 1/4] mempool: modify mempool populate() to skip objects from page boundaries vattunuru
2019-07-17 13:36               ` Andrew Rybchenko
2019-07-17 13:47                 ` Olivier Matz
2019-07-17 17:31                 ` Vamsi Krishna Attunuru
2019-07-18  9:28                   ` Andrew Rybchenko
2019-07-18 14:16                     ` Vamsi Krishna Attunuru
2019-07-19 13:38                       ` [dpdk-dev] [RFC 0/4] mempool: avoid objects allocations across pages Olivier Matz
2019-07-19 13:38                         ` [dpdk-dev] [RFC 1/4] mempool: clarify default populate function Olivier Matz
2019-07-19 15:42                           ` Andrew Rybchenko
2019-10-08  9:36                             ` Olivier Matz
2019-07-19 13:38                         ` [dpdk-dev] [RFC 2/4] mempool: unalign size when calculating required mem amount Olivier Matz
2019-08-07 15:21                           ` [dpdk-dev] ***Spam*** " Andrew Rybchenko
2019-10-28 14:06                             ` Olivier Matz
2019-07-19 13:38                         ` [dpdk-dev] [RFC 3/4] mempool: introduce function to get mempool page size Olivier Matz
2019-08-07 15:21                           ` Andrew Rybchenko
2019-10-28 14:06                             ` Olivier Matz
2019-07-19 13:38                         ` [dpdk-dev] [RFC 4/4] mempool: prevent objects from being across pages Olivier Matz
2019-07-19 14:03                           ` Burakov, Anatoly
2019-10-28 14:07                             ` Olivier Matz
2019-07-19 14:11                           ` Burakov, Anatoly
2019-08-07 15:21                           ` Andrew Rybchenko
2019-10-28 14:07                             ` Olivier Matz
2019-10-29 11:03                               ` Andrew Rybchenko
2019-07-23  5:37                         ` [dpdk-dev] [RFC 0/4] mempool: avoid objects allocations " Vamsi Krishna Attunuru
2019-08-07 15:21                         ` [dpdk-dev] ***Spam*** " Andrew Rybchenko
2019-10-28 14:06                           ` Olivier Matz
2019-10-28 14:01                         ` [dpdk-dev] [PATCH 0/5] " Olivier Matz
2019-10-28 14:01                           ` [dpdk-dev] [PATCH 1/5] mempool: allow unaligned addr/len in populate virt Olivier Matz
2019-10-29  9:02                             ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-10-29  9:13                               ` Olivier Matz
2019-10-29  9:18                                 ` Vamsi Krishna Attunuru
2019-10-29  9:21                             ` [dpdk-dev] " Andrew Rybchenko
2019-10-29 17:02                               ` Olivier Matz
2019-10-28 14:01                           ` [dpdk-dev] [PATCH 2/5] mempool: reduce wasted space on mempool populate Olivier Matz
2019-10-29 10:09                             ` Andrew Rybchenko
2019-10-29 17:09                               ` Olivier Matz
2019-10-28 14:01                           ` [dpdk-dev] [PATCH 3/5] mempool: remove optimistic IOVA-contiguous allocation Olivier Matz
2019-10-29 10:25                             ` Andrew Rybchenko
2019-10-29 17:20                               ` Olivier Matz
2019-10-30  7:36                                 ` Andrew Rybchenko
2019-10-30  7:44                                   ` Andrew Rybchenko
2019-10-30 10:38                                     ` Olivier Matz
2019-10-28 14:01                           ` [dpdk-dev] [PATCH 4/5] mempool: introduce function to get mempool page size Olivier Matz
2019-10-29 10:31                             ` Andrew Rybchenko
2019-10-29 17:20                               ` Olivier Matz
2019-10-30  8:32                                 ` Olivier Matz
2019-10-30 14:29                                   ` Olivier Matz
2019-10-28 14:01                           ` [dpdk-dev] [PATCH 5/5] mempool: prevent objects from being across pages Olivier Matz
2019-10-29 10:59                             ` [dpdk-dev] ***Spam*** " Andrew Rybchenko
2019-10-29 17:34                               ` Olivier Matz
2019-10-30  7:56                                 ` [dpdk-dev] " Andrew Rybchenko
2019-10-29 17:25                             ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-10-30  3:55                               ` Vamsi Krishna Attunuru
2019-10-30  7:46                               ` Andrew Rybchenko
2019-10-30  8:38                                 ` Jerin Jacob
2019-10-30 14:33                                   ` Olivier Matz
2019-10-30 14:54                                     ` Jerin Jacob
2019-10-30  8:42                                 ` Olivier Matz
2019-10-30 14:36                         ` [dpdk-dev] [PATCH v2 0/6] mempool: avoid objects allocations " Olivier Matz
2019-10-30 14:36                           ` [dpdk-dev] [PATCH v2 1/6] mempool: allow unaligned addr/len in populate virt Olivier Matz
2019-10-30 14:36                           ` [dpdk-dev] [PATCH v2 2/6] mempool: reduce wasted space on mempool populate Olivier Matz
2019-10-30 14:36                           ` [dpdk-dev] [PATCH v2 3/6] mempool: remove optimistic IOVA-contiguous allocation Olivier Matz
2019-10-30 14:36                           ` [dpdk-dev] [PATCH v2 4/6] mempool: introduce function to get mempool page size Olivier Matz
2019-10-30 14:36                           ` [dpdk-dev] [PATCH v2 5/6] mempool: prevent objects from being across pages Olivier Matz
2019-10-31  6:54                             ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-10-31  8:19                               ` Jerin Jacob
2019-10-31  8:29                                 ` Olivier Matz
2019-10-31  8:24                               ` Olivier Matz
2019-10-31  8:33                                 ` Andrew Rybchenko
2019-10-31  8:45                                   ` Olivier Matz
2019-10-30 14:36                           ` [dpdk-dev] [PATCH v2 6/6] mempool: use the specific macro for object alignment Olivier Matz
2019-10-30 14:55                             ` Andrew Rybchenko
2019-11-01  3:56                           ` [dpdk-dev] [PATCH v2 0/6] mempool: avoid objects allocations across pages Nipun Gupta
2019-11-04 15:12                         ` [dpdk-dev] [PATCH v3 0/7] " Olivier Matz
2019-11-04 15:12                           ` [dpdk-dev] [PATCH v3 1/7] mempool: allow unaligned addr/len in populate virt Olivier Matz
2019-11-04 15:12                           ` [dpdk-dev] [PATCH v3 2/7] mempool: reduce wasted space on mempool populate Olivier Matz
2019-11-04 15:12                           ` [dpdk-dev] [PATCH v3 3/7] mempool: remove optimistic IOVA-contiguous allocation Olivier Matz
2019-11-04 15:12                           ` [dpdk-dev] [PATCH v3 4/7] mempool: introduce function to get mempool page size Olivier Matz
2019-11-04 15:12                           ` [dpdk-dev] [PATCH v3 5/7] mempool: introduce helpers for populate and calc mem size Olivier Matz
2019-11-05 12:19                             ` Andrew Rybchenko
2019-11-04 15:12                           ` Olivier Matz [this message]
2019-11-05 12:22                             ` [dpdk-dev] [PATCH v3 6/7] mempool: prevent objects from being across pages Andrew Rybchenko
2019-11-04 15:12                           ` [dpdk-dev] [PATCH v3 7/7] mempool: use the specific macro for object alignment Olivier Matz
2019-11-05 12:15                             ` Andrew Rybchenko
2019-11-05 12:48                               ` Olivier Matz
2019-11-05 15:36                         ` [dpdk-dev] [PATCH v4 0/7] mempool: avoid objects allocations across pages Olivier Matz
2019-11-05 15:37                           ` [dpdk-dev] [PATCH v4 1/7] mempool: allow unaligned addr/len in populate virt Olivier Matz
2019-11-05 15:37                           ` [dpdk-dev] [PATCH v4 2/7] mempool: reduce wasted space on mempool populate Olivier Matz
2019-11-05 15:37                           ` [dpdk-dev] [PATCH v4 3/7] mempool: remove optimistic IOVA-contiguous allocation Olivier Matz
2019-11-05 15:37                           ` [dpdk-dev] [PATCH v4 4/7] mempool: introduce function to get mempool page size Olivier Matz
2019-11-05 15:37                           ` [dpdk-dev] [PATCH v4 5/7] mempool: introduce helpers for populate and calc mem size Olivier Matz
2019-11-05 15:37                           ` [dpdk-dev] [PATCH v4 6/7] mempool: prevent objects from being across pages Olivier Matz
2019-11-05 15:37                           ` [dpdk-dev] [PATCH v4 7/7] mempool: use the specific macro for object alignment Olivier Matz
2019-11-05 16:03                           ` [dpdk-dev] [PATCH v4 0/7] mempool: avoid objects allocations across pages Olivier Matz
2019-11-06 10:39                           ` Thomas Monjalon
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 2/4] kni: add IOVA = VA support in KNI lib vattunuru
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 3/4] kni: add IOVA=VA support in KNI module vattunuru
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 4/4] kni: modify IOVA mode checks to support VA vattunuru
2019-07-23  5:38             ` [dpdk-dev] [PATCH v8 0/5] kni: add IOVA=VA support vattunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 1/5] mempool: populate mempool with page sized chunks of memory vattunuru
2019-07-23 11:08                 ` Andrew Rybchenko
2019-07-23 12:28                   ` Vamsi Krishna Attunuru
2019-07-23 19:33                     ` Andrew Rybchenko
2019-07-24  7:09                       ` Vamsi Krishna Attunuru
2019-07-24  7:27                         ` Andrew Rybchenko
2019-07-29  6:25                           ` Vamsi Krishna Attunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 2/5] add IOVA -VA support in KNI lib vattunuru
2019-07-23 10:54                 ` Andrew Rybchenko
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 3/5] kni: add app specific mempool create & free routine vattunuru
2019-07-23 10:50                 ` Andrew Rybchenko
2019-07-23 11:01                   ` Vamsi Krishna Attunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 4/5] kni: add IOVA=VA support in KNI module vattunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 5/5] kni: modify IOVA mode checks to support VA vattunuru
2019-07-24  7:14               ` [dpdk-dev] [PATCH v8 0/5] kni: add IOVA=VA support Vamsi Krishna Attunuru
2019-07-29 12:13               ` [dpdk-dev] [PATCH v9 " vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 1/5] mempool: populate mempool with the page sized chunks of memory vattunuru
2019-07-29 12:41                   ` Andrew Rybchenko
2019-07-29 13:33                     ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-08-16  6:12                   ` [dpdk-dev] [PATCH v10 0/5] kni: add IOVA=VA support vattunuru
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 1/5] mempool: populate mempool with the page sized chunks vattunuru
2019-10-08  9:26                       ` Olivier Matz
2019-10-09  5:29                         ` Vamsi Krishna Attunuru
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 2/5] kni: add IOVA=VA support in KNI lib vattunuru
2019-10-15 15:36                       ` Yigit, Ferruh
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 3/5] kni: add app specific mempool create and free routines vattunuru
2019-10-15 15:40                       ` Yigit, Ferruh
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 4/5] kni: add IOVA=VA support in KNI module vattunuru
2019-10-15 15:43                       ` Yigit, Ferruh
2019-10-15 15:46                         ` Stephen Hemminger
2019-10-16 11:26                           ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-10-16 14:37                             ` Vamsi Krishna Attunuru
2019-10-16 16:14                             ` Ferruh Yigit
2019-10-18 17:15                               ` Vamsi Krishna Attunuru
2019-10-21 11:45                                 ` Ferruh Yigit
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 5/5] kni: modify IOVA mode checks to support VA vattunuru
2019-09-25  4:00                     ` [dpdk-dev] [PATCH v10 0/5] kni: add IOVA=VA support Vamsi Krishna Attunuru
2019-10-08  5:08                       ` Vamsi Krishna Attunuru
2019-10-14  4:05                         ` Vamsi Krishna Attunuru
2019-10-15 15:34                     ` Yigit, Ferruh
2019-10-16 12:17                       ` Vamsi Krishna Attunuru
2019-10-16 16:21                         ` Ferruh Yigit
2019-10-18 16:42                           ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-10-21  8:03                     ` [dpdk-dev] [PATCH v11 0/4] kni: add IOVA=VA mode support vattunuru
2019-10-21  8:03                       ` [dpdk-dev] [PATCH v11 1/4] mempool: populate mempool with the page sized chunks vattunuru
2019-10-21  8:03                       ` [dpdk-dev] [PATCH v11 2/4] eal: add legacy kni option vattunuru
2019-10-21 11:55                         ` Ferruh Yigit
2019-10-21 13:13                           ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-10-21 13:32                             ` Ferruh Yigit
2019-10-21 14:38                               ` Vamsi Krishna Attunuru
2019-10-22  9:29                                 ` Vamsi Krishna Attunuru
2019-10-22 12:28                                 ` Andrew Rybchenko
2019-10-22 13:31                                   ` Vamsi Krishna Attunuru
2019-10-23 10:12                                     ` Jerin Jacob
2019-10-23 14:47                                       ` Olivier Matz
2019-10-23 15:02                                         ` Jerin Jacob
2019-10-24 17:35                                           ` Olivier Matz
2019-10-24 19:30                                             ` Jerin Jacob
2019-10-25  9:20                                               ` Vamsi Krishna Attunuru
2019-10-26 12:25                                                 ` Olivier Matz
2019-10-26 14:09                                                   ` Vamsi Krishna Attunuru
2019-10-28 14:05                                                     ` Olivier Matz
2019-10-21  8:03                       ` [dpdk-dev] [PATCH v11 3/4] kni: add IOVA=VA support vattunuru
2019-10-21  8:03                       ` [dpdk-dev] [PATCH v11 4/4] kni: add IOVA=VA support in kernel module vattunuru
2019-10-21 12:02                         ` Ferruh Yigit
2019-11-05 11:04                       ` [dpdk-dev] [PATCH v12 0/2] add IOVA=VA mode support vattunuru
2019-11-05 11:04                         ` [dpdk-dev] [PATCH v12 1/2] kni: " vattunuru
2019-11-14 10:57                           ` David Marchand
2019-11-14 11:13                             ` David Marchand
2019-11-14 12:10                               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-11-14 12:25                                 ` David Marchand
2019-11-14 17:48                             ` [dpdk-dev] " David Marchand
2019-11-05 11:04                         ` [dpdk-dev] [PATCH v12 2/2] kni: add IOVA=VA support in kernel module vattunuru
2019-11-06 10:49                         ` [dpdk-dev] [PATCH v12 0/2] add IOVA=VA mode support Thomas Monjalon
2019-11-06 11:09                           ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-11-06 11:53                             ` Thomas Monjalon
2019-11-06 11:59                               ` Vamsi Krishna Attunuru
2019-11-07 10:34                             ` Vamsi Krishna Attunuru
2019-11-07 19:53                         ` [dpdk-dev] " Ferruh Yigit
2019-11-08  4:16                           ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-11-08 14:26                             ` Ferruh Yigit
2019-11-08 14:54                               ` Jerin Jacob
2019-11-13  6:33                                 ` Vamsi Krishna Attunuru
2019-11-13 12:32                                   ` Ferruh Yigit
2019-11-15 11:18                         ` [dpdk-dev] [PATCH v13 0/2] kni: support IOVA mode vattunuru
2019-11-15 11:18                           ` [dpdk-dev] [PATCH v13 1/2] kni: support IOVA mode in kernel module vattunuru
2019-11-15 11:18                           ` [dpdk-dev] [PATCH v13 2/2] kni: support IOVA mode vattunuru
2019-11-15 12:11                             ` Ferruh Yigit
2019-11-15 12:59                             ` David Marchand
2019-11-15 13:35                               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-11-15 13:40                                 ` David Marchand
2019-11-15 13:40                               ` [dpdk-dev] " Jerin Jacob
2019-11-15 14:56                                 ` David Marchand
2019-11-15 15:22                                   ` Jerin Jacob
2019-11-15 17:07                           ` [dpdk-dev] [PATCH v14 0/2] " vattunuru
2019-11-15 17:07                             ` [dpdk-dev] [PATCH v14 1/2] kni: support IOVA mode in kernel module vattunuru
2019-11-15 17:07                             ` [dpdk-dev] [PATCH v14 2/2] kni: support IOVA mode vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 2/5] kni: add IOVA=VA support in KNI lib vattunuru
2019-07-29 12:24                   ` Igor Ryzhov
2019-07-29 13:22                     ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 3/5] kni: add app specific mempool create & free routine vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 4/5] kni: add IOVA=VA support in KNI module vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 5/5] kni: modify IOVA mode checks to support VA vattunuru
2019-04-23  8:56       ` [dpdk-dev] [PATCH v4] kni: add IOVA va support for kni Burakov, Anatoly
