DPDK-dev Archive on lore.kernel.org
* [PATCH] kni: add IOVA va support for kni
@ 2018-09-27 10:49 Kiran Kumar
  2018-09-27 10:58 ` Burakov, Anatoly
                   ` (2 more replies)
  0 siblings, 3 replies; 103+ messages in thread
From: Kiran Kumar @ 2018-09-27 10:49 UTC (permalink / raw)
  To: ferruh.yigit
  Cc: dev, anatoly.burakov, Jacob,  Jerin, hemant.agrawal,
	jianfeng.tan, Kokkilagadda, Kiran

With the current KNI implementation, the kernel module works only in
IOVA=PA mode. This patch adds support for the kernel module to work
in IOVA=VA mode.

The idea is to maintain a mapping in the KNI module between user pages
and kernel pages, and in the fast path to look up this table to get the
kernel virtual address corresponding to a given user virtual address.

In IOVA=VA mode, the memory allocated to the pool is physically and
virtually contiguous. We take advantage of this to create a mapping in
the kernel. In the kernel we need mappings for the queues
(tx_q, rx_q, ... slow path) and for the mbuf memory (fast path).

At KNI init time, in the slow path, we create a mapping for the
queues and the mbuf memory using get_user_pages(), similar to AF_XDP.
Using the pool's base address, we create a page map table for the
mbufs, which the fast path uses for kernel page translation.

At KNI init time, we pass the base address and size of the pool to the
kernel. In the kernel, using the get_user_pages() API, we pin the pages
(PAGE_SIZE each) and store the mapping and the user-space start address
in a table, as sketched below.
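
As a minimal sketch, this init-time work could look like the following
(a hypothetical helper condensing kni_pin_pages() and the page-map
construction in kni_ioctl_create() from the diff below; error
unwinding is omitted and all names are illustrative):

static int
map_user_pool(void *base, size_t size, struct page ***pgs_out,
	      void ***map_out, u64 *start_page)
{
	u32 npgs = DIV_ROUND_UP(size, PAGE_SIZE);
	struct page **pgs;
	void **map;
	u32 i;

	pgs = kcalloc(npgs, sizeof(*pgs), GFP_KERNEL);
	map = kcalloc(npgs, sizeof(*map), GFP_KERNEL);
	if (!pgs || !map)
		return -ENOMEM;

	/* Pin the user pages; caller holds current->mm->mmap_sem */
	if (get_user_pages((unsigned long)base, npgs, FOLL_WRITE,
			   pgs, NULL) != npgs)
		return -ENOMEM;

	/* Kernel virtual address of each pinned user page */
	for (i = 0; i < npgs; i++)
		map[i] = page_address(pgs[i]);

	/* Reference page for all user virtual addresses */
	*start_page = (u64)base >> PAGE_SHIFT;
	*pgs_out = pgs;
	*map_out = map;
	return 0;
}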

In the fast path, for any user address, we shift right by PAGE_SHIFT
(user_addr >> PAGE_SHIFT) and subtract the start page from the result;
this gives the index of the kernel page within the page map table.
Adding the in-page offset to that kernel page address gives the kernel
address for the user virtual address.

For example, say the user pool base address is X and the size is S,
both passed to the kernel. In the kernel we create a mapping for this
range using get_user_pages(). Our page map table will look like
[Y, Y+PAGE_SIZE, Y+(PAGE_SIZE*2), ...] and the user start page will be
U (obtained from X >> PAGE_SHIFT).

For any user address Z, the index into the page map table is
((Z >> PAGE_SHIFT) - U). Adding the offset (Z & (PAGE_SIZE - 1)) to the
entry at that index gives the kernel virtual address, as in the sketch
below.
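
As a minimal sketch of this lookup (illustrative only; the helper this
patch actually adds is get_kva() in kni_net.c below):

static inline void *
user_to_kva(u64 z, void **page_map, u64 u)
{
	/* Index of the kernel page within the page map table */
	u64 index = (z >> PAGE_SHIFT) - u;

	/* Kernel page base plus the offset within the page */
	return (u8 *)page_map[index] + (z & (PAGE_SIZE - 1));
}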

Signed-off-by: Kiran Kumar <kkokkilagadda@caviumnetworks.com>
---
 kernel/linux/kni/kni_dev.h                    |  37 +++
 kernel/linux/kni/kni_misc.c                   | 211 +++++++++++++++++-
 kernel/linux/kni/kni_net.c                    | 112 ++++++++--
 lib/librte_eal/linuxapp/eal/eal.c             |   9 -
 .../eal/include/exec-env/rte_kni_common.h     |   8 +
 lib/librte_kni/rte_kni.c                      |  21 ++
 6 files changed, 363 insertions(+), 35 deletions(-)

diff --git a/kernel/linux/kni/kni_dev.h b/kernel/linux/kni/kni_dev.h
index 6275ef27f..6a92da497 100644
--- a/kernel/linux/kni/kni_dev.h
+++ b/kernel/linux/kni/kni_dev.h
@@ -29,10 +29,47 @@
 
 #define MBUF_BURST_SZ 32
 
+struct iova_page_info {
+	/* User-to-kernel page map table, used for
+	 * fast-path lookup
+	 */
+	struct mbuf_page {
+		void *addr;
+	} *page_map;
+
+	/* Page mask */
+	u64 page_mask;
+
+	/* Start page for user address */
+	u64 start_page;
+
+	struct page_info {
+		/* Physical pages returned by get_user_pages */
+		struct page **pgs;
+
+		/* Number of pages returned by get_user_pages */
+		u32 npgs;
+	} page_info;
+
+	/* Queue info */
+	struct page_info tx_q;
+	struct page_info rx_q;
+	struct page_info alloc_q;
+	struct page_info free_q;
+	struct page_info req_q;
+	struct page_info resp_q;
+	struct page_info sync_va;
+};
+
 /**
  * A structure describing the private information for a kni device.
  */
 struct kni_dev {
+	/* Page info for IOVA=VA mode */
+	struct iova_page_info va_info;
+	/* IOVA mode: 0 = PA, 1 = VA */
+	uint8_t iova_mode;
+
 	/* kni list */
 	struct list_head list;
 
diff --git a/kernel/linux/kni/kni_misc.c b/kernel/linux/kni/kni_misc.c
index fa69f8e63..2627c1f69 100644
--- a/kernel/linux/kni/kni_misc.c
+++ b/kernel/linux/kni/kni_misc.c
@@ -197,6 +197,117 @@ kni_dev_remove(struct kni_dev *dev)
 	return 0;
 }
 
+static void
+kni_unpin_pages(struct page_info *mem)
+{
+	u32 i;
+
+	/* Mark the user pages as dirty, so that their contents
+	 * are not lost when the pages are released.
+	 */
+	for (i = 0; i < mem->npgs; i++) {
+		struct page *page = mem->pgs[i];
+
+		set_page_dirty_lock(page);
+		put_page(page);
+	}
+
+	kfree(mem->pgs);
+	mem->pgs = NULL;
+}
+
+static void
+kni_clean_queue(struct page_info *mem)
+{
+	if (mem->pgs) {
+		set_page_dirty_lock(mem->pgs[0]);
+		put_page(mem->pgs[0]);
+		kfree(mem->pgs);
+		mem->pgs = NULL;
+	}
+}
+
+static void
+kni_cleanup_iova(struct iova_page_info *mem)
+{
+	kni_unpin_pages(&mem->page_info);
+	kfree(mem->page_map);
+	mem->page_map = NULL;
+
+	kni_clean_queue(&mem->tx_q);
+	kni_clean_queue(&mem->rx_q);
+	kni_clean_queue(&mem->alloc_q);
+	kni_clean_queue(&mem->free_q);
+	kni_clean_queue(&mem->req_q);
+	kni_clean_queue(&mem->resp_q);
+	kni_clean_queue(&mem->sync_va);
+}
+
+int
+kni_pin_pages(void *address, size_t size, struct page_info *mem)
+{
+	unsigned int gup_flags = FOLL_WRITE;
+	long npgs;
+	int err;
+
+	/* Get at least one page */
+	if (size < PAGE_SIZE)
+		size = PAGE_SIZE;
+
+	/* Compute number of user pages based on page size */
+	mem->npgs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+	/* Allocate memory for the pages */
+	mem->pgs = kcalloc(mem->npgs, sizeof(*mem->pgs),
+		      GFP_KERNEL | __GFP_NOWARN);
+	if (!mem->pgs) {
+		pr_err("%s: -ENOMEM\n", __func__);
+		return -ENOMEM;
+	}
+
+	down_write(&current->mm->mmap_sem);
+
+	/* Get the user pages from the user address */
+	npgs = get_user_pages((u64)address, mem->npgs,
+				gup_flags, &mem->pgs[0], NULL);
+	up_write(&current->mm->mmap_sem);
+
+	/* We didn't get all the requested pages, return an error */
+	if (npgs != mem->npgs) {
+		if (npgs >= 0) {
+			mem->npgs = npgs;
+			err = -ENOMEM;
+			pr_err("%s: -ENOMEM\n", __func__);
+			goto out_pin;
+		}
+		err = npgs;
+		goto out_pgs;
+	}
+	return 0;
+
+out_pin:
+	kni_unpin_pages(mem);
+out_pgs:
+	kfree(mem->pgs);
+	mem->pgs = NULL;
+	return err;
+}
+
+static void*
+kni_map_queue(struct kni_dev *kni, u64 addr,
+			   struct page_info *mm)
+{
+	/* Map at least 1 page */
+	if (kni_pin_pages((void *)addr, PAGE_SIZE,
+			  mm) != 0) {
+		pr_err("Unable to pin pages\n");
+		return NULL;
+	}
+
+	return (page_address(mm->pgs[0]) +
+			   (addr & kni->va_info.page_mask));
+}
+
 static int
 kni_release(struct inode *inode, struct file *file)
 {
@@ -224,6 +335,11 @@ kni_release(struct inode *inode, struct file *file)
 		}
 
 		kni_dev_remove(dev);
+
+		/* IOVA=VA mode, unpin pages */
+		if (likely(dev->iova_mode == 1))
+			kni_cleanup_iova(&dev->va_info);
+
 		list_del(&dev->list);
 	}
 	up_write(&knet->kni_list_lock);
@@ -364,18 +480,94 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
 	strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
 
 	/* Translate user space info into kernel space info */
-	kni->tx_q = phys_to_virt(dev_info.tx_phys);
-	kni->rx_q = phys_to_virt(dev_info.rx_phys);
-	kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
-	kni->free_q = phys_to_virt(dev_info.free_phys);
+	kni->iova_mode = dev_info.iova_mode;
 
-	kni->req_q = phys_to_virt(dev_info.req_phys);
-	kni->resp_q = phys_to_virt(dev_info.resp_phys);
-	kni->sync_va = dev_info.sync_va;
-	kni->sync_kva = phys_to_virt(dev_info.sync_phys);
+	if (kni->iova_mode) {
+		u64 mbuf_addr;
+		int i;
+
+		/* Base address of the user-space mbuf memory */
+		mbuf_addr = (u64)dev_info.mbuf_va;
+
+		/* Precompute the page mask, used in the fast path */
+		kni->va_info.page_mask = (u64)(PAGE_SIZE - 1);
 
+		/* Store the start page address; this is the reference
+		 * for all user virtual addresses
+		 */
+		kni->va_info.start_page = (mbuf_addr >> PAGE_SHIFT);
+
+		/* Get and pin the user pages */
+		if (kni_pin_pages(dev_info.mbuf_va, dev_info.mbuf_pool_size,
+			      &kni->va_info.page_info) != 0) {
+			pr_err("Unable to pin pages\n");
+			return -1;
+		}
+
+		/* Page map table between user and kernel pages */
+		kni->va_info.page_map = kcalloc(kni->va_info.page_info.npgs,
+						   sizeof(struct mbuf_page),
+						   GFP_KERNEL);
+		if (kni->va_info.page_map == NULL) {
+			pr_err("Out of memory\n");
+			return -ENOMEM;
+		}
+
+		/* Convert the user pages to kernel page addresses */
+		for (i = 0; i < kni->va_info.page_info.npgs; i++) {
+			kni->va_info.page_map[i].addr =
+				page_address(kni->va_info.page_info.pgs[i]);
+		}
+
+		/* map queues */
+		kni->tx_q = kni_map_queue(kni, dev_info.tx_phys,
+					  &kni->va_info.tx_q);
+		if (kni->tx_q == NULL)
+			goto iova_err;
+
+		kni->rx_q = kni_map_queue(kni, dev_info.rx_phys,
+					  &kni->va_info.rx_q);
+		if (kni->rx_q == NULL)
+			goto iova_err;
+
+		kni->alloc_q = kni_map_queue(kni, dev_info.alloc_phys,
+					     &kni->va_info.alloc_q);
+		if (kni->alloc_q == NULL)
+			goto iova_err;
+
+		kni->free_q = kni_map_queue(kni, dev_info.free_phys,
+					    &kni->va_info.free_q);
+		if (kni->free_q == NULL)
+			goto iova_err;
+
+		kni->req_q = kni_map_queue(kni, dev_info.req_phys,
+					   &kni->va_info.req_q);
+		if (kni->req_q == NULL)
+			goto iova_err;
+
+		kni->resp_q = kni_map_queue(kni, dev_info.resp_phys,
+					    &kni->va_info.resp_q);
+		if (kni->resp_q == NULL)
+			goto iova_err;
+
+		kni->sync_kva = kni_map_queue(kni, dev_info.sync_phys,
+					      &kni->va_info.sync_va);
+		if (kni->sync_kva == NULL)
+			goto iova_err;
+	} else {
+		/* Address translation for IOVA=PA mode */
+		kni->tx_q = phys_to_virt(dev_info.tx_phys);
+		kni->rx_q = phys_to_virt(dev_info.rx_phys);
+		kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
+		kni->free_q = phys_to_virt(dev_info.free_phys);
+		kni->req_q = phys_to_virt(dev_info.req_phys);
+		kni->resp_q = phys_to_virt(dev_info.resp_phys);
+		kni->sync_kva = phys_to_virt(dev_info.sync_phys);
+	}
+	kni->sync_va = dev_info.sync_va;
 	kni->mbuf_size = dev_info.mbuf_size;
 
+
 	pr_debug("tx_phys:      0x%016llx, tx_q addr:      0x%p\n",
 		(unsigned long long) dev_info.tx_phys, kni->tx_q);
 	pr_debug("rx_phys:      0x%016llx, rx_q addr:      0x%p\n",
@@ -475,6 +667,9 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
 	up_write(&knet->kni_list_lock);
 
 	return 0;
+iova_err:
+	kni_cleanup_iova(&kni->va_info);
+	return -1;
 }
 
 static int
diff --git a/kernel/linux/kni/kni_net.c b/kernel/linux/kni/kni_net.c
index 7fcfa106c..d6d76a32f 100644
--- a/kernel/linux/kni/kni_net.c
+++ b/kernel/linux/kni/kni_net.c
@@ -35,6 +35,25 @@ static void kni_net_rx_normal(struct kni_dev *kni);
 /* kni rx function pointer, with default to normal rx */
 static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
 
+
+/* Get the kernel address for a user address using the
+ * page map table. Used only in IOVA=VA mode.
+ */
+static inline void*
+get_kva(uint64_t usr_addr, struct kni_dev *kni)
+{
+	uint32_t index;
+	/* User page minus the start page gives the index
+	 * within the page map table
+	 */
+	index = (usr_addr >> PAGE_SHIFT) - kni->va_info.start_page;
+
+	/* Add the offset to the page address */
+	return (kni->va_info.page_map[index].addr +
+		(usr_addr & kni->va_info.page_mask));
+
+}
+
 /* physical address to kernel virtual address */
 static void *
 pa2kva(void *pa)
@@ -181,7 +200,10 @@ kni_fifo_trans_pa2va(struct kni_dev *kni,
 			return;
 
 		for (i = 0; i < num_rx; i++) {
-			kva = pa2kva(kni->pa[i]);
+			if (likely(kni->iova_mode == 1))
+				kva = get_kva((u64)(kni->pa[i]), kni);
+			else
+				kva = pa2kva(kni->pa[i]);
 			kni->va[i] = pa2va(kni->pa[i], kva);
 		}
 
@@ -258,8 +280,15 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)
 	if (likely(ret == 1)) {
 		void *data_kva;
 
-		pkt_kva = pa2kva(pkt_pa);
-		data_kva = kva2data_kva(pkt_kva);
+		if (likely(kni->iova_mode == 1)) {
+			pkt_kva = get_kva((u64)pkt_pa, kni);
+			data_kva = (uint8_t *)pkt_kva +
+				(sizeof(struct rte_kni_mbuf) +
+				 pkt_kva->data_off);
+		} else {
+			pkt_kva = pa2kva(pkt_pa);
+			data_kva = kva2data_kva(pkt_kva);
+		}
 		pkt_va = pa2va(pkt_pa, pkt_kva);
 
 		len = skb->len;
@@ -330,9 +359,15 @@ kni_net_rx_normal(struct kni_dev *kni)
 
 	/* Transfer received packets to netif */
 	for (i = 0; i < num_rx; i++) {
-		kva = pa2kva(kni->pa[i]);
+		if (likely(kni->iova_mode == 1)) {
+			kva = get_kva((u64)kni->pa[i], kni);
+			data_kva = (uint8_t *)kva +
+				(sizeof(struct rte_kni_mbuf) + kva->data_off);
+		} else {
+			kva = pa2kva(kni->pa[i]);
+			data_kva = kva2data_kva(kva);
+		}
 		len = kva->pkt_len;
-		data_kva = kva2data_kva(kva);
 		kni->va[i] = pa2va(kni->pa[i], kva);
 
 		skb = dev_alloc_skb(len + 2);
@@ -358,8 +393,17 @@ kni_net_rx_normal(struct kni_dev *kni)
 				if (!kva->next)
 					break;
 
-				kva = pa2kva(va2pa(kva->next, kva));
-				data_kva = kva2data_kva(kva);
+				if (likely(kni->iova_mode == 1)) {
+					kva = get_kva(
+						(u64)va2pa(kva->next, kva),
+						kni);
+					data_kva = (uint8_t *)kva +
+					(sizeof(struct rte_kni_mbuf) +
+					 kva->data_off);
+				} else {
+					kva = pa2kva(va2pa(kva->next, kva));
+					data_kva = kva2data_kva(kva);
+				}
 			}
 		}
 
@@ -429,14 +473,31 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)
 		num = ret;
 		/* Copy mbufs */
 		for (i = 0; i < num; i++) {
-			kva = pa2kva(kni->pa[i]);
-			len = kva->pkt_len;
-			data_kva = kva2data_kva(kva);
-			kni->va[i] = pa2va(kni->pa[i], kva);
+			if (likely(kni->iova_mode == 1)) {
+				kva = get_kva((u64)(kni->pa[i]), kni);
+				len = kva->pkt_len;
+				data_kva = (uint8_t *)kva +
+					(sizeof(struct rte_kni_mbuf) +
+					 kva->data_off);
+				kni->va[i] = pa2va(kni->pa[i], kva);
+				alloc_kva = get_kva((u64)(kni->alloc_pa[i]),
+						    kni);
+				alloc_data_kva = (uint8_t *)alloc_kva +
+					(sizeof(struct rte_kni_mbuf) +
+					 alloc_kva->data_off);
+				kni->alloc_va[i] = pa2va(kni->alloc_pa[i],
+							 alloc_kva);
+			} else {
+				kva = pa2kva(kni->pa[i]);
+				len = kva->pkt_len;
+				data_kva = kva2data_kva(kva);
+				kni->va[i] = pa2va(kni->pa[i], kva);
 
-			alloc_kva = pa2kva(kni->alloc_pa[i]);
-			alloc_data_kva = kva2data_kva(alloc_kva);
-			kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);
+				alloc_kva = pa2kva(kni->alloc_pa[i]);
+				alloc_data_kva = kva2data_kva(alloc_kva);
+				kni->alloc_va[i] = pa2va(kni->alloc_pa[i],
+							 alloc_kva);
+			}
 
 			memcpy(alloc_data_kva, data_kva, len);
 			alloc_kva->pkt_len = len;
@@ -502,9 +563,15 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
 
 	/* Copy mbufs to sk buffer and then call tx interface */
 	for (i = 0; i < num; i++) {
-		kva = pa2kva(kni->pa[i]);
+		if (likely(kni->iova_mode == 1)) {
+			kva = get_kva((u64)(kni->pa[i]), kni);
+			data_kva = (uint8_t *)kva +
+				(sizeof(struct rte_kni_mbuf) + kva->data_off);
+		} else {
+			kva = pa2kva(kni->pa[i]);
+			data_kva = kva2data_kva(kva);
+		}
 		len = kva->pkt_len;
-		data_kva = kva2data_kva(kva);
 		kni->va[i] = pa2va(kni->pa[i], kva);
 
 		skb = dev_alloc_skb(len + 2);
@@ -540,8 +607,17 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
 				if (!kva->next)
 					break;
 
-				kva = pa2kva(va2pa(kva->next, kva));
-				data_kva = kva2data_kva(kva);
+				if (likely(kni->iova_mode == 1)) {
+					kva = get_kva(
+						(u64)(va2pa(kva->next, kva)),
+						kni);
+					data_kva = (uint8_t *)kva +
+					(sizeof(struct rte_kni_mbuf) +
+					 kva->data_off);
+				} else {
+					kva = pa2kva(va2pa(kva->next, kva));
+					data_kva = kva2data_kva(kva);
+				}
 			}
 		}
 
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index e59ac6577..7d5b2ebfa 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -875,15 +875,6 @@ rte_eal_init(int argc, char **argv)
 	/* autodetect the iova mapping mode (default is iova_pa) */
 	rte_eal_get_configuration()->iova_mode = rte_bus_get_iommu_class();
 
-	/* Workaround for KNI which requires physical address to work */
-	if (rte_eal_get_configuration()->iova_mode == RTE_IOVA_VA &&
-			rte_eal_check_module("rte_kni") == 1) {
-		rte_eal_get_configuration()->iova_mode = RTE_IOVA_PA;
-		RTE_LOG(WARNING, EAL,
-			"Some devices want IOVA as VA but PA will be used because.. "
-			"KNI module inserted\n");
-	}
-
 	if (internal_config.no_hugetlbfs == 0) {
 		/* rte_config isn't initialized yet */
 		ret = internal_config.process_type == RTE_PROC_PRIMARY ?
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index cfa9448bd..706756e6c 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -122,6 +122,14 @@ struct rte_kni_device_info {
 	unsigned mbuf_size;
 	unsigned int mtu;
 	char mac_addr[6];
+
+	/* IOVA mode. 1 = VA, 0 = PA */
+	uint8_t iova_mode;
+
+	/* Pool size, used in the kernel to map
+	 * the user pages
+	 */
+	uint64_t mbuf_pool_size;
 };
 
 #define KNI_DEVICE "kni"
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 65f6a2b03..1e79e68f1 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -397,6 +397,27 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	ctx->slot_id = slot->id;
 	ctx->mbuf_size = conf->mbuf_size;
 
+	dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 1 : 0;
+	if (dev_info.iova_mode) {
+		struct rte_mempool_memhdr *hdr;
+		uint64_t pool_size = 0;
+
+		/* The base address of the pool is taken from the
+		 * first memory chunk, which is physically and
+		 * virtually contiguous.
+		 * This approach works only if the allocated pool
+		 * memory is contiguous; otherwise it fails.
+		 */
+		hdr = STAILQ_FIRST(&pktmbuf_pool->mem_list);
+		dev_info.mbuf_va = (void *)(hdr->addr);
+
+		/* Traverse the list and get the total size of the pool */
+		STAILQ_FOREACH(hdr, &pktmbuf_pool->mem_list, next) {
+			pool_size += hdr->len;
+		}
+		dev_info.mbuf_pool_size = pool_size +
+			pktmbuf_pool->mz->len;
+	}
 	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
 	KNI_MEM_CHECK(ret < 0);
 
-- 
2.17.1

Thread overview: 103+ messages
2018-09-27 10:49 [PATCH] kni: add IOVA va support for kni Kiran Kumar
2018-09-27 10:58 ` Burakov, Anatoly
2018-10-02 17:05 ` Ferruh Yigit
2019-04-01 17:30   ` Jerin Jacob Kollanukkaran
2019-04-01 18:20     ` Ferruh Yigit
2019-04-01  9:51 ` [PATCH v2] " Kiran Kumar Kokkilagadda
2019-04-03 16:29   ` Ferruh Yigit
2019-04-04  5:03     ` [dpdk-dev] [EXT] " Kiran Kumar Kokkilagadda
2019-04-04 11:20       ` Ferruh Yigit
2019-04-04 13:29         ` Burakov, Anatoly
2019-04-04  9:57     ` Burakov, Anatoly
2019-04-04 11:21       ` Ferruh Yigit
2019-04-16  4:55   ` [dpdk-dev] [PATCH v3] " kirankumark
2019-04-19 10:38     ` Thomas Monjalon
2019-04-22  4:39     ` [dpdk-dev] [PATCH v4] " kirankumark
2019-04-22  6:15       ` [dpdk-dev] [PATCH v5] " kirankumark
2019-04-26  9:11         ` Burakov, Anatoly
2019-06-25  3:56         ` [dpdk-dev] [PATCH v6 0/4] add IOVA = VA support in KNI vattunuru
2019-06-25  3:56           ` [dpdk-dev] [PATCH v6 1/4] lib/mempool: skip populating mempool objs that falls on page boundaries vattunuru
2019-06-25  3:56           ` [dpdk-dev] [PATCH v6 2/4] lib/kni: add PCI related information vattunuru
2019-06-25 17:41             ` Stephen Hemminger
2019-06-26  3:48               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-06-26 14:58                 ` Stephen Hemminger
2019-06-27  9:43                   ` Vamsi Krishna Attunuru
2019-07-11 16:22             ` [dpdk-dev] " Ferruh Yigit
2019-07-12 11:02               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-07-12 11:11                 ` Ferruh Yigit
2019-06-25  3:56           ` [dpdk-dev] [PATCH v6 3/4] example/kni: add IOVA support for kni application vattunuru
2019-07-11 16:23             ` Ferruh Yigit
2019-06-25  3:57           ` [dpdk-dev] [PATCH v6 4/4] kernel/linux/kni: add IOVA support in kni module vattunuru
2019-07-11 16:30             ` Ferruh Yigit
2019-07-12 10:38               ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-07-12 11:10                 ` Ferruh Yigit
2019-07-12 12:27                   ` Vamsi Krishna Attunuru
2019-07-12 16:29                   ` Vamsi Krishna Attunuru
2019-07-15 11:26                     ` Ferruh Yigit
2019-07-15 13:06                       ` Vamsi Krishna Attunuru
2019-07-11 16:43             ` [dpdk-dev] " Stephen Hemminger
2019-06-25 10:00           ` [dpdk-dev] [PATCH v6 0/4] add IOVA = VA support in KNI Burakov, Anatoly
2019-06-25 11:15             ` Jerin Jacob Kollanukkaran
2019-06-25 11:30               ` Burakov, Anatoly
2019-06-25 13:38                 ` Burakov, Anatoly
2019-06-27  9:34                   ` Jerin Jacob Kollanukkaran
2019-07-01 13:51                     ` Vamsi Krishna Attunuru
2019-07-04  6:42                       ` Vamsi Krishna Attunuru
2019-07-04  9:48                         ` Jerin Jacob Kollanukkaran
2019-07-11 16:21                           ` Ferruh Yigit
2019-07-17  9:04           ` [dpdk-dev] [PATCH v7 0/4] kni: add IOVA=VA support vattunuru
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 1/4] mempool: modify mempool populate() to skip objects from page boundaries vattunuru
2019-07-17 13:36               ` Andrew Rybchenko
2019-07-17 13:47                 ` Olivier Matz
2019-07-17 17:31                 ` Vamsi Krishna Attunuru
2019-07-18  9:28                   ` Andrew Rybchenko
2019-07-18 14:16                     ` Vamsi Krishna Attunuru
2019-07-19 13:38                       ` [dpdk-dev] [RFC 0/4] mempool: avoid objects allocations across pages Olivier Matz
2019-07-19 13:38                         ` [dpdk-dev] [RFC 1/4] mempool: clarify default populate function Olivier Matz
2019-07-19 15:42                           ` Andrew Rybchenko
2019-07-19 13:38                         ` [dpdk-dev] [RFC 2/4] mempool: unalign size when calculating required mem amount Olivier Matz
2019-08-07 15:21                           ` [dpdk-dev] ***Spam*** " Andrew Rybchenko
2019-07-19 13:38                         ` [dpdk-dev] [RFC 3/4] mempool: introduce function to get mempool page size Olivier Matz
2019-08-07 15:21                           ` Andrew Rybchenko
2019-07-19 13:38                         ` [dpdk-dev] [RFC 4/4] mempool: prevent objects from being across pages Olivier Matz
2019-07-19 14:03                           ` Burakov, Anatoly
2019-07-19 14:11                           ` Burakov, Anatoly
2019-08-07 15:21                           ` Andrew Rybchenko
2019-07-23  5:37                         ` [dpdk-dev] [RFC 0/4] mempool: avoid objects allocations " Vamsi Krishna Attunuru
2019-08-07 15:21                         ` [dpdk-dev] ***Spam*** " Andrew Rybchenko
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 2/4] kni: add IOVA = VA support in KNI lib vattunuru
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 3/4] kni: add IOVA=VA support in KNI module vattunuru
2019-07-17  9:04             ` [dpdk-dev] [PATCH v7 4/4] kni: modify IOVA mode checks to support VA vattunuru
2019-07-23  5:38             ` [dpdk-dev] [PATCH v8 0/5] kni: add IOVA=VA support vattunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 1/5] mempool: populate mempool with page sized chunks of memory vattunuru
2019-07-23 11:08                 ` Andrew Rybchenko
2019-07-23 12:28                   ` Vamsi Krishna Attunuru
2019-07-23 19:33                     ` Andrew Rybchenko
2019-07-24  7:09                       ` Vamsi Krishna Attunuru
2019-07-24  7:27                         ` Andrew Rybchenko
2019-07-29  6:25                           ` Vamsi Krishna Attunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 2/5] add IOVA -VA support in KNI lib vattunuru
2019-07-23 10:54                 ` Andrew Rybchenko
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 3/5] kni: add app specific mempool create & free routine vattunuru
2019-07-23 10:50                 ` Andrew Rybchenko
2019-07-23 11:01                   ` Vamsi Krishna Attunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 4/5] kni: add IOVA=VA support in KNI module vattunuru
2019-07-23  5:38               ` [dpdk-dev] [PATCH v8 5/5] kni: modify IOVA mode checks to support VA vattunuru
2019-07-24  7:14               ` [dpdk-dev] [PATCH v8 0/5] kni: add IOVA=VA support Vamsi Krishna Attunuru
2019-07-29 12:13               ` [dpdk-dev] [PATCH v9 " vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 1/5] mempool: populate mempool with the page sized chunks of memory vattunuru
2019-07-29 12:41                   ` Andrew Rybchenko
2019-07-29 13:33                     ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-08-16  6:12                   ` [dpdk-dev] [PATCH v10 0/5] kni: add IOVA=VA support vattunuru
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 1/5] mempool: populate mempool with the page sized chunks vattunuru
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 2/5] kni: add IOVA=VA support in KNI lib vattunuru
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 3/5] kni: add app specific mempool create and free routines vattunuru
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 4/5] kni: add IOVA=VA support in KNI module vattunuru
2019-08-16  6:12                     ` [dpdk-dev] [PATCH v10 5/5] kni: modify IOVA mode checks to support VA vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 2/5] kni: add IOVA=VA support in KNI lib vattunuru
2019-07-29 12:24                   ` Igor Ryzhov
2019-07-29 13:22                     ` [dpdk-dev] [EXT] " Vamsi Krishna Attunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 3/5] kni: add app specific mempool create & free routine vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 4/5] kni: add IOVA=VA support in KNI module vattunuru
2019-07-29 12:13                 ` [dpdk-dev] [PATCH v9 5/5] kni: modify IOVA mode checks to support VA vattunuru
2019-04-23  8:56       ` [dpdk-dev] [PATCH v4] kni: add IOVA va support for kni Burakov, Anatoly
