From: Chuck Lever <cel@kernel.org>
To: unlisted-recipients:; (no To-header on input)
Cc: iommu@lists.linux.dev, linux-rdma@vger.kernel.org,
	Chuck Lever <chuck.lever@oracle.com>
Subject: [PATCH RFC 3/9] dma-debug: Add dma_debug_ helpers for mapping bio_vec arrays
Date: Thu, 19 Oct 2023 11:25:52 -0400
Message-ID: <169772915215.5232.10127407258544978465.stgit@klimt.1015granger.net>
In-Reply-To: <169772852492.5232.17148564580779995849.stgit@klimt.1015granger.net>

From: Chuck Lever <chuck.lever@oracle.com>

Mirror the existing scatterlist debug helpers: add a dma_debug_bv
entry type and debug_dma_{map,unmap,sync}_bvecs() helpers so that
CONFIG_DMA_API_DEBUG can track DMA mappings of bio_vec arrays,
including the per-segment length and boundary checks already done
for scatterlists.

Cc: iommu@lists.linux.dev
Cc: linux-rdma@vger.kernel.org
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
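
Notes (reviewer aid, not part of the patch): below is a minimal sketch
of how a caller might exercise these debug hooks, assuming the
dma_map_bvecs_attrs(), dma_sync_bvecs_for_cpu(), and dma_unmap_bvecs()
entry points proposed elsewhere in this series (patches 6/9 and 7/9).
Those names and signatures are assumptions based on the rest of the
series, not interfaces added by this patch.

	static int example_dma_io(struct device *dev,
				  struct bio_vec *bvecs, int nents)
	{
		int mapped;

		/* With CONFIG_DMA_API_DEBUG enabled, mapping records
		 * one dma_debug_bv entry per mapped element.
		 */
		mapped = dma_map_bvecs_attrs(dev, bvecs, nents,
					     DMA_FROM_DEVICE, 0);
		if (mapped <= 0)
			return -EIO;

		/* ... device writes into the buffers via DMA ... */

		/* Make the device's writes visible to the CPU; the
		 * sync and the final unmap are checked against the
		 * entries recorded at map time.
		 */
		dma_sync_bvecs_for_cpu(dev, bvecs, nents, DMA_FROM_DEVICE);
		dma_unmap_bvecs(dev, bvecs, nents, DMA_FROM_DEVICE);
		return 0;
	}
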
 include/linux/dma-mapping.h |    1 
 kernel/dma/debug.c          |  163 +++++++++++++++++++++++++++++++++++++++++++
 kernel/dma/debug.h          |   38 ++++++++++
 3 files changed, 202 insertions(+)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f0ccca16a0ac..f511ec546f4d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -9,6 +9,7 @@
 #include <linux/err.h>
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
+#include <linux/bvec.h>
 #include <linux/bug.h>
 #include <linux/mem_encrypt.h>
 
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 3de494375b7b..efb4a2eaf9a0 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -39,6 +39,7 @@ enum {
 	dma_debug_sg,
 	dma_debug_coherent,
 	dma_debug_resource,
+	dma_debug_bv,
 };
 
 enum map_err_types {
@@ -142,6 +143,7 @@ static const char *type2name[] = {
 	[dma_debug_sg] = "scatter-gather",
 	[dma_debug_coherent] = "coherent",
 	[dma_debug_resource] = "resource",
+	[dma_debug_bv] = "bio-vec",
 };
 
 static const char *dir2name[] = {
@@ -1189,6 +1191,32 @@ static void check_sg_segment(struct device *dev, struct scatterlist *sg)
 #endif
 }
 
+static void check_bv_segment(struct device *dev, struct bio_vec *bv)
+{
+#ifdef CONFIG_DMA_API_DEBUG_SG
+	unsigned int max_seg = dma_get_max_seg_size(dev);
+	u64 start, end, boundary = dma_get_seg_boundary(dev);
+
+	/*
+	 * Either the driver forgot to set dma_parms appropriately, or
+	 * whoever generated the list forgot to check them.
+	 */
+	if (bv->length > max_seg)
+		err_printk(dev, NULL, "mapping bv entry longer than device claims to support [len=%u] [max=%u]\n",
+			   bv->length, max_seg);
+	/*
+	 * In some cases this could potentially be the DMA API
+	 * implementation's fault, but it would usually imply that
+	 * the bio_vec array was built inappropriately to begin with.
+	 */
+	start = bv_dma_address(bv);
+	end = start + bv_dma_len(bv) - 1;
+	if ((start ^ end) & ~boundary)
+		err_printk(dev, NULL, "mapping bv entry across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
+			   start, end, boundary);
+#endif
+}
+
 void debug_dma_map_single(struct device *dev, const void *addr,
 			    unsigned long len)
 {
@@ -1333,6 +1361,47 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 }
 
+void debug_dma_map_bvecs(struct device *dev, struct bio_vec *bvecs,
+			 int nents, int mapped_ents, int direction,
+			 unsigned long attrs)
+{
+	struct dma_debug_entry *entry;
+	struct bio_vec *bv;
+	int i;
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	for (i = 0; i < nents; i++) {
+		bv = &bvecs[i];
+		check_for_stack(dev, bv_page(bv), bv->offset);
+		if (!PageHighMem(bv_page(bv)))
+			check_for_illegal_area(dev, bv_virt(bv), bv->length);
+	}
+
+	for (i = 0; i < nents; i++) {
+		bv = &bvecs[i];
+
+		entry = dma_entry_alloc();
+		if (!entry)
+			return;
+
+		entry->type           = dma_debug_bv;
+		entry->dev            = dev;
+		entry->pfn	      = page_to_pfn(bv_page(bv));
+		entry->offset	      = bv->offset;
+		entry->size           = bv_dma_len(bv);
+		entry->dev_addr       = bv_dma_address(bv);
+		entry->direction      = direction;
+		entry->sg_call_ents   = nents;
+		entry->sg_mapped_ents = mapped_ents;
+
+		check_bv_segment(dev, bv);
+
+		add_dma_entry(entry, attrs);
+	}
+}
+
 static int get_nr_mapped_entries(struct device *dev,
 				 struct dma_debug_entry *ref)
 {
@@ -1384,6 +1453,37 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	}
 }
 
+void debug_dma_unmap_bvecs(struct device *dev, struct bio_vec *bvecs,
+			   int nelems, int dir)
+{
+	int mapped_ents = 0, i;
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	for (i = 0; i < nelems; i++) {
+		struct bio_vec *bv = &bvecs[i];
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_bv,
+			.dev            = dev,
+			.pfn		= page_to_pfn(bv_page(bv)),
+			.offset		= bv->offset,
+			.dev_addr       = bv_dma_address(bv),
+			.size           = bv_dma_len(bv),
+			.direction      = dir,
+			.sg_call_ents   = nelems,
+		};
+
+		if (mapped_ents && i >= mapped_ents)
+			break;
+
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+		check_unmap(&ref);
+	}
+}
+
 void debug_dma_alloc_coherent(struct device *dev, size_t size,
 			      dma_addr_t dma_addr, void *virt,
 			      unsigned long attrs)
@@ -1588,6 +1688,69 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	}
 }
 
+void debug_dma_sync_bvecs_for_cpu(struct device *dev, struct bio_vec *bvecs,
+				  int nelems, int direction)
+{
+	int mapped_ents = 0, i;
+	struct bio_vec *bv;
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	for (i = 0; i < nelems; i++) {
+		bv = &bvecs[i];
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_bv,
+			.dev            = dev,
+			.pfn		= page_to_pfn(bv->bv_page),
+			.offset		= bv->bv_offset,
+			.dev_addr       = bv_dma_address(bv),
+			.size           = bv_dma_len(bv),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, &ref, true);
+	}
+}
+
+void debug_dma_sync_bvecs_for_device(struct device *dev, struct bio_vec *bvecs,
+				     int nelems, int direction)
+{
+	int mapped_ents = 0, i;
+	struct bio_vec *bv;
+
+	if (unlikely(dma_debug_disabled()))
+		return;
+
+	for (i = 0; i < nelems; i++) {
+		bv = &bvecs[i];
+		struct dma_debug_entry ref = {
+			.type           = dma_debug_bv,
+			.dev            = dev,
+			.pfn		= page_to_pfn(bv->bv_page),
+			.offset		= bv->bv_offset,
+			.dev_addr       = bv_dma_address(bv),
+			.size           = bv_dma_len(bv),
+			.direction      = direction,
+			.sg_call_ents   = nelems,
+		};
+		if (!i)
+			mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+		if (i >= mapped_ents)
+			break;
+
+		check_sync(dev, &ref, false);
+	}
+}
+
 static int __init dma_debug_driver_setup(char *str)
 {
 	int i;
diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
index f525197d3cae..dff7e8a2f594 100644
--- a/kernel/dma/debug.h
+++ b/kernel/dma/debug.h
@@ -24,6 +24,13 @@ extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			       int nelems, int dir);
 
+extern void debug_dma_map_bvecs(struct device *dev, struct bio_vec *bvecs,
+				int nents, int mapped_ents, int direction,
+				unsigned long attrs);
+
+extern void debug_dma_unmap_bvecs(struct device *dev, struct bio_vec *bvecs,
+				  int nelems, int dir);
+
 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
 				     dma_addr_t dma_addr, void *virt,
 				     unsigned long attrs);
@@ -54,6 +61,14 @@ extern void debug_dma_sync_sg_for_cpu(struct device *dev,
 extern void debug_dma_sync_sg_for_device(struct device *dev,
 					 struct scatterlist *sg,
 					 int nelems, int direction);
+
+extern void debug_dma_sync_bvecs_for_cpu(struct device *dev,
+					 struct bio_vec *bvecs,
+					 int nelems, int direction);
+
+extern void debug_dma_sync_bvecs_for_device(struct device *dev,
+					    struct bio_vec *bvecs,
+					    int nelems, int direction);
 #else /* CONFIG_DMA_API_DEBUG */
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
@@ -79,6 +94,17 @@ static inline void debug_dma_unmap_sg(struct device *dev,
 {
 }
 
+static inline void debug_dma_map_bvecs(struct device *dev, struct bio_vec *bvecs,
+				       int nents, int mapped_ents, int direction,
+				       unsigned long attrs)
+{
+}
+
+static inline void debug_dma_unmap_bvecs(struct device *dev, struct bio_vec *bvecs,
+					 int nelems, int dir)
+{
+}
+
 static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
 					    dma_addr_t dma_addr, void *virt,
 					    unsigned long attrs)
@@ -126,5 +152,17 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
 						int nelems, int direction)
 {
 }
+
+static inline void debug_dma_sync_bvecs_for_cpu(struct device *dev,
+						struct bio_vec *bvecs,
+						int nelems, int direction)
+{
+}
+
+static inline void debug_dma_sync_bvecs_for_device(struct device *dev,
+						   struct bio_vec *bvecs,
+						   int nelems, int direction)
+{
+}
 #endif /* CONFIG_DMA_API_DEBUG */
 #endif /* _KERNEL_DMA_DEBUG_H */



Thread overview: 35+ messages
2023-10-19 15:25 [PATCH RFC 0/9] Exploring biovec support in (R)DMA API Chuck Lever
2023-10-19 15:25 ` Chuck Lever
2023-10-19 15:25 ` [PATCH RFC 1/9] dma-debug: Fix a typo in a debugging eye-catcher Chuck Lever
2023-10-20  4:49   ` Christoph Hellwig
2023-10-20 13:38     ` Chuck Lever III
2023-10-23  5:56       ` Christoph Hellwig
2023-10-19 15:25 ` [PATCH RFC 2/9] bvec: Add bio_vec fields to manage DMA mapping Chuck Lever
2023-10-19 15:25   ` Chuck Lever
2023-10-19 15:25 ` Chuck Lever [this message]
2023-10-19 15:25   ` [PATCH RFC 3/9] dma-debug: Add dma_debug_ helpers for mapping bio_vec arrays Chuck Lever
2023-10-19 21:38   ` kernel test robot
2023-10-19 23:21     ` Chuck Lever III
2023-10-23  2:43       ` Liu, Yujie
2023-10-23 14:27         ` Chuck Lever III
2023-10-19 21:49   ` kernel test robot
2023-10-19 15:25 ` [PATCH RFC 4/9] mm: kmsan: Add support for DMA " Chuck Lever
2023-10-19 15:25   ` Chuck Lever
2023-10-19 15:26 ` [PATCH RFC 5/9] dma-direct: Support direct " Chuck Lever
2023-10-19 15:26   ` Chuck Lever
2023-10-19 15:26 ` [PATCH RFC 6/9] DMA-API: Add dma_sync_bvecs_for_cpu() and dma_sync_bvecs_for_device() Chuck Lever
2023-10-19 15:26   ` Chuck Lever
2023-10-19 15:26 ` [PATCH RFC 7/9] DMA: Add dma_map_bvecs_attrs() Chuck Lever
2023-10-19 15:26   ` Chuck Lever
2023-10-19 22:10   ` kernel test robot
2023-10-19 15:26 ` [PATCH RFC 8/9] iommu/dma: Support DMA-mapping a bio_vec array Chuck Lever
2023-10-19 15:26   ` Chuck Lever
2023-10-19 15:26 ` [PATCH RFC 9/9] RDMA: Add helpers for DMA-mapping an array of bio_vecs Chuck Lever
2023-10-19 15:26   ` Chuck Lever
2023-10-19 15:53 ` [PATCH RFC 0/9] Exploring biovec support in (R)DMA API Matthew Wilcox
2023-10-19 17:48   ` Chuck Lever
2023-10-20  4:58   ` Christoph Hellwig
2023-10-20 10:30     ` Robin Murphy
2023-10-23  5:59       ` Christoph Hellwig
2023-10-19 16:43 ` Robin Murphy
2023-10-19 17:53   ` Jason Gunthorpe
