* [PATCH rdma-next] IB/mlx5: Use ib_dma APIs instead of open access to parent device
@ 2020-11-23  8:24 Leon Romanovsky
From: Leon Romanovsky @ 2020-11-23  8:24 UTC (permalink / raw)
  To: Doug Ledford, Jason Gunthorpe; +Cc: Parav Pandit, linux-rdma

From: Parav Pandit <parav@nvidia.com>

DMA operations of the IB device are done using ib_device->dma_device,
which is well abstracted by the ib_dma APIs.

Hence, instead of open-coded access to the parent device, use the DMA
mapping APIs provided by the IB core.
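
For reference, the ib_dma_* helpers are thin wrappers that dispatch the
operation to ib_device->dma_device. A simplified sketch of the map helper
(the exact definition in include/rdma/ib_verbs.h may carry additional
checks depending on kernel version):

	static inline u64 ib_dma_map_single(struct ib_device *dev,
					    void *cpu_addr, size_t size,
					    enum dma_data_direction direction)
	{
		/* simplified sketch; details vary across kernel versions */
		return dma_map_single(dev->dma_device, cpu_addr, size,
				      direction);
	}

For mlx5, dma_device is expected to resolve to the same parent device that
was previously dereferenced directly, so the conversion is functionally
equivalent while keeping the driver behind the IB core abstraction.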

Signed-off-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/infiniband/hw/mlx5/mr.c | 40 +++++++++++++--------------------
 1 file changed, 15 insertions(+), 25 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 090e204ef1e1..d24ac339c053 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,7 +42,7 @@
 #include "mlx5_ib.h"
 
 /*
- * We can't use an array for xlt_emergency_page because dma_map_single doesn't
+ * We can't use an array for xlt_emergency_page because ib_dma_map_single doesn't
  * work on kernel modules memory
  */
 void *xlt_emergency_page;
@@ -1081,7 +1081,6 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
 				   unsigned int flags)
 {
 	struct mlx5_ib_dev *dev = mr->dev;
-	struct device *ddev = dev->ib_dev.dev.parent;
 	dma_addr_t dma;
 	void *xlt;
 
@@ -1089,8 +1088,8 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
 				flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
 								 GFP_KERNEL);
 	sg->length = nents * ent_size;
-	dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
-	if (dma_mapping_error(ddev, dma)) {
+	dma = ib_dma_map_single(&dev->ib_dev, xlt, sg->length, DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(&dev->ib_dev, dma)) {
 		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
 		mlx5_ib_free_xlt(xlt, sg->length);
 		return NULL;
@@ -1118,9 +1117,7 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
 static void mlx5_ib_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
 				   struct ib_sge *sg)
 {
-	struct device *ddev = dev->ib_dev.dev.parent;
-
-	dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(&dev->ib_dev, sg->addr, sg->length, DMA_TO_DEVICE);
 	mlx5_ib_free_xlt(xlt, sg->length);
 }
 
@@ -1143,7 +1140,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 		       int page_shift, int flags)
 {
 	struct mlx5_ib_dev *dev = mr->dev;
-	struct device *ddev = dev->ib_dev.dev.parent;
 	void *xlt;
 	struct mlx5_umr_wr wr;
 	struct ib_sge sg;
@@ -1195,11 +1191,9 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	     pages_mapped += pages_iter, idx += pages_iter) {
 		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
 		size_to_map = npages * desc_size;
-		dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
-					DMA_TO_DEVICE);
+		ib_dma_sync_single_for_cpu(&dev->ib_dev, sg.addr, sg.length, DMA_TO_DEVICE);
 		mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
-		dma_sync_single_for_device(ddev, sg.addr, sg.length,
-					   DMA_TO_DEVICE);
+		ib_dma_sync_single_for_device(&dev->ib_dev, sg.addr, sg.length, DMA_TO_DEVICE);
 
 		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
 
@@ -1222,7 +1216,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 {
 	struct mlx5_ib_dev *dev = mr->dev;
-	struct device *ddev = dev->ib_dev.dev.parent;
 	struct ib_block_iter biter;
 	struct mlx5_mtt *cur_mtt;
 	struct mlx5_umr_wr wr;
@@ -1247,13 +1240,13 @@ static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
 			     BIT(mr->page_shift)) {
 		if (cur_mtt == (void *)mtt + sg.length) {
-			dma_sync_single_for_device(ddev, sg.addr, sg.length,
-						   DMA_TO_DEVICE);
+			ib_dma_sync_single_for_device(&dev->ib_dev, sg.addr, sg.length,
+						      DMA_TO_DEVICE);
 			err = mlx5_ib_post_send_wait(dev, &wr);
 			if (err)
 				goto err;
-			dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
-						DMA_TO_DEVICE);
+			ib_dma_sync_single_for_cpu(&dev->ib_dev, sg.addr, sg.length,
+						   DMA_TO_DEVICE);
 			wr.offset += sg.length;
 			cur_mtt = mtt;
 		}
@@ -1270,7 +1263,7 @@ static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
 	wr.xlt_size = sg.length;
 
-	dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(&dev->ib_dev, sg.addr, sg.length, DMA_TO_DEVICE);
 	err = mlx5_ib_post_send_wait(dev, &wr);
 
 err:
@@ -1763,12 +1756,10 @@ mlx5_alloc_priv_descs(struct ib_device *device,
 
 	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
 
-	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
-				      size, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
-		ret = -ENOMEM;
+	mr->desc_map = ib_dma_map_single(device, mr->descs, size, DMA_TO_DEVICE);
+	ret = ib_dma_mapping_error(device, mr->desc_map);
+	if (ret)
 		goto err;
-	}
 
 	return 0;
 err:
@@ -1784,8 +1775,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 		struct ib_device *device = mr->ibmr.device;
 		int size = mr->max_descs * mr->desc_size;
 
-		dma_unmap_single(device->dev.parent, mr->desc_map,
-				 size, DMA_TO_DEVICE);
+		ib_dma_unmap_single(device, mr->desc_map, size, DMA_TO_DEVICE);
 		kfree(mr->descs_alloc);
 		mr->descs = NULL;
 	}
-- 
2.28.0

