From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@nvidia.com>
Cc: Parav Pandit <parav@nvidia.com>, linux-rdma@vger.kernel.org
Subject: [PATCH rdma-next] IB/mlx5: Use ib_dma APIs instead of open access to parent device
Date: Mon, 23 Nov 2020 10:24:00 +0200
Message-ID: <20201123082400.351371-1-leon@kernel.org>

From: Parav Pandit <parav@nvidia.com>

DMA operations of an IB device are performed on its ib_device->dma_device,
and this is well abstracted by the ib_dma_* APIs.

Hence, instead of open-coded access to the parent device, use the DMA
mapping APIs provided by the IB core.
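
For illustration, this is the pattern the patch applies, condensed from
the first hunk in mlx5_ib_create_xlt_wr() below (dev, xlt and sg are the
names used there; this is a sketch, not a standalone compilable excerpt):

	/* Before: open-coded access to the parent struct device */
	struct device *ddev = dev->ib_dev.dev.parent;
	dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma))
		return NULL;

	/* After: the ib_dma_* wrappers resolve the DMA device inside
	 * the IB core, so the caller never touches dev.parent directly.
	 */
	dma = ib_dma_map_single(&dev->ib_dev, xlt, sg->length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(&dev->ib_dev, dma))
		return NULL;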

Signed-off-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 drivers/infiniband/hw/mlx5/mr.c | 40 +++++++++++++--------------------
 1 file changed, 15 insertions(+), 25 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 090e204ef1e1..d24ac339c053 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,7 +42,7 @@
 #include "mlx5_ib.h"
 
 /*
- * We can't use an array for xlt_emergency_page because dma_map_single doesn't
+ * We can't use an array for xlt_emergency_page because ib_dma_map_single doesn't
  * work on kernel modules memory
  */
 void *xlt_emergency_page;
@@ -1081,7 +1081,6 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
 				   unsigned int flags)
 {
 	struct mlx5_ib_dev *dev = mr->dev;
-	struct device *ddev = dev->ib_dev.dev.parent;
 	dma_addr_t dma;
 	void *xlt;
 
@@ -1089,8 +1088,8 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
 				flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
 								 GFP_KERNEL);
 	sg->length = nents * ent_size;
-	dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
-	if (dma_mapping_error(ddev, dma)) {
+	dma = ib_dma_map_single(&dev->ib_dev, xlt, sg->length, DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(&dev->ib_dev, dma)) {
 		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
 		mlx5_ib_free_xlt(xlt, sg->length);
 		return NULL;
@@ -1118,9 +1117,7 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
 static void mlx5_ib_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
 				   struct ib_sge *sg)
 {
-	struct device *ddev = dev->ib_dev.dev.parent;
-
-	dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(&dev->ib_dev, sg->addr, sg->length, DMA_TO_DEVICE);
 	mlx5_ib_free_xlt(xlt, sg->length);
 }
 
@@ -1143,7 +1140,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 		       int page_shift, int flags)
 {
 	struct mlx5_ib_dev *dev = mr->dev;
-	struct device *ddev = dev->ib_dev.dev.parent;
 	void *xlt;
 	struct mlx5_umr_wr wr;
 	struct ib_sge sg;
@@ -1195,11 +1191,9 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	     pages_mapped += pages_iter, idx += pages_iter) {
 		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
 		size_to_map = npages * desc_size;
-		dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
-					DMA_TO_DEVICE);
+		ib_dma_sync_single_for_cpu(&dev->ib_dev, sg.addr, sg.length, DMA_TO_DEVICE);
 		mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
-		dma_sync_single_for_device(ddev, sg.addr, sg.length,
-					   DMA_TO_DEVICE);
+		ib_dma_sync_single_for_device(&dev->ib_dev, sg.addr, sg.length, DMA_TO_DEVICE);
 
 		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
 
@@ -1222,7 +1216,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 {
 	struct mlx5_ib_dev *dev = mr->dev;
-	struct device *ddev = dev->ib_dev.dev.parent;
 	struct ib_block_iter biter;
 	struct mlx5_mtt *cur_mtt;
 	struct mlx5_umr_wr wr;
@@ -1247,13 +1240,13 @@ static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
 			     BIT(mr->page_shift)) {
 		if (cur_mtt == (void *)mtt + sg.length) {
-			dma_sync_single_for_device(ddev, sg.addr, sg.length,
-						   DMA_TO_DEVICE);
+			ib_dma_sync_single_for_device(&dev->ib_dev, sg.addr, sg.length,
+						      DMA_TO_DEVICE);
 			err = mlx5_ib_post_send_wait(dev, &wr);
 			if (err)
 				goto err;
-			dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
-						DMA_TO_DEVICE);
+			ib_dma_sync_single_for_cpu(&dev->ib_dev, sg.addr, sg.length,
+						   DMA_TO_DEVICE);
 			wr.offset += sg.length;
 			cur_mtt = mtt;
 		}
@@ -1270,7 +1263,7 @@ static int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
 	wr.xlt_size = sg.length;
 
-	dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
+	ib_dma_sync_single_for_device(&dev->ib_dev, sg.addr, sg.length, DMA_TO_DEVICE);
 	err = mlx5_ib_post_send_wait(dev, &wr);
 
 err:
@@ -1763,12 +1756,10 @@ mlx5_alloc_priv_descs(struct ib_device *device,
 
 	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
 
-	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
-				      size, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
-		ret = -ENOMEM;
+	mr->desc_map = ib_dma_map_single(device, mr->descs, size, DMA_TO_DEVICE);
+	ret = ib_dma_mapping_error(device, mr->desc_map);
+	if (ret)
 		goto err;
-	}
 
 	return 0;
 err:
@@ -1784,8 +1775,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 		struct ib_device *device = mr->ibmr.device;
 		int size = mr->max_descs * mr->desc_size;
 
-		dma_unmap_single(device->dev.parent, mr->desc_map,
-				 size, DMA_TO_DEVICE);
+		ib_dma_unmap_single(device, mr->desc_map, size, DMA_TO_DEVICE);
 		kfree(mr->descs_alloc);
 		mr->descs = NULL;
 	}
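
(For reference, the ib_dma_* helpers in include/rdma/ib_verbs.h are thin
static inline wrappers that forward to the corresponding dma_* call on
ib_device->dma_device, roughly like the following paraphrase; the exact
body may differ between trees:

	static inline u64 ib_dma_map_single(struct ib_device *dev,
					    void *cpu_addr, size_t size,
					    enum dma_data_direction direction)
	{
		/* dma_device is the device the IB core maps DMA through;
		 * for mlx5 this is the parent PCI device, so the
		 * conversion does not change the mapping behavior.
		 */
		return dma_map_single(dev->dma_device, cpu_addr, size,
				      direction);
	}

)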
-- 
2.28.0


Thread overview: 8+ messages
2020-11-23  8:24 Leon Romanovsky [this message]
2020-11-23 13:59 ` [PATCH rdma-next] IB/mlx5: Use ib_dma APIs instead of open access to parent device Jason Gunthorpe
2020-11-24  3:34   ` Parav Pandit
2020-11-24  5:20     ` Leon Romanovsky
2020-11-24 12:58       ` Jason Gunthorpe
2020-11-24  9:31 ` Christoph Hellwig
2020-11-24  9:46   ` Leon Romanovsky
2020-11-24  9:49     ` Christoph Hellwig
