From: Jason Gunthorpe <jgg@nvidia.com>
To: Devesh Sharma <devesh.sharma@broadcom.com>,
	Doug Ledford <dledford@redhat.com>,
	Faisal Latif <faisal.latif@intel.com>,
	Gal Pressman <galpress@amazon.com>,
	"Wei Hu(Xavier)" <huwei87@hisilicon.com>,
	<linux-rdma@vger.kernel.org>, Weihang Li <liweihang@huawei.com>,
	"Naresh Kumar PBS" <nareshkumar.pbs@broadcom.com>,
	Lijun Ou <oulijun@huawei.com>,
	Selvin Xavier <selvin.xavier@broadcom.com>,
	Yossi Leybovich <sleybo@amazon.com>,
	Somnath Kotur <somnath.kotur@broadcom.com>,
	"Sriharsha Basavapatna" <sriharsha.basavapatna@broadcom.com>
Cc: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>,
	Shiraz Saleem <shiraz.saleem@intel.com>
Subject: [PATCH v2 04/17] RDMA/umem: Add rdma_umem_for_each_dma_block()
Date: Fri, 4 Sep 2020 19:41:45 -0300
Message-ID: <4-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com>
In-Reply-To: <0-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com>

This helper does the same as rdma_for_each_block(), except it works on a
umem. This simplifies most of the call sites.
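
For illustration, this is the shape of the conversion at a typical call
site (a sketch lifted from the bnxt_re hunk below; umem, biter, page_size
and pbl_tbl are locals of that driver's fill_umem_pbl_tbl()):

	struct ib_block_iter biter;

	/* Before: the caller digs the scatterlist and nmap out of the umem */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);

	/* After: the umem already carries both, so only the umem is named */
	rdma_umem_for_each_dma_block(umem, &biter, page_size)
		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);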

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
---
 .clang-format                              |  1 +
 drivers/infiniband/hw/bnxt_re/ib_verbs.c   |  2 +-
 drivers/infiniband/hw/efa/efa_verbs.c      |  3 +--
 drivers/infiniband/hw/hns/hns_roce_alloc.c |  3 +--
 drivers/infiniband/hw/i40iw/i40iw_verbs.c  |  3 +--
 include/rdma/ib_umem.h                     | 20 ++++++++++++++++++++
 6 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/.clang-format b/.clang-format
index a0a96088c74f49..311ef2c61a1bdf 100644
--- a/.clang-format
+++ b/.clang-format
@@ -415,6 +415,7 @@ ForEachMacros:
   - 'rbtree_postorder_for_each_entry_safe'
   - 'rdma_for_each_block'
   - 'rdma_for_each_port'
+  - 'rdma_umem_for_each_dma_block'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 5ee272d27aaade..9e26e651730cb3 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3783,7 +3783,7 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
 	u64 page_size =  BIT_ULL(page_shift);
 	struct ib_block_iter biter;
 
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+	rdma_umem_for_each_dma_block(umem, &biter, page_size)
 		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);
 
 	return pbl_tbl - pbl_tbl_orig;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index de9a22f0fcc218..d85c63a5021a70 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1142,8 +1142,7 @@ static int umem_to_page_list(struct efa_dev *dev,
 	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
 		  hp_cnt, pages_in_hp);
 
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-			    BIT(hp_shift))
+	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
 		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
 
 	return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index a522cb2d29eabc..a6b23dec1adcf6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -268,8 +268,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 	}
 
 	/* convert system page cnt to hw page cnt */
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-			    1 << page_shift) {
+	rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
 		addr = rdma_block_iter_dma_address(&biter);
 		if (idx >= start) {
 			bufs[total++] = addr;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index b51339328a51ef..beb611b157bc8d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1320,8 +1320,7 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	if (iwmr->type == IW_MEMREG_TYPE_QP)
 		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-	rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
-			    iwmr->page_size) {
+	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
 		*pbl = rdma_block_iter_dma_address(&biter);
 		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 	}
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 07a764eb692eed..b880512ba95f16 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -40,6 +40,26 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
 	       PAGE_SHIFT;
 }
 
+static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
+						struct ib_umem *umem,
+						unsigned long pgsz)
+{
+	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
+}
+
+/**
+ * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
+ * @umem: umem to iterate over
+ * @pgsz: Page size to split the list into
+ *
+ * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
+ * returned DMA blocks will be aligned to pgsz and span the range:
+ * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
+ */
+#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
+	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
+	     __rdma_block_iter_next(biter);)
+
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
-- 
2.28.0

