Subject: [PATCH for-rc] Revert "RDMA/efa: Use API to get contiguous memory blocks aligned to device supported page size"
From: Gal Pressman @ 2020-01-20 14:10 UTC
  To: Jason Gunthorpe, Doug Ledford
  Cc: linux-rdma, Alexander Matushevsky, Gal Pressman, Shiraz Saleem, stable

The cited commit leads to MR registration failures and random hangs when
running different MPI applications. The exact root cause of the issue is
still unclear; this revert brings us back to a stable state.

This reverts commit 40ddb3f020834f9afb7aab31385994811f4db259.

Fixes: 40ddb3f02083 ("RDMA/efa: Use API to get contiguous memory blocks aligned to device supported page size")
Cc: Shiraz Saleem <shiraz.saleem@intel.com>
Cc: stable@vger.kernel.org # 5.3
Signed-off-by: Gal Pressman <galpress@amazon.com>
---
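For reviewers: below is a standalone userspace sketch of the initial
page-shift bound that the restored efa_cont_pages() computes from the MR
start address and the device's page_size_cap. It is illustrative only
(hypothetical values, userspace stand-ins for the kernel helpers); the
real function further reduces the shift while walking the scatterlist.

/*
 * Userspace sketch, not driver code. All values below are hypothetical;
 * fls64_like()/ffb_like() stand in for the kernel's
 * fls64()/find_first_bit().
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Position of the highest set bit, 1-based; 0 if none (like fls64()). */
static unsigned int fls64_like(uint64_t x)
{
	return x ? 64 - (unsigned int)__builtin_clzll(x) : 0;
}

/* Index of the lowest set bit (like find_first_bit() on one word). */
static unsigned long ffb_like(unsigned long word)
{
	return word ? (unsigned long)__builtin_ctzl(word) : 64;
}

int main(void)
{
	uint64_t start = 0x7f0000200000ULL;  /* hypothetical MR start */
	uint64_t page_size_cap = 1ULL << 21; /* hypothetical device cap */
	unsigned long max_page_shift = fls64_like(page_size_cap);
	unsigned long m = ffb_like((unsigned long)(start >> PAGE_SHIFT));

	/* Clamp the block shift to what the device supports. */
	if (max_page_shift && m > max_page_shift - PAGE_SHIFT)
		m = max_page_shift - PAGE_SHIFT;

	printf("page_shift = %lu -> %llu-byte pages\n", PAGE_SHIFT + m,
	       (unsigned long long)1 << (PAGE_SHIFT + m));
	return 0;
}
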
 drivers/infiniband/hw/efa/efa_verbs.c | 88 ++++++++++++++++++++-------
 1 file changed, 67 insertions(+), 21 deletions(-)

diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 50c22575aed6..567797a919e8 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1005,15 +1005,21 @@ static int umem_to_page_list(struct efa_dev *dev,
 			     u8 hp_shift)
 {
 	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
-	struct ib_block_iter biter;
+	struct sg_dma_page_iter sg_iter;
+	unsigned int page_idx = 0;
 	unsigned int hp_idx = 0;
 
 	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
 		  hp_cnt, pages_in_hp);
 
-	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
-			    BIT(hp_shift))
-		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
+	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+		if (page_idx % pages_in_hp == 0) {
+			page_list[hp_idx] = sg_page_iter_dma_address(&sg_iter);
+			hp_idx++;
+		}
+
+		page_idx++;
+	}
 
 	return 0;
 }
@@ -1344,6 +1350,56 @@ static int efa_create_pbl(struct efa_dev *dev,
 	return 0;
 }
 
+static void efa_cont_pages(struct ib_umem *umem, u64 addr,
+			   unsigned long max_page_shift,
+			   int *count, u8 *shift, u32 *ncont)
+{
+	struct scatterlist *sg;
+	u64 base = ~0, p = 0;
+	unsigned long tmp;
+	unsigned long m;
+	u64 len, pfn;
+	int i = 0;
+	int entry;
+
+	addr = addr >> PAGE_SHIFT;
+	tmp = (unsigned long)addr;
+	m = find_first_bit(&tmp, BITS_PER_LONG);
+	if (max_page_shift)
+		m = min_t(unsigned long, max_page_shift - PAGE_SHIFT, m);
+
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = DIV_ROUND_UP(sg_dma_len(sg), PAGE_SIZE);
+		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+		if (base + p != pfn) {
+			/*
+			 * If either the offset or the new
+			 * base are unaligned update m
+			 */
+			tmp = (unsigned long)(pfn | p);
+			if (!IS_ALIGNED(tmp, 1 << m))
+				m = find_first_bit(&tmp, BITS_PER_LONG);
+
+			base = pfn;
+			p = 0;
+		}
+
+		p += len;
+		i += len;
+	}
+
+	if (i) {
+		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
+		*ncont = DIV_ROUND_UP(i, (1 << m));
+	} else {
+		m = 0;
+		*ncont = 0;
+	}
+
+	*shift = PAGE_SHIFT + m;
+	*count = i;
+}
+
 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 			 u64 virt_addr, int access_flags,
 			 struct ib_udata *udata)
@@ -1351,11 +1407,12 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	struct efa_dev *dev = to_edev(ibpd->device);
 	struct efa_com_reg_mr_params params = {};
 	struct efa_com_reg_mr_result result = {};
+	unsigned long max_page_shift;
 	struct pbl_context pbl;
 	int supp_access_flags;
-	unsigned int pg_sz;
 	struct efa_mr *mr;
 	int inline_size;
+	int npages;
 	int err;
 
 	if (udata->inlen &&
@@ -1396,24 +1453,13 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	params.iova = virt_addr;
 	params.mr_length_in_bytes = length;
 	params.permissions = access_flags;
+	max_page_shift = fls64(dev->dev_attr.page_size_cap);
 
-	pg_sz = ib_umem_find_best_pgsz(mr->umem,
-				       dev->dev_attr.page_size_cap,
-				       virt_addr);
-	if (!pg_sz) {
-		err = -EOPNOTSUPP;
-		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
-			  dev->dev_attr.page_size_cap);
-		goto err_unmap;
-	}
-
-	params.page_shift = __ffs(pg_sz);
-	params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
-				       pg_sz);
-
+	efa_cont_pages(mr->umem, start, max_page_shift, &npages,
+		       &params.page_shift, &params.page_num);
 	ibdev_dbg(&dev->ibdev,
-		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
-		  start, length, params.page_shift, params.page_num);
+		  "start %#llx length %#llx npages %d params.page_shift %u params.page_num %u\n",
+		  start, length, npages, params.page_shift, params.page_num);
 
 	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
 	if (params.page_num <= inline_size) {
-- 
2.24.1




Thread overview: 12+ messages
2020-01-20 14:10 [PATCH for-rc] Revert "RDMA/efa: Use API to get contiguous memory blocks aligned to device supported page size" Gal Pressman
2020-01-21  9:07 ` Gal Pressman
2020-01-21 16:24   ` Leon Romanovsky
2020-01-22  7:57     ` Gal Pressman
2020-01-23 14:24       ` Leon Romanovsky
2020-01-23 14:29         ` Gal Pressman
2020-01-24  0:40           ` Saleem, Shiraz
2020-01-24  2:52             ` Jason Gunthorpe
2020-01-28 12:32               ` Gal Pressman
2020-01-28 13:47                 ` Leon Romanovsky
2020-01-21 16:39   ` Saleem, Shiraz
2020-01-22  7:58     ` Gal Pressman
