linux-rdma.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: "Wei Hu (Xavier)" <xavier.huwei-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
To: dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org
Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
	xavier.huwei-hv44wF8Li93QT0dZR+AlfA@public.gmane.org,
	lijun_nudt-9Onoh4P/yGk@public.gmane.org,
	oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org,
	charles.chenxin-hv44wF8Li93QT0dZR+AlfA@public.gmane.org,
	liuyixian-hv44wF8Li93QT0dZR+AlfA@public.gmane.org,
	xushaobo2-hv44wF8Li93QT0dZR+AlfA@public.gmane.org,
	zhangxiping3-hv44wF8Li93QT0dZR+AlfA@public.gmane.org,
	xavier.huwei-9Onoh4P/yGk@public.gmane.org,
	linuxarm-hv44wF8Li93QT0dZR+AlfA@public.gmane.org
Subject: [PATCH for-next 12/20] RDMA/hns: Support multi hop addressing for PBL in hip08
Date: Wed, 30 Aug 2017 17:23:10 +0800	[thread overview]
Message-ID: <1504084998-64397-13-git-send-email-xavier.huwei@huawei.com> (raw)
In-Reply-To: <1504084998-64397-1-git-send-email-xavier.huwei-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>

The base address of a block in the MR can be retrieved via the block
number, which is calculated from the VA in the SGE and the MTPT. In
hip08, the PBL supports multi-hop addressing for retrieving the block
base address from the block number.

This patch adds the interfaces in the MR to support multi-hop
addressing for the PBL.

Signed-off-by: Shaobo Xu <xushaobo2-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
Signed-off-by: Lijun Ou <oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  16 ++
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |   3 +
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |   1 +
 drivers/infiniband/hw/hns/hns_roce_mr.c     | 422 ++++++++++++++++++++++++++--
 4 files changed, 418 insertions(+), 24 deletions(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 319c7054..7815e4e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -267,6 +267,19 @@ struct hns_roce_mr {
 	int			type;	/* MR's register type */
 	u64			*pbl_buf;/* MR's PBL space */
 	dma_addr_t		pbl_dma_addr;	/* MR's PBL space PA */
+	u32			pbl_size;/* PA number in the PBL */
+	u64			pbl_ba;/* page table address */
+	u32			l0_chunk_last_num;/* L0 last number */
+	u32			l1_chunk_last_num;/* L1 last number */
+	u64			**pbl_bt_l2;/* PBL BT L2 */
+	u64			**pbl_bt_l1;/* PBL BT L1 */
+	u64			*pbl_bt_l0;/* PBL BT L0 */
+	dma_addr_t		*pbl_l2_dma_addr;/* PBL BT L2 dma addr */
+	dma_addr_t		*pbl_l1_dma_addr;/* PBL BT L1 dma addr */
+	dma_addr_t		pbl_l0_dma_addr;/* PBL BT L0 dma addr */
+	u32			pbl_ba_pg_sz;/* BT chunk page size */
+	u32			pbl_buf_pg_sz;/* buf chunk page size */
+	u32			pbl_hop_num;/* multi-hop number */
 };
 
 struct hns_roce_mr_table {
@@ -514,6 +527,9 @@ struct hns_roce_caps {
 	int		qpc_entry_sz;
 	int		irrl_entry_sz;
 	int		cqc_entry_sz;
+	u32		pbl_ba_pg_sz;
+	u32		pbl_buf_pg_sz;
+	u32		pbl_hop_num;
 	int		aeqe_depth;
 	int		ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
 	enum ib_mtu	max_mtu;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c59d15e..542540d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -613,6 +613,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 	caps->mpt_ba_pg_sz	= 0;
 	caps->mpt_buf_pg_sz	= 0;
 	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
+	caps->pbl_ba_pg_sz	= 0;
+	caps->pbl_buf_pg_sz	= 0;
+	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
 	caps->mtt_ba_pg_sz	= 0;
 	caps->mtt_buf_pg_sz	= 0;
 	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 212d511..593d886 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -75,6 +75,7 @@
 #define HNS_ROCE_CONTEXT_HOP_NUM		1
 #define HNS_ROCE_MTT_HOP_NUM			1
 #define HNS_ROCE_CQE_HOP_NUM			1
+#define HNS_ROCE_PBL_HOP_NUM			2
 
 #define HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT	0
 #define HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT	1
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 88b436f..7e6ce76 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -258,6 +258,239 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
 }
 EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
 
+static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
+			       struct hns_roce_mr *mr, int err_loop_index,
+			       int loop_i, int loop_j)
+{
+	struct device *dev = hr_dev->dev;
+	u32 mhop_num;
+	u32 pbl_bt_sz;
+	u64 bt_idx;
+	int i, j;
+
+	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+	mhop_num = hr_dev->caps.pbl_hop_num;
+
+	i = loop_i;
+	j = loop_j;
+	if (mhop_num == 3 && err_loop_index == 2) {
+		for (; i >= 0; i--) {
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				if (i == loop_i && j >= loop_j)
+					break;
+
+				bt_idx = i * pbl_bt_sz / 8 + j;
+				dma_free_coherent(dev, pbl_bt_sz,
+						  mr->pbl_bt_l2[bt_idx],
+						  mr->pbl_l2_dma_addr[bt_idx]);
+			}
+		}
+	} else if (mhop_num == 3 && err_loop_index == 1) {
+		for (i -= 1; i >= 0; i--) {
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				bt_idx = i * pbl_bt_sz / 8 + j;
+				dma_free_coherent(dev, pbl_bt_sz,
+						  mr->pbl_bt_l2[bt_idx],
+						  mr->pbl_l2_dma_addr[bt_idx]);
+			}
+		}
+	} else if (mhop_num == 2 && err_loop_index == 1) {
+		for (i -= 1; i >= 0; i--)
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+	} else {
+		dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
+			 mhop_num, err_loop_index);
+		return;
+	}
+
+	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
+	mr->pbl_bt_l0 = NULL;
+	mr->pbl_l0_dma_addr = 0;
+}
+
+/* PBL multi hop addressing */
+static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
+			       struct hns_roce_mr *mr)
+{
+	struct device *dev = hr_dev->dev;
+	int mr_alloc_done = 0;
+	int npages_allocated;
+	int i = 0, j = 0;
+	u32 pbl_bt_sz;
+	u32 mhop_num;
+	u64 pbl_last_bt_num;
+	u64 pbl_bt_cnt = 0;
+	u64 bt_idx;
+	u64 size;
+
+	mhop_num = hr_dev->caps.pbl_hop_num;
+	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
+
+	if (mhop_num == HNS_ROCE_HOP_NUM_0)
+		return 0;
+
+	/* hop_num = 1 */
+	if (mhop_num == 1) {
+		if (npages > pbl_bt_sz / 8) {
+			dev_err(dev, "npages %d is larger than buf_pg_sz!",
+				npages);
+			return -EINVAL;
+		}
+		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+						 &(mr->pbl_dma_addr),
+						 GFP_KERNEL);
+		if (!mr->pbl_buf)
+			return -ENOMEM;
+
+		mr->pbl_size = npages;
+		mr->pbl_ba = mr->pbl_dma_addr;
+		mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+		mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+		mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+		return 0;
+	}
+
+	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
+				      sizeof(*mr->pbl_l1_dma_addr),
+				      GFP_KERNEL);
+	if (!mr->pbl_l1_dma_addr)
+		return -ENOMEM;
+
+	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+				GFP_KERNEL);
+	if (!mr->pbl_bt_l1)
+		goto err_kcalloc_bt_l1;
+
+	if (mhop_num == 3) {
+		mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
+					      sizeof(*mr->pbl_l2_dma_addr),
+					      GFP_KERNEL);
+		if (!mr->pbl_l2_dma_addr)
+			goto err_kcalloc_l2_dma;
+
+		mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
+					sizeof(*mr->pbl_bt_l2),
+					GFP_KERNEL);
+		if (!mr->pbl_bt_l2)
+			goto err_kcalloc_bt_l2;
+	}
+
+	/* alloc L0 BT */
+	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
+					   &(mr->pbl_l0_dma_addr),
+					   GFP_KERNEL);
+	if (!mr->pbl_bt_l0)
+		goto err_dma_alloc_l0;
+
+	if (mhop_num == 2) {
+		/* alloc L1 BT */
+		for (i = 0; i < pbl_bt_sz / 8; i++) {
+			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+				size = pbl_bt_sz;
+			} else {
+				npages_allocated = i * (pbl_bt_sz / 8);
+				size = (npages - npages_allocated) * 8;
+			}
+			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
+						    &(mr->pbl_l1_dma_addr[i]),
+						    GFP_KERNEL);
+			if (!mr->pbl_bt_l1[i]) {
+				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+				goto err_dma_alloc_l0;
+			}
+
+			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+			pbl_bt_cnt++;
+			if (pbl_bt_cnt >= pbl_last_bt_num)
+				break;
+		}
+	} else if (mhop_num == 3) {
+		/* alloc L1, L2 BT */
+		for (i = 0; i < pbl_bt_sz / 8; i++) {
+			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
+						    &(mr->pbl_l1_dma_addr[i]),
+						    GFP_KERNEL);
+			if (!mr->pbl_bt_l1[i]) {
+				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+				goto err_dma_alloc_l0;
+			}
+
+			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				bt_idx = i * pbl_bt_sz / 8 + j;
+
+				if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+					size = pbl_bt_sz;
+				} else {
+					npages_allocated = bt_idx *
+							   (pbl_bt_sz / 8);
+					size = (npages - npages_allocated) * 8;
+				}
+				mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
+					      dev, size,
+					      &(mr->pbl_l2_dma_addr[bt_idx]),
+					      GFP_KERNEL);
+				if (!mr->pbl_bt_l2[bt_idx]) {
+					hns_roce_loop_free(hr_dev, mr, 2, i, j);
+					goto err_dma_alloc_l0;
+				}
+
+				*(mr->pbl_bt_l1[i] + j) =
+						mr->pbl_l2_dma_addr[bt_idx];
+
+				pbl_bt_cnt++;
+				if (pbl_bt_cnt >= pbl_last_bt_num) {
+					mr_alloc_done = 1;
+					break;
+				}
+			}
+
+			if (mr_alloc_done)
+				break;
+		}
+	}
+
+	mr->l0_chunk_last_num = i + 1;
+	if (mhop_num == 3)
+		mr->l1_chunk_last_num = j + 1;
+
+	mr->pbl_size = npages;
+	mr->pbl_ba = mr->pbl_l0_dma_addr;
+	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+
+	return 0;
+
+err_dma_alloc_l0:
+	kfree(mr->pbl_bt_l2);
+	mr->pbl_bt_l2 = NULL;
+
+err_kcalloc_bt_l2:
+	kfree(mr->pbl_l2_dma_addr);
+	mr->pbl_l2_dma_addr = NULL;
+
+err_kcalloc_l2_dma:
+	kfree(mr->pbl_bt_l1);
+	mr->pbl_bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+	kfree(mr->pbl_l1_dma_addr);
+	mr->pbl_l1_dma_addr = NULL;
+
+	return -ENOMEM;
+}
+
 static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
 			     u64 size, u32 access, int npages,
 			     struct hns_roce_mr *mr)
@@ -282,16 +515,111 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
 		mr->type = MR_TYPE_DMA;
 		mr->pbl_buf = NULL;
 		mr->pbl_dma_addr = 0;
+		/* PBL multi-hop addressing parameters */
+		mr->pbl_bt_l2 = NULL;
+		mr->pbl_bt_l1 = NULL;
+		mr->pbl_bt_l0 = NULL;
+		mr->pbl_l2_dma_addr = NULL;
+		mr->pbl_l1_dma_addr = NULL;
+		mr->pbl_l0_dma_addr = 0;
 	} else {
 		mr->type = MR_TYPE_MR;
-		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
-						 &(mr->pbl_dma_addr),
-						 GFP_KERNEL);
-		if (!mr->pbl_buf)
-			return -ENOMEM;
+		if (!hr_dev->caps.pbl_hop_num) {
+			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+							 &(mr->pbl_dma_addr),
+							 GFP_KERNEL);
+			if (!mr->pbl_buf)
+				return -ENOMEM;
+		} else {
+			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+		}
 	}
 
-	return 0;
+	return ret;
+}
+
+static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
+			       struct hns_roce_mr *mr)
+{
+	struct device *dev = hr_dev->dev;
+	int npages_allocated;
+	int npages;
+	int i, j;
+	u32 pbl_bt_sz;
+	u32 mhop_num;
+	u64 bt_idx;
+
+	npages = ib_umem_page_count(mr->umem);
+	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+	mhop_num = hr_dev->caps.pbl_hop_num;
+
+	if (mhop_num == HNS_ROCE_HOP_NUM_0)
+		return;
+
+	/* hop_num = 1 */
+	if (mhop_num == 1) {
+		dma_free_coherent(dev, (unsigned int)(npages * 8),
+				  mr->pbl_buf, mr->pbl_dma_addr);
+		return;
+	}
+
+	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
+			  mr->pbl_l0_dma_addr);
+
+	if (mhop_num == 2) {
+		for (i = 0; i < mr->l0_chunk_last_num; i++) {
+			if (i == mr->l0_chunk_last_num - 1) {
+				npages_allocated = i * (pbl_bt_sz / 8);
+
+				dma_free_coherent(dev,
+					      (npages - npages_allocated) * 8,
+					      mr->pbl_bt_l1[i],
+					      mr->pbl_l1_dma_addr[i]);
+
+				break;
+			}
+
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+		}
+	} else if (mhop_num == 3) {
+		for (i = 0; i < mr->l0_chunk_last_num; i++) {
+			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+					  mr->pbl_l1_dma_addr[i]);
+
+			for (j = 0; j < pbl_bt_sz / 8; j++) {
+				bt_idx = i * (pbl_bt_sz / 8) + j;
+
+				if ((i == mr->l0_chunk_last_num - 1)
+				    && j == mr->l1_chunk_last_num - 1) {
+					npages_allocated = bt_idx *
+							   (pbl_bt_sz / 8);
+
+					dma_free_coherent(dev,
+					      (npages - npages_allocated) * 8,
+					      mr->pbl_bt_l2[bt_idx],
+					      mr->pbl_l2_dma_addr[bt_idx]);
+
+					break;
+				}
+
+				dma_free_coherent(dev, pbl_bt_sz,
+						mr->pbl_bt_l2[bt_idx],
+						mr->pbl_l2_dma_addr[bt_idx]);
+			}
+		}
+	}
+
+	kfree(mr->pbl_bt_l1);
+	kfree(mr->pbl_l1_dma_addr);
+	mr->pbl_bt_l1 = NULL;
+	mr->pbl_l1_dma_addr = NULL;
+	if (mhop_num == 3) {
+		kfree(mr->pbl_bt_l2);
+		kfree(mr->pbl_l2_dma_addr);
+		mr->pbl_bt_l2 = NULL;
+		mr->pbl_l2_dma_addr = NULL;
+	}
 }
 
 static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
@@ -310,10 +638,18 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 
 	if (mr->size != ~0ULL) {
 		npages = ib_umem_page_count(mr->umem);
-		dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
-				  mr->pbl_dma_addr);
+
+		if (!hr_dev->caps.pbl_hop_num)
+			dma_free_coherent(dev, (unsigned int)(npages * 8),
+					  mr->pbl_buf, mr->pbl_dma_addr);
+		else
+			hns_roce_mhop_free(hr_dev, mr);
 	}
 
+	if (mr->enabled)
+		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+				   key_to_hw_index(mr->key));
+
 	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
 			     key_to_hw_index(mr->key), BITMAP_NO_RR);
 }
@@ -501,8 +837,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
 
 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
 {
-	int ret = 0;
-	struct hns_roce_mr *mr = NULL;
+	struct hns_roce_mr *mr;
+	int ret;
 
 	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
 	if (mr == NULL)
@@ -571,16 +907,36 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
+static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
+				     struct hns_roce_mr *mr,
 				     struct ib_umem *umem)
 {
-	int i = 0;
-	int entry;
 	struct scatterlist *sg;
+	int i = 0, j = 0;
+	int entry;
+
+	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
+		return 0;
 
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
-		i++;
+		if (!hr_dev->caps.pbl_hop_num) {
+			mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
+			i++;
+		} else if (hr_dev->caps.pbl_hop_num == 1) {
+			mr->pbl_buf[i] = sg_dma_address(sg);
+			i++;
+		} else {
+			if (hr_dev->caps.pbl_hop_num == 2)
+				mr->pbl_bt_l1[i][j] = sg_dma_address(sg);
+			else if (hr_dev->caps.pbl_hop_num == 3)
+				mr->pbl_bt_l2[i][j] = sg_dma_address(sg);
+
+			j++;
+			if (j >= (PAGE_SIZE / 8)) {
+				i++;
+				j = 0;
+			}
+		}
 	}
 
 	/* Memory barrier */
@@ -595,9 +951,11 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
 	struct device *dev = hr_dev->dev;
-	struct hns_roce_mr *mr = NULL;
-	int ret = 0;
-	int n = 0;
+	struct hns_roce_mr *mr;
+	int bt_size;
+	int ret;
+	int n;
+	int i;
 
 	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
@@ -618,11 +976,27 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err_umem;
 	}
 
-	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
-		dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
-			length);
-		ret = -EINVAL;
-		goto err_umem;
+	if (!hr_dev->caps.pbl_hop_num) {
+		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
+			dev_err(dev,
+			     " MR len %lld err. MR is limited to 4G at most!\n",
+			     length);
+			ret = -EINVAL;
+			goto err_umem;
+		}
+	} else {
+		int pbl_size = 1;
+
+		bt_size = (1 << PAGE_SHIFT) / 8;
+		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
+			pbl_size *= bt_size;
+		if (n > pbl_size) {
+			dev_err(dev,
+			    " MR len %lld err. MR page num is limited to %d!\n",
+			    length, pbl_size);
+			ret = -EINVAL;
+			goto err_umem;
+		}
 	}
 
 	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
@@ -630,7 +1004,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (ret)
 		goto err_umem;
 
-	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
+	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
 	if (ret)
 		goto err_mr;
 
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

  parent reply	other threads:[~2017-08-30  9:23 UTC|newest]

Thread overview: 46+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-08-30  9:22 [PATCH for-next 00/20] RDMA/hns: Add hip08 RoCE driver support Wei Hu (Xavier)
     [not found] ` <1504084998-64397-1-git-send-email-xavier.huwei-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-08-30  9:22   ` [PATCH for-next 01/20] RDMA/hns: Split hw v1 driver from hns roce driver Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 02/20] RDMA/hns: Move priv in order to add multiple hns_roce support Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 03/20] RDMA/hns: Initialize the PCI device for hip08 RoCE Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 04/20] RDMA/hns: Modify assignment device variable to support both PCI device and platform device Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 05/20] RDMA/hns: Add command queue support for hip08 RoCE driver Wei Hu (Xavier)
     [not found]     ` <1504084998-64397-6-git-send-email-xavier.huwei-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-09-25 17:06       ` Doug Ledford
     [not found]         ` <1506359213.120853.75.camel-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-09-25 17:18           ` Leon Romanovsky
     [not found]             ` <20170925171821.GQ25094-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-09-25 17:36               ` Doug Ledford
     [not found]                 ` <1506361015.120853.81.camel-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-09-26  5:15                   ` Leon Romanovsky
2017-09-26 13:13                   ` Wei Hu (Xavier)
     [not found]                     ` <59CA5261.80209-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-09-26 15:24                       ` Wei Hu (Xavier)
     [not found]                         ` <e99f8917-1906-697b-3dcd-5f024b444750-WVlzvzqoTvw@public.gmane.org>
2017-09-26 15:51                           ` Leon Romanovsky
     [not found]                             ` <20170926155149.GE6816-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-09-26 16:13                               ` Wei Hu (Xavier)
     [not found]                                 ` <5514bf6d-3a98-a6fe-ea90-476f5ae1f623-WVlzvzqoTvw@public.gmane.org>
2017-09-26 21:12                                   ` Wei Hu (Xavier)
2017-09-26 16:18                       ` Doug Ledford
     [not found]                         ` <81dd332d-e060-d7e3-bec9-1791511c5470-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-09-27  2:46                           ` Wei Hu (Xavier)
     [not found]                             ` <9172f8c5-3dd6-a573-8e28-1b3ae4b1726b-WVlzvzqoTvw@public.gmane.org>
2017-09-27 12:21                               ` Doug Ledford
     [not found]                                 ` <1b8bda3b-c514-7e46-08bf-3ea50ea68096-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-09-27 12:41                                   ` Doug Ledford
2017-09-28  4:34                                     ` Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 06/20] RDMA/hns: Add profile support for hip08 driver Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 07/20] RDMA/hns: Add mailbox's implementation for hip08 RoCE driver Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 08/20] RDMA/hns: Add the interfaces to support multi hop addressing for the contexts in hip08 Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 09/20] RDMA/hns: Configure BT BA and BT attribute " Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 10/20] RDMA/hns: Update the interfaces for MTT/CQE multi hop addressing " Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 11/20] RDMA/hns: Split CQE from MTT " Wei Hu (Xavier)
     [not found]     ` <1504084998-64397-12-git-send-email-xavier.huwei-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-09-13 17:52       ` Leon Romanovsky
     [not found]         ` <20170913175259.GW3405-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-09-15  1:09           ` Wei Hu (Xavier)
     [not found]             ` <59BB2848.6080802-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-09-20  2:48               ` Wei Hu (Xavier)
2017-08-30  9:23   ` Wei Hu (Xavier) [this message]
2017-08-30  9:23   ` [PATCH for-next 13/20] RDMA/hns: Configure mac&gid and user access region for hip08 RoCE driver Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 14/20] RDMA/hns: Add CQ operations support " Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 15/20] RDMA/hns: Add QP operations support for hip08 SoC Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 16/20] RDMA/hns: Add support for processing send wr and receive wr Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 17/20] RDMA/hns: Configure the MTPT in hip08 Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 18/20] RDMA/hns: Add releasing resource operation in error branch Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 19/20] RDMA/hns: Replace condition statement using hardware version information Wei Hu (Xavier)
2017-08-30  9:23   ` [PATCH for-next 20/20] RDMA/hns: Fix inconsistent warning Wei Hu (Xavier)
2017-09-13 17:55   ` [PATCH for-next 00/20] RDMA/hns: Add hip08 RoCE driver support Leon Romanovsky
     [not found]     ` <20170913175554.GX3405-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-09-14  7:45       ` Wei Hu (Xavier)
     [not found]         ` <59BA33B1.8030300-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-09-14 12:43           ` Leon Romanovsky
     [not found]             ` <20170914124341.GY3405-U/DQcQFIOTAAJjI8aNfphQ@public.gmane.org>
2017-09-15  1:12               ` Wei Hu (Xavier)
     [not found]                 ` <59BB28F1.9040007-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-09-20  2:50                   ` Wei Hu (Xavier)
2017-09-25  6:18   ` Wei Hu (Xavier)
     [not found]     ` <59C89FD0.9050606-hv44wF8Li93QT0dZR+AlfA@public.gmane.org>
2017-09-25 15:57       ` Doug Ledford
     [not found]         ` <1506355051.120853.70.camel-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2017-09-25 17:37           ` Doug Ledford

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1504084998-64397-13-git-send-email-xavier.huwei@huawei.com \
    --to=xavier.huwei-hv44wf8li93qt0dzr+alfa@public.gmane.org \
    --cc=charles.chenxin-hv44wF8Li93QT0dZR+AlfA@public.gmane.org \
    --cc=dledford-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org \
    --cc=lijun_nudt-9Onoh4P/yGk@public.gmane.org \
    --cc=linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org \
    --cc=linuxarm-hv44wF8Li93QT0dZR+AlfA@public.gmane.org \
    --cc=liuyixian-hv44wF8Li93QT0dZR+AlfA@public.gmane.org \
    --cc=oulijun-hv44wF8Li93QT0dZR+AlfA@public.gmane.org \
    --cc=xavier.huwei-9Onoh4P/yGk@public.gmane.org \
    --cc=xushaobo2-hv44wF8Li93QT0dZR+AlfA@public.gmane.org \
    --cc=zhangxiping3-hv44wF8Li93QT0dZR+AlfA@public.gmane.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).