* [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function
@ 2023-01-09 19:53 Zhu Yanjun
  2023-01-09 19:53 ` [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
                   ` (4 more replies)
  0 siblings, 5 replies; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-09 19:53 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the shared source code into several new functions for future use.
This series contains no bug fixes or new features.

The new functions are as below:

irdma_reg_user_mr_type_mem
irdma_alloc_iwmr
irdma_free_iwmr
irdma_reg_user_mr_type_qp
irdma_reg_user_mr_type_cq

These functions will be used in the dmabuf feature.

Zhu Yanjun (4):
  RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  RDMA/irdma: Split mr alloc and free into new functions
  RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq

 drivers/infiniband/hw/irdma/verbs.c | 260 +++++++++++++++++-----------
 1 file changed, 160 insertions(+), 100 deletions(-)

-- 
2.31.1



* [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  2023-01-09 19:53 [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
@ 2023-01-09 19:53 ` Zhu Yanjun
  2023-01-10  4:10   ` Saleem, Shiraz
  2023-01-09 19:54 ` [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions Zhu Yanjun
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-09 19:53 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

The source code related to IRDMA_MEMREG_TYPE_MEM is split out into a
new function, irdma_reg_user_mr_type_mem.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 85 +++++++++++++++++------------
 1 file changed, 51 insertions(+), 34 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index f6973ea55eda..40109da6489a 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2745,6 +2745,55 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	return ret;
 }
 
+static int irdma_reg_user_mr_type_mem(struct irdma_device *iwdev,
+				      struct irdma_mr *iwmr, int access)
+{
+	int err = 0;
+	bool use_pbles = false;
+	u32 stag = 0;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+
+	use_pbles = (iwmr->page_cnt != 1);
+
+	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+	if (err)
+		return err;
+
+	if (use_pbles) {
+		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
+						iwmr->page_size);
+		if (err) {
+			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+			iwpbl->pbl_allocated = false;
+			return err;
+		}
+	}
+
+	stag = irdma_create_stag(iwdev);
+	if (!stag) {
+		if (use_pbles) {
+			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+			iwpbl->pbl_allocated = false;
+		}
+		return -ENOMEM;
+	}
+
+	iwmr->stag = stag;
+	iwmr->ibmr.rkey = stag;
+	iwmr->ibmr.lkey = stag;
+	err = irdma_hwreg_mr(iwdev, iwmr, access);
+	if (err) {
+		irdma_free_stag(iwdev, stag);
+		if (use_pbles) {
+			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+			iwpbl->pbl_allocated = false;
+		}
+		return err;
+	}
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2761,17 +2810,15 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_ucontext *ucontext;
-	struct irdma_pble_alloc *palloc;
 	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 total, stag = 0;
+	u32 total;
 	u8 shadow_pgcnt = 1;
 	bool use_pbles = false;
 	unsigned long flags;
 	int err = -EINVAL;
-	int ret;
 
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
@@ -2818,7 +2865,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	}
 	iwmr->len = region->length;
 	iwpbl->user_base = virt;
-	palloc = &iwpbl->pble_alloc;
 	iwmr->type = req.reg_type;
 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
@@ -2864,36 +2910,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
-		use_pbles = (iwmr->page_cnt != 1);
-
-		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
 		if (err)
 			goto error;
-
-		if (use_pbles) {
-			ret = irdma_check_mr_contiguous(palloc,
-							iwmr->page_size);
-			if (ret) {
-				irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
-				iwpbl->pbl_allocated = false;
-			}
-		}
-
-		stag = irdma_create_stag(iwdev);
-		if (!stag) {
-			err = -ENOMEM;
-			goto error;
-		}
-
-		iwmr->stag = stag;
-		iwmr->ibmr.rkey = stag;
-		iwmr->ibmr.lkey = stag;
-		err = irdma_hwreg_mr(iwdev, iwmr, access);
-		if (err) {
-			irdma_free_stag(iwdev, stag);
-			goto error;
-		}
-
 		break;
 	default:
 		goto error;
@@ -2904,8 +2923,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	return &iwmr->ibmr;
 
 error:
-	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
-		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 	ib_umem_release(region);
 	kfree(iwmr);
 
-- 
2.27.0



* [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions
  2023-01-09 19:53 [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
  2023-01-09 19:53 ` [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
@ 2023-01-09 19:54 ` Zhu Yanjun
  2023-01-10  4:11   ` Saleem, Shiraz
  2023-01-09 19:54 ` [PATCH for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-09 19:54 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

In the function irdma_reg_user_mr, the mr allocation and free logic
will be reused by other functions. As such, the source code related to
mr allocation and free is split out into new functions.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 78 ++++++++++++++++++-----------
 1 file changed, 50 insertions(+), 28 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 40109da6489a..5cff8656d79e 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2794,6 +2794,52 @@ static int irdma_reg_user_mr_type_mem(struct irdma_device *iwdev,
 	return err;
 }
 
+static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+					 struct ib_pd *pd, u64 virt,
+					 __u16 reg_type,
+					 struct irdma_device *iwdev)
+{
+	struct irdma_mr *iwmr;
+	struct irdma_pbl *iwpbl;
+
+	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+	if (!iwmr)
+		return ERR_PTR(-ENOMEM);
+
+	iwpbl = &iwmr->iwpbl;
+	iwpbl->iwmr = iwmr;
+	iwmr->region = region;
+	iwmr->ibmr.pd = pd;
+	iwmr->ibmr.device = pd->device;
+	iwmr->ibmr.iova = virt;
+	iwmr->page_size = PAGE_SIZE;
+	iwmr->type = reg_type;
+
+	if (reg_type == IRDMA_MEMREG_TYPE_MEM) {
+		iwmr->page_size = ib_umem_find_best_pgsz(region,
+							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
+							 virt);
+		if (unlikely(!iwmr->page_size)) {
+			kfree(iwmr);
+			return ERR_PTR(-EOPNOTSUPP);
+		}
+	}
+
+	iwmr->len = region->length;
+	iwpbl->user_base = virt;
+	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+	return iwmr;
+}
+
+/*
+ * This function frees the resources from irdma_alloc_iwmr
+ */
+static void irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+	kfree(iwmr);
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2839,34 +2885,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return ERR_PTR(-EFAULT);
 	}
 
-	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
-	if (!iwmr) {
+	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type, iwdev);
+	if (IS_ERR(iwmr)) {
 		ib_umem_release(region);
-		return ERR_PTR(-ENOMEM);
+		return (struct ib_mr *)iwmr;
 	}
 
 	iwpbl = &iwmr->iwpbl;
-	iwpbl->iwmr = iwmr;
-	iwmr->region = region;
-	iwmr->ibmr.pd = pd;
-	iwmr->ibmr.device = pd->device;
-	iwmr->ibmr.iova = virt;
-	iwmr->page_size = PAGE_SIZE;
-
-	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
-		iwmr->page_size = ib_umem_find_best_pgsz(region,
-							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
-							 virt);
-		if (unlikely(!iwmr->page_size)) {
-			kfree(iwmr);
-			ib_umem_release(region);
-			return ERR_PTR(-EOPNOTSUPP);
-		}
-	}
-	iwmr->len = region->length;
-	iwpbl->user_base = virt;
-	iwmr->type = req.reg_type;
-	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
@@ -2918,13 +2943,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		goto error;
 	}
 
-	iwmr->type = req.reg_type;
-
 	return &iwmr->ibmr;
-
 error:
 	ib_umem_release(region);
-	kfree(iwmr);
+	irdma_free_iwmr(iwmr);
 
 	return ERR_PTR(err);
 }
-- 
2.27.0



* [PATCH for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  2023-01-09 19:53 [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
  2023-01-09 19:53 ` [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
  2023-01-09 19:54 ` [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions Zhu Yanjun
@ 2023-01-09 19:54 ` Zhu Yanjun
  2023-01-10  4:10   ` Saleem, Shiraz
  2023-01-09 19:54 ` [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
  2023-01-10  4:14 ` [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Saleem, Shiraz
  4 siblings, 1 reply; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-09 19:54 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the source code related to QP handling into a new function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
 1 file changed, 34 insertions(+), 14 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 287d4313f14d..e90eba73c396 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2831,6 +2831,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
 	kfree(iwmr);
 }
 
+static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+				     struct irdma_device *iwdev,
+				     struct ib_udata *udata,
+				     struct irdma_mr *iwmr)
+{
+	u32 total;
+	int err = 0;
+	u8 shadow_pgcnt = 1;
+	bool use_pbles = false;
+	unsigned long flags;
+	struct irdma_ucontext *ucontext;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+
+	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	total = req.sq_pages + req.rq_pages;
+	use_pbles = (total > 2);
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2886,23 +2919,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
-		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-		total = req.sq_pages + req.rq_pages;
-		use_pbles = (total > 2);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
 		if (err)
 			goto error;
 
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-- 
2.31.1



* [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
  2023-01-09 19:53 [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
                   ` (2 preceding siblings ...)
  2023-01-09 19:54 ` [PATCH for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
@ 2023-01-09 19:54 ` Zhu Yanjun
  2023-01-10  4:12   ` Saleem, Shiraz
  2023-01-10  4:14 ` [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Saleem, Shiraz
  4 siblings, 1 reply; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-09 19:54 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the source code related to CQ handling into a new function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 60 +++++++++++++++++------------
 1 file changed, 35 insertions(+), 25 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index e90eba73c396..b4befbafb830 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2864,6 +2864,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
 	return err;
 }
 
+static int irdma_reg_user_mr_type_cq(struct irdma_device *iwdev,
+				     struct irdma_mr *iwmr,
+				     struct ib_udata *udata,
+				     struct irdma_mem_reg_req req)
+{
+	int err = 0;
+	u8 shadow_pgcnt = 1;
+	bool use_pbles = false;
+	struct irdma_ucontext *ucontext;
+	unsigned long flags;
+	u32 total;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+
+	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+		shadow_pgcnt = 0;
+	total = req.cq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	use_pbles = (req.cq_pages > 1);
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2879,15 +2913,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 {
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
-	struct irdma_ucontext *ucontext;
-	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 total;
-	u8 shadow_pgcnt = 1;
-	bool use_pbles = false;
-	unsigned long flags;
 	int err = -EINVAL;
 
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
@@ -2915,8 +2943,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return (struct ib_mr *)iwmr;
 	}
 
-	iwpbl = &iwmr->iwpbl;
-
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
 		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
@@ -2925,25 +2951,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
-		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-			shadow_pgcnt = 0;
-		total = req.cq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-
-		use_pbles = (req.cq_pages > 1);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
 		if (err)
 			goto error;
-
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
 		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
-- 
2.31.1



* RE: [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  2023-01-09 19:53 ` [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
@ 2023-01-10  4:10   ` Saleem, Shiraz
  2023-01-11  5:41     ` Zhu Yanjun
  0 siblings, 1 reply; 14+ messages in thread
From: Saleem, Shiraz @ 2023-01-10  4:10 UTC (permalink / raw)
  To: Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

> Subject: [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into
> irdma_reg_user_mr_type_mem
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> The source code related to IRDMA_MEMREG_TYPE_MEM is split out into a
> new function, irdma_reg_user_mr_type_mem.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 85 +++++++++++++++++------------
>  1 file changed, 51 insertions(+), 34 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index f6973ea55eda..40109da6489a 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2745,6 +2745,55 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev,
> struct irdma_mr *iwmr,
>  	return ret;
>  }
> 
> +static int irdma_reg_user_mr_type_mem(struct irdma_device *iwdev,
> +				      struct irdma_mr *iwmr, int access) {
> +	int err = 0;
> +	bool use_pbles = false;
> +	u32 stag = 0;

No need to initialize any of these?

> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +
> +	use_pbles = (iwmr->page_cnt != 1);
> +
> +	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
> +	if (err)
> +		return err;
> +
> +	if (use_pbles) {
> +		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
> +						iwmr->page_size);
> +		if (err) {
> +			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl-
> >pble_alloc);
> +			iwpbl->pbl_allocated = false;
> +			return err;

No, this should not cause an error; we just don't want to use pbles for this region. Reset use_pbles to false here instead?
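i.e., something like this (untested, just to illustrate the idea):

	if (use_pbles) {
		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
						iwmr->page_size);
		if (err) {
			/* region not contiguous - register without pbles */
			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
			iwpbl->pbl_allocated = false;
			use_pbles = false;
		}
	}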

> +		}
> +	}
> +
> +	stag = irdma_create_stag(iwdev);
> +	if (!stag) {
> +		if (use_pbles) {
> +			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl-
> >pble_alloc);
> +			iwpbl->pbl_allocated = false;
> +		}
> +		return -ENOMEM;
> +	}
> +
> +	iwmr->stag = stag;
> +	iwmr->ibmr.rkey = stag;
> +	iwmr->ibmr.lkey = stag;
> +	err = irdma_hwreg_mr(iwdev, iwmr, access);
> +	if (err) {
> +		irdma_free_stag(iwdev, stag);
> +		if (use_pbles) {
> +			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl-
> >pble_alloc);
> +			iwpbl->pbl_allocated = false;

Setting iwpbl->pbl_allocated to false is not required; we are going to free the iwmr memory on this error path anyway.

Just a suggestion: maybe use a goto to a label "free_pble" that does the irdma_free_pble and returns err, and re-use it for the irdma_create_stag error unwind too.
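The tail of the function could then look roughly like this (untested sketch):

	stag = irdma_create_stag(iwdev);
	if (!stag) {
		err = -ENOMEM;
		goto free_pble;
	}

	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	err = irdma_hwreg_mr(iwdev, iwmr, access);
	if (err)
		goto err_hwreg;

	return 0;

err_hwreg:
	irdma_free_stag(iwdev, stag);
free_pble:
	/* only allocated when use_pbles was set above */
	if (use_pbles)
		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);

	return err;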

> +		}
> +		return err;
> +	}
> +
> +	return err;
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2761,17 +2810,15 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
> *pd, u64 start, u64 len,  #define IRDMA_MEM_REG_MIN_REQ_LEN
> offsetofend(struct irdma_mem_reg_req, sq_pages)
>  	struct irdma_device *iwdev = to_iwdev(pd->device);
>  	struct irdma_ucontext *ucontext;
> -	struct irdma_pble_alloc *palloc;
>  	struct irdma_pbl *iwpbl;
>  	struct irdma_mr *iwmr;
>  	struct ib_umem *region;
>  	struct irdma_mem_reg_req req;
> -	u32 total, stag = 0;
> +	u32 total;
>  	u8 shadow_pgcnt = 1;
>  	bool use_pbles = false;
>  	unsigned long flags;
>  	int err = -EINVAL;
> -	int ret;
> 
>  	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>  		return ERR_PTR(-EINVAL);
> @@ -2818,7 +2865,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd,
> u64 start, u64 len,
>  	}
>  	iwmr->len = region->length;
>  	iwpbl->user_base = virt;
> -	palloc = &iwpbl->pble_alloc;
>  	iwmr->type = req.reg_type;
>  	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
> 
> @@ -2864,36 +2910,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
> *pd, u64 start, u64 len,
>  		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_MEM:
> -		use_pbles = (iwmr->page_cnt != 1);
> -
> -		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
> +		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);

Perhaps you can just pass iwmr and access as args for this API and compute iwdev in the function using to_iwdev(iwmr->ibmr.device).
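i.e. (sketch):

	static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
	{
		struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
		...
	}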

>  		if (err)
>  			goto error;
> -
> -		if (use_pbles) {
> -			ret = irdma_check_mr_contiguous(palloc,
> -							iwmr->page_size);
> -			if (ret) {
> -				irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
> -				iwpbl->pbl_allocated = false;
> -			}
> -		}
> -
> -		stag = irdma_create_stag(iwdev);
> -		if (!stag) {
> -			err = -ENOMEM;
> -			goto error;
> -		}
> -
> -		iwmr->stag = stag;
> -		iwmr->ibmr.rkey = stag;
> -		iwmr->ibmr.lkey = stag;
> -		err = irdma_hwreg_mr(iwdev, iwmr, access);
> -		if (err) {
> -			irdma_free_stag(iwdev, stag);
> -			goto error;
> -		}
> -
>  		break;
>  	default:
>  		goto error;
> @@ -2904,8 +2923,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd,
> u64 start, u64 len,
>  	return &iwmr->ibmr;
> 
>  error:
> -	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
> -		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
>  	ib_umem_release(region);
>  	kfree(iwmr);
> 
> --
> 2.27.0



* RE: [PATCH for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  2023-01-09 19:54 ` [PATCH for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
@ 2023-01-10  4:10   ` Saleem, Shiraz
  2023-01-11  6:11     ` Zhu Yanjun
  0 siblings, 1 reply; 14+ messages in thread
From: Saleem, Shiraz @ 2023-01-10  4:10 UTC (permalink / raw)
  To: Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

> Subject: [PATCH for-next 3/4] RDMA/irdma: Split QP handler into
> irdma_reg_user_mr_type_qp
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the source code related to QP handling into a new function.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
>  1 file changed, 34 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index 287d4313f14d..e90eba73c396 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2831,6 +2831,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
>  	kfree(iwmr);
>  }
> 
> +static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
> +				     struct irdma_device *iwdev,
> +				     struct ib_udata *udata,
> +				     struct irdma_mr *iwmr)

You could omit iwdev and compute it from iwmr.
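i.e. (sketch):

	static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
					     struct ib_udata *udata,
					     struct irdma_mr *iwmr)
	{
		struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
		...
	}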

> +{
> +	u32 total;
> +	int err = 0;

No need to initialize.

> +	u8 shadow_pgcnt = 1;

> +	bool use_pbles = false;


No need to initialize use_pbles.


> +	unsigned long flags;
> +	struct irdma_ucontext *ucontext;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +
> +	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
> +	if (total > iwmr->page_cnt)
> +		return -EINVAL;
> +
> +	total = req.sq_pages + req.rq_pages;
> +	use_pbles = (total > 2);
> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +	if (err)
> +		return err;
> +
> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +					     ibucontext);
> +	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
> +	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
> +	iwpbl->on_list = true;
> +	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
> +
> +	return err;
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2886,23 +2919,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
> *pd, u64 start, u64 len,
> 
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
> -		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
> -		if (total > iwmr->page_cnt) {
> -			err = -EINVAL;
> -			goto error;
> -		}
> -		total = req.sq_pages + req.rq_pages;
> -		use_pbles = (total > 2);
> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
>  		if (err)
>  			goto error;
> 
> -		ucontext = rdma_udata_to_drv_context(udata, struct
> irdma_ucontext,
> -						     ibucontext);
> -		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
> -		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
> -		iwpbl->on_list = true;
> -		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_CQ:
>  		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
> IRDMA_FEATURE_CQ_RESIZE)
> --
> 2.31.1



* RE: [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions
  2023-01-09 19:54 ` [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions Zhu Yanjun
@ 2023-01-10  4:11   ` Saleem, Shiraz
  2023-01-11  5:59     ` Zhu Yanjun
  0 siblings, 1 reply; 14+ messages in thread
From: Saleem, Shiraz @ 2023-01-10  4:11 UTC (permalink / raw)
  To: Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

> Subject: [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new
> functions
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> In the function irdma_reg_user_mr, the mr allocation and free logic will be
> reused by other functions. As such, the source code related to mr allocation
> and free is split out into new functions.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 78 ++++++++++++++++++-----------
>  1 file changed, 50 insertions(+), 28 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index 40109da6489a..5cff8656d79e 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2794,6 +2794,52 @@ static int irdma_reg_user_mr_type_mem(struct
> irdma_device *iwdev,
>  	return err;
>  }
> 
> +static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
> +					 struct ib_pd *pd, u64 virt,
> +					 __u16 reg_type,

enum irdma_memreg_type

> +					 struct irdma_device *iwdev)
> +{
> +	struct irdma_mr *iwmr;
> +	struct irdma_pbl *iwpbl;
> +
> +	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
> +	if (!iwmr)
> +		return ERR_PTR(-ENOMEM);
> +
> +	iwpbl = &iwmr->iwpbl;
> +	iwpbl->iwmr = iwmr;
> +	iwmr->region = region;
> +	iwmr->ibmr.pd = pd;
> +	iwmr->ibmr.device = pd->device;
> +	iwmr->ibmr.iova = virt;
> +	iwmr->page_size = PAGE_SIZE;

Delete this and see comment below,

> +	iwmr->type = reg_type;
> +
> +	if (reg_type == IRDMA_MEMREG_TYPE_MEM) {
> +		iwmr->page_size = ib_umem_find_best_pgsz(region,
> +							 iwdev->rf-
> >sc_dev.hw_attrs.page_size_cap,

I think Jason made the comment that we should always validate the page size with this function before using it in rdma_umem_for_each_dma_block.

We can move it out of this if block with something like:

pgsz_bitmask = reg_type == IRDMA_MEMREG_TYPE_MEM ?
			iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;

iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmask, virt);



> +							 virt);
> +		if (unlikely(!iwmr->page_size)) {
> +			kfree(iwmr);
> +			return ERR_PTR(-EOPNOTSUPP);
> +		}
> +	}
> +
> +	iwmr->len = region->length;
> +	iwpbl->user_base = virt;
> +	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
> +
> +	return iwmr;
> +}
> +
> +/*
> + * This function frees the resources from irdma_alloc_iwmr  */ static

This doesn't follow the kdoc format? And it's not very useful. I would delete it.

> +void irdma_free_iwmr(struct irdma_mr *iwmr) {
> +	kfree(iwmr);
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2839,34 +2885,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
> *pd, u64 start, u64 len,
>  		return ERR_PTR(-EFAULT);
>  	}
> 
> -	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
> -	if (!iwmr) {
> +	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type, iwdev);
> +	if (IS_ERR(iwmr)) {
>  		ib_umem_release(region);
> -		return ERR_PTR(-ENOMEM);
> +		return (struct ib_mr *)iwmr;
>  	}
> 
>  	iwpbl = &iwmr->iwpbl;
> -	iwpbl->iwmr = iwmr;
> -	iwmr->region = region;
> -	iwmr->ibmr.pd = pd;
> -	iwmr->ibmr.device = pd->device;
> -	iwmr->ibmr.iova = virt;
> -	iwmr->page_size = PAGE_SIZE;
> -
> -	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
> -		iwmr->page_size = ib_umem_find_best_pgsz(region,
> -							 iwdev->rf-
> >sc_dev.hw_attrs.page_size_cap,
> -							 virt);
> -		if (unlikely(!iwmr->page_size)) {
> -			kfree(iwmr);
> -			ib_umem_release(region);
> -			return ERR_PTR(-EOPNOTSUPP);
> -		}
> -	}
> -	iwmr->len = region->length;
> -	iwpbl->user_base = virt;
> -	iwmr->type = req.reg_type;
> -	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
> 
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
> @@ -2918,13 +2943,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
> *pd, u64 start, u64 len,
>  		goto error;
>  	}
> 
> -	iwmr->type = req.reg_type;
> -
>  	return &iwmr->ibmr;
> -
>  error:
>  	ib_umem_release(region);
> -	kfree(iwmr);
> +	irdma_free_iwmr(iwmr);
> 
>  	return ERR_PTR(err);
>  }
> --
> 2.27.0



* RE: [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
  2023-01-09 19:54 ` [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
@ 2023-01-10  4:12   ` Saleem, Shiraz
  2023-01-11  6:23     ` Zhu Yanjun
  0 siblings, 1 reply; 14+ messages in thread
From: Saleem, Shiraz @ 2023-01-10  4:12 UTC (permalink / raw)
  To: Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

> Subject: [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into
> irdma_reg_user_mr_type_cq
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the source code related to CQ handling into a new function.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 60 +++++++++++++++++------------
>  1 file changed, 35 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index e90eba73c396..b4befbafb830 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2864,6 +2864,40 @@ static int irdma_reg_user_mr_type_qp(struct
> irdma_mem_reg_req req,
>  	return err;
>  }
> 
> +static int irdma_reg_user_mr_type_cq(struct irdma_device *iwdev,
> +				     struct irdma_mr *iwmr,
> +				     struct ib_udata *udata,
> +				     struct irdma_mem_reg_req req)

I would keep the order of these API args the same as for irdma_reg_user_mr_type_qp.
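i.e. (sketch; iwdev could also be dropped and derived from iwmr, as suggested for the QP variant):

	static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
					     struct ib_udata *udata,
					     struct irdma_mr *iwmr)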

> +{
> +	int err = 0;

No need to initialize.

> +	u8 shadow_pgcnt = 1;
> +	bool use_pbles = false;

No need to initialize use_pbles.

> +	struct irdma_ucontext *ucontext;
> +	unsigned long flags;
> +	u32 total;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +
> +	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
> IRDMA_FEATURE_CQ_RESIZE)
> +		shadow_pgcnt = 0;
> +	total = req.cq_pages + shadow_pgcnt;
> +	if (total > iwmr->page_cnt)
> +		return -EINVAL;
> +
> +	use_pbles = (req.cq_pages > 1);
> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +	if (err)
> +		return err;
> +
> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +					     ibucontext);
> +	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> +	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> +	iwpbl->on_list = true;
> +	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
> +
> +	return err;
> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2879,15 +2913,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
> *pd, u64 start, u64 len,  {  #define IRDMA_MEM_REG_MIN_REQ_LEN
> offsetofend(struct irdma_mem_reg_req, sq_pages)
>  	struct irdma_device *iwdev = to_iwdev(pd->device);
> -	struct irdma_ucontext *ucontext;
> -	struct irdma_pbl *iwpbl;
>  	struct irdma_mr *iwmr;
>  	struct ib_umem *region;
>  	struct irdma_mem_reg_req req;
> -	u32 total;
> -	u8 shadow_pgcnt = 1;
> -	bool use_pbles = false;
> -	unsigned long flags;
>  	int err = -EINVAL;

Do we need to initialize err here too? Probably separate from this patch, but it could be cleaned up.

> 
>  	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
> @@ -2915,8 +2943,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd,
> u64 start, u64 len,
>  		return (struct ib_mr *)iwmr;
>  	}
> 
> -	iwpbl = &iwmr->iwpbl;
> -
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
>  		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr); @@ -
> 2925,25 +2951,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64
> start, u64 len,
> 
>  		break;
>  	case IRDMA_MEMREG_TYPE_CQ:
> -		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
> IRDMA_FEATURE_CQ_RESIZE)
> -			shadow_pgcnt = 0;
> -		total = req.cq_pages + shadow_pgcnt;
> -		if (total > iwmr->page_cnt) {
> -			err = -EINVAL;
> -			goto error;
> -		}
> -
> -		use_pbles = (req.cq_pages > 1);
> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +		err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
>  		if (err)
>  			goto error;
> -
> -		ucontext = rdma_udata_to_drv_context(udata, struct
> irdma_ucontext,
> -						     ibucontext);
> -		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> -		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> -		iwpbl->on_list = true;
> -		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_MEM:
>  		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
> --
> 2.31.1



* RE: [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function
  2023-01-09 19:53 [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
                   ` (3 preceding siblings ...)
  2023-01-09 19:54 ` [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
@ 2023-01-10  4:14 ` Saleem, Shiraz
  4 siblings, 0 replies; 14+ messages in thread
From: Saleem, Shiraz @ 2023-01-10  4:14 UTC (permalink / raw)
  To: Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

> Subject: [PATCH for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the shared source code into several new functions for future use.
> This series contains no bug fixes or new features.
> 
> The new functions are as below:
> 
> irdma_reg_user_mr_type_mem
> irdma_alloc_iwmr
> irdma_free_iwmr
> irdma_reg_user_mr_type_qp
> irdma_reg_user_mr_type_cq

Thanks! General direction is good. I provided some feedback.

> 
> These functions will be used in the dmabuf feature.

Do you want to add that dmabuf patch to this series too, so that we can see how it re-uses the new APIs you created?

So the first four patches would be clean-up/refactor patches in preparation for patch #5, which is the dmabuf API addition.

> 
> Zhu Yanjun (4):
>   RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
>   RDMA/irdma: Split mr alloc and free into new functions
>   RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
>   RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
> 
>  drivers/infiniband/hw/irdma/verbs.c | 260 +++++++++++++++++-----------
>  1 file changed, 160 insertions(+), 100 deletions(-)
> 
> --
> 2.31.1



* Re: [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  2023-01-10  4:10   ` Saleem, Shiraz
@ 2023-01-11  5:41     ` Zhu Yanjun
  0 siblings, 0 replies; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-11  5:41 UTC (permalink / raw)
  To: Saleem, Shiraz, Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma


On 2023/1/10 12:10, Saleem, Shiraz wrote:
>> Subject: [PATCH for-next 1/4] RDMA/irdma: Split MEM handler into
>> irdma_reg_user_mr_type_mem
>>
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>
>> The source code related to IRDMA_MEMREG_TYPE_MEM is split out into a
>> new function, irdma_reg_user_mr_type_mem.
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>>   drivers/infiniband/hw/irdma/verbs.c | 85 +++++++++++++++++------------
>>   1 file changed, 51 insertions(+), 34 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index f6973ea55eda..40109da6489a 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2745,6 +2745,55 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev,
>> struct irdma_mr *iwmr,
>>   	return ret;
>>   }
>>
>> +static int irdma_reg_user_mr_type_mem(struct irdma_device *iwdev,
>> +				      struct irdma_mr *iwmr, int access) {
>> +	int err = 0;
>> +	bool use_pbles = false;
>> +	u32 stag = 0;
> No need to initialize any of these?


Got it.


>
>> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> +
>> +	use_pbles = (iwmr->page_cnt != 1);
>> +
>> +	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
>> +	if (err)
>> +		return err;
>> +
>> +	if (use_pbles) {
>> +		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
>> +						iwmr->page_size);
>> +		if (err) {
>> +			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl-
>>> pble_alloc);
>> +			iwpbl->pbl_allocated = false;
>> +			return err;
> No, this should not cause an error; we just don't want to use pbles for this region. Reset use_pbles to false here instead?


Got it.


>
>> +		}
>> +	}
>> +
>> +	stag = irdma_create_stag(iwdev);
>> +	if (!stag) {
>> +		if (use_pbles) {
>> +			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl-
>>> pble_alloc);
>> +			iwpbl->pbl_allocated = false;
>> +		}
>> +		return -ENOMEM;
>> +	}
>> +
>> +	iwmr->stag = stag;
>> +	iwmr->ibmr.rkey = stag;
>> +	iwmr->ibmr.lkey = stag;
>> +	err = irdma_hwreg_mr(iwdev, iwmr, access);
>> +	if (err) {
>> +		irdma_free_stag(iwdev, stag);
>> +		if (use_pbles) {
>> +			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl-
>>> pble_alloc);
>> +			iwpbl->pbl_allocated = false;
> Setting iwpbl->pbl_allocated to false is not required; we are going to free the iwmr memory on this error path anyway.
>
> Just a suggestion: maybe use a goto to a label "free_pble" that does the irdma_free_pble and returns err, and re-use it for the irdma_create_stag error unwind too.


Sure. I followed your advice in the latest commits.


>
>> +		}
>> +		return err;
>> +	}
>> +
>> +	return err;
>> +}
>> +
>>   /**
>>    * irdma_reg_user_mr - Register a user memory region
>>    * @pd: ptr of pd
>> @@ -2761,17 +2810,15 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
>> *pd, u64 start, u64 len,  #define IRDMA_MEM_REG_MIN_REQ_LEN
>> offsetofend(struct irdma_mem_reg_req, sq_pages)
>>   	struct irdma_device *iwdev = to_iwdev(pd->device);
>>   	struct irdma_ucontext *ucontext;
>> -	struct irdma_pble_alloc *palloc;
>>   	struct irdma_pbl *iwpbl;
>>   	struct irdma_mr *iwmr;
>>   	struct ib_umem *region;
>>   	struct irdma_mem_reg_req req;
>> -	u32 total, stag = 0;
>> +	u32 total;
>>   	u8 shadow_pgcnt = 1;
>>   	bool use_pbles = false;
>>   	unsigned long flags;
>>   	int err = -EINVAL;
>> -	int ret;
>>
>>   	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>>   		return ERR_PTR(-EINVAL);
>> @@ -2818,7 +2865,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd,
>> u64 start, u64 len,
>>   	}
>>   	iwmr->len = region->length;
>>   	iwpbl->user_base = virt;
>> -	palloc = &iwpbl->pble_alloc;
>>   	iwmr->type = req.reg_type;
>>   	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
>>
>> @@ -2864,36 +2910,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
>> *pd, u64 start, u64 len,
>>   		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>>   		break;
>>   	case IRDMA_MEMREG_TYPE_MEM:
>> -		use_pbles = (iwmr->page_cnt != 1);
>> -
>> -		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
>> +		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
> Perhaps you can just pass iwmr and access as args for this API and compute iwdev in the function using to_iwdev(iwmr->ibmr.device).


Got it. I followed your advice in the latest commits.

I will send out the latest commits very soon.

Zhu Yanjun


>
>>   		if (err)
>>   			goto error;
>> -
>> -		if (use_pbles) {
>> -			ret = irdma_check_mr_contiguous(palloc,
>> -							iwmr->page_size);
>> -			if (ret) {
>> -				irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
>> -				iwpbl->pbl_allocated = false;
>> -			}
>> -		}
>> -
>> -		stag = irdma_create_stag(iwdev);
>> -		if (!stag) {
>> -			err = -ENOMEM;
>> -			goto error;
>> -		}
>> -
>> -		iwmr->stag = stag;
>> -		iwmr->ibmr.rkey = stag;
>> -		iwmr->ibmr.lkey = stag;
>> -		err = irdma_hwreg_mr(iwdev, iwmr, access);
>> -		if (err) {
>> -			irdma_free_stag(iwdev, stag);
>> -			goto error;
>> -		}
>> -
>>   		break;
>>   	default:
>>   		goto error;
>> @@ -2904,8 +2923,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd,
>> u64 start, u64 len,
>>   	return &iwmr->ibmr;
>>
>>   error:
>> -	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
>> -		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
>>   	ib_umem_release(region);
>>   	kfree(iwmr);
>>
>> --
>> 2.27.0


* Re: [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions
  2023-01-10  4:11   ` Saleem, Shiraz
@ 2023-01-11  5:59     ` Zhu Yanjun
  0 siblings, 0 replies; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-11  5:59 UTC (permalink / raw)
  To: Saleem, Shiraz, Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma


On 2023/1/10 12:11, Saleem, Shiraz wrote:
>> Subject: [PATCH for-next 2/4] RDMA/irdma: Split mr alloc and free into new
>> functions
>>
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>
>> In the function irdma_reg_user_mr, the mr allocation and free logic will be
>> reused by other functions. As such, the source code related to mr allocation
>> and free is split out into new functions.
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>>   drivers/infiniband/hw/irdma/verbs.c | 78 ++++++++++++++++++-----------
>>   1 file changed, 50 insertions(+), 28 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index 40109da6489a..5cff8656d79e 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2794,6 +2794,52 @@ static int irdma_reg_user_mr_type_mem(struct
>> irdma_device *iwdev,
>>   	return err;
>>   }
>>
>> +static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
>> +					 struct ib_pd *pd, u64 virt,
>> +					 __u16 reg_type,
> enum irdma_memreg_type


Good catch


>
>> +					 struct irdma_device *iwdev)
>> +{
>> +	struct irdma_mr *iwmr;
>> +	struct irdma_pbl *iwpbl;
>> +
>> +	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
>> +	if (!iwmr)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	iwpbl = &iwmr->iwpbl;
>> +	iwpbl->iwmr = iwmr;
>> +	iwmr->region = region;
>> +	iwmr->ibmr.pd = pd;
>> +	iwmr->ibmr.device = pd->device;
>> +	iwmr->ibmr.iova = virt;
>> +	iwmr->page_size = PAGE_SIZE;
> Delete this and see comment below,


Will delete.


>
>> +	iwmr->type = reg_type;
>> +
>> +	if (reg_type == IRDMA_MEMREG_TYPE_MEM) {
>> +		iwmr->page_size = ib_umem_find_best_pgsz(region,
>> +							 iwdev->rf-
>>> sc_dev.hw_attrs.page_size_cap,
> I think Jason made the comment that we should always validate the page size with this function before using it in rdma_umem_for_each_dma_block.
>
> We can move it out of this if block with something like:
>
> pgsz_bitmask = reg_type == IRDMA_MEMREG_TYPE_MEM ?
> 			iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
>
> iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmask, virt);


Wonderful. I followed your suggestions in the latest commits.


>
>
>
>> +							 virt);
>> +		if (unlikely(!iwmr->page_size)) {
>> +			kfree(iwmr);
>> +			return ERR_PTR(-EOPNOTSUPP);
>> +		}
>> +	}
>> +
>> +	iwmr->len = region->length;
>> +	iwpbl->user_base = virt;
>> +	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
>> +
>> +	return iwmr;
>> +}
>> +
>> +/*
>> + * This function frees the resources from irdma_alloc_iwmr  */ static
> This doesn't follow the kdoc format? And it's not very useful. I would delete it.

Will delete.

Appreciate your help. I will send out the latest commits very soon.

Zhu Yanjun

>
>> +void irdma_free_iwmr(struct irdma_mr *iwmr) {
>> +	kfree(iwmr);
>> +}
>> +
>>   /**
>>    * irdma_reg_user_mr - Register a user memory region
>>    * @pd: ptr of pd
>> @@ -2839,34 +2885,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
>> *pd, u64 start, u64 len,
>>   		return ERR_PTR(-EFAULT);
>>   	}
>>
>> -	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
>> -	if (!iwmr) {
>> +	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type, iwdev);
>> +	if (IS_ERR(iwmr)) {
>>   		ib_umem_release(region);
>> -		return ERR_PTR(-ENOMEM);
>> +		return (struct ib_mr *)iwmr;
>>   	}
>>
>>   	iwpbl = &iwmr->iwpbl;
>> -	iwpbl->iwmr = iwmr;
>> -	iwmr->region = region;
>> -	iwmr->ibmr.pd = pd;
>> -	iwmr->ibmr.device = pd->device;
>> -	iwmr->ibmr.iova = virt;
>> -	iwmr->page_size = PAGE_SIZE;
>> -
>> -	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
>> -		iwmr->page_size = ib_umem_find_best_pgsz(region,
>> -							 iwdev->rf-
>>> sc_dev.hw_attrs.page_size_cap,
>> -							 virt);
>> -		if (unlikely(!iwmr->page_size)) {
>> -			kfree(iwmr);
>> -			ib_umem_release(region);
>> -			return ERR_PTR(-EOPNOTSUPP);
>> -		}
>> -	}
>> -	iwmr->len = region->length;
>> -	iwpbl->user_base = virt;
>> -	iwmr->type = req.reg_type;
>> -	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
>>
>>   	switch (req.reg_type) {
>>   	case IRDMA_MEMREG_TYPE_QP:
>> @@ -2918,13 +2943,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
>> *pd, u64 start, u64 len,
>>   		goto error;
>>   	}
>>
>> -	iwmr->type = req.reg_type;
>> -
>>   	return &iwmr->ibmr;
>> -
>>   error:
>>   	ib_umem_release(region);
>> -	kfree(iwmr);
>> +	irdma_free_iwmr(iwmr);
>>
>>   	return ERR_PTR(err);
>>   }
>> --
>> 2.27.0


* Re: [PATCH for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  2023-01-10  4:10   ` Saleem, Shiraz
@ 2023-01-11  6:11     ` Zhu Yanjun
  0 siblings, 0 replies; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-11  6:11 UTC (permalink / raw)
  To: Saleem, Shiraz, Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma


On 2023/1/10 12:10, Saleem, Shiraz wrote:
>> Subject: [PATCH for-next 3/4] RDMA/irdma: Split QP handler into
>> irdma_reg_user_mr_type_qp
>>
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>
>> Split the source code related to QP handling into a new function.
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>>   drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
>>   1 file changed, 34 insertions(+), 14 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index 287d4313f14d..e90eba73c396 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2831,6 +2831,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
>>   	kfree(iwmr);
>>   }
>>
>> +static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>> +				     struct irdma_device *iwdev,
>> +				     struct ib_udata *udata,
>> +				     struct irdma_mr *iwmr)
> You could omit iwdev and compute it from iwmr.


Got it.


>
>> +{
>> +	u32 total;
>> +	int err = 0;
> No need to initialize.
Got it.
>
>> +	u8 shadow_pgcnt = 1;
>> +	bool use_pbles = false;
>
> No need to initialize use_pbles.

Thanks. I will send out the latest commits very soon.

Zhu Yanjun

>
>
>> +	unsigned long flags;
>> +	struct irdma_ucontext *ucontext;
>> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> +
>> +	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
>> +	if (total > iwmr->page_cnt)
>> +		return -EINVAL;
>> +
>> +	total = req.sq_pages + req.rq_pages;
>> +	use_pbles = (total > 2);
>> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +	if (err)
>> +		return err;
>> +
>> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> +					     ibucontext);
>> +	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
>> +	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
>> +	iwpbl->on_list = true;
>> +	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>> +
>> +	return err;
>> +}
>> +
>>   /**
>>    * irdma_reg_user_mr - Register a user memory region
>>    * @pd: ptr of pd
>> @@ -2886,23 +2919,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
>> *pd, u64 start, u64 len,
>>
>>   	switch (req.reg_type) {
>>   	case IRDMA_MEMREG_TYPE_QP:
>> -		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
>> -		if (total > iwmr->page_cnt) {
>> -			err = -EINVAL;
>> -			goto error;
>> -		}
>> -		total = req.sq_pages + req.rq_pages;
>> -		use_pbles = (total > 2);
>> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr);
>>   		if (err)
>>   			goto error;
>>
>> -		ucontext = rdma_udata_to_drv_context(udata, struct
>> irdma_ucontext,
>> -						     ibucontext);
>> -		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
>> -		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
>> -		iwpbl->on_list = true;
>> -		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>>   		break;
>>   	case IRDMA_MEMREG_TYPE_CQ:
>>   		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
>> IRDMA_FEATURE_CQ_RESIZE)
>> --
>> 2.31.1


* Re: [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
  2023-01-10  4:12   ` Saleem, Shiraz
@ 2023-01-11  6:23     ` Zhu Yanjun
  0 siblings, 0 replies; 14+ messages in thread
From: Zhu Yanjun @ 2023-01-11  6:23 UTC (permalink / raw)
  To: Saleem, Shiraz, Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma


On 2023/1/10 12:12, Saleem, Shiraz wrote:
>> Subject: [PATCH for-next 4/4] RDMA/irdma: Split CQ handler into
>> irdma_reg_user_mr_type_cq
>>
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>
>> Split the source code related to CQ handling into a new function.
>>
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>>   drivers/infiniband/hw/irdma/verbs.c | 60 +++++++++++++++++------------
>>   1 file changed, 35 insertions(+), 25 deletions(-)
>>
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index e90eba73c396..b4befbafb830 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2864,6 +2864,40 @@ static int irdma_reg_user_mr_type_qp(struct
>> irdma_mem_reg_req req,
>>   	return err;
>>   }
>>
>> +static int irdma_reg_user_mr_type_cq(struct irdma_device *iwdev,
>> +				     struct irdma_mr *iwmr,
>> +				     struct ib_udata *udata,
>> +				     struct irdma_mem_reg_req req)
> I would keep the order of these API args the same as for irdma_reg_user_mr_type_qp.
Got it.
>
>> +{
>> +	int err = 0;
> No need to initialize.
Got it.
>
>> +	u8 shadow_pgcnt = 1;
>> +	bool use_pbles = false;
> No need to initialize use_pbles.
Got it.
>
>> +	struct irdma_ucontext *ucontext;
>> +	unsigned long flags;
>> +	u32 total;
>> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> +
>> +	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
>> IRDMA_FEATURE_CQ_RESIZE)
>> +		shadow_pgcnt = 0;
>> +	total = req.cq_pages + shadow_pgcnt;
>> +	if (total > iwmr->page_cnt)
>> +		return -EINVAL;
>> +
>> +	use_pbles = (req.cq_pages > 1);
>> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +	if (err)
>> +		return err;
>> +
>> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> +					     ibucontext);
>> +	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> +	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> +	iwpbl->on_list = true;
>> +	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>> +
>> +	return err;
>> +}
>> +
>>   /**
>>    * irdma_reg_user_mr - Register a user memory region
>>    * @pd: ptr of pd
>> @@ -2879,15 +2913,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd
>> *pd, u64 start, u64 len,  {  #define IRDMA_MEM_REG_MIN_REQ_LEN
>> offsetofend(struct irdma_mem_reg_req, sq_pages)
>>   	struct irdma_device *iwdev = to_iwdev(pd->device);
>> -	struct irdma_ucontext *ucontext;
>> -	struct irdma_pbl *iwpbl;
>>   	struct irdma_mr *iwmr;
>>   	struct ib_umem *region;
>>   	struct irdma_mem_reg_req req;
>> -	u32 total;
>> -	u8 shadow_pgcnt = 1;
>> -	bool use_pbles = false;
>> -	unsigned long flags;
>>   	int err = -EINVAL;
> Do we need to initialize err here too? Probably separate from this patch, but it could be cleaned up.

Yes. In the switch statement, the default branch does not assign any
error value to err.

In the latest commit, err is assigned -EINVAL in the default branch,
and err is no longer initialized here.
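i.e., roughly:

	switch (req.reg_type) {
	...
	default:
		err = -EINVAL;
		goto error;
	}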

I will send out the latest commits very soon.

Zhu Yanjun

>
>>   	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>> @@ -2915,8 +2943,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd,
>> u64 start, u64 len,
>>   		return (struct ib_mr *)iwmr;
>>   	}
>>
>> -	iwpbl = &iwmr->iwpbl;
>> -
>>   	switch (req.reg_type) {
>>   	case IRDMA_MEMREG_TYPE_QP:
>>   		err = irdma_reg_user_mr_type_qp(req, iwdev, udata, iwmr); @@ -
>> 2925,25 +2951,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64
>> start, u64 len,
>>
>>   		break;
>>   	case IRDMA_MEMREG_TYPE_CQ:
>> -		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
>> IRDMA_FEATURE_CQ_RESIZE)
>> -			shadow_pgcnt = 0;
>> -		total = req.cq_pages + shadow_pgcnt;
>> -		if (total > iwmr->page_cnt) {
>> -			err = -EINVAL;
>> -			goto error;
>> -		}
>> -
>> -		use_pbles = (req.cq_pages > 1);
>> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> +		err = irdma_reg_user_mr_type_cq(iwdev, iwmr, udata, req);
>>   		if (err)
>>   			goto error;
>> -
>> -		ucontext = rdma_udata_to_drv_context(udata, struct
>> irdma_ucontext,
>> -						     ibucontext);
>> -		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> -		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> -		iwpbl->on_list = true;
>> -		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>>   		break;
>>   	case IRDMA_MEMREG_TYPE_MEM:
>>   		err = irdma_reg_user_mr_type_mem(iwdev, iwmr, access);
>> --
>> 2.31.1

