linux-rdma.vger.kernel.org archive mirror
* [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function
@ 2023-01-12  0:06 Zhu Yanjun
  2023-01-12  0:06 ` [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Zhu Yanjun @ 2023-01-12  0:06 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

V1->V2: Thanks to Shiraz Saleem.
        1) Remove the unnecessary variable initializations;
        2) Get iwdev by to_iwdev;
        3) Use the label free_pble to handle errors;
        4) Validate the page size before rdma_umem_for_each_dma_block.

Split the shared source code into several new functions for future use.
There are no bug fixes or new features in this series.

The new functions are as below:

irdma_reg_user_mr_type_mem
irdma_alloc_iwmr
irdma_free_iwmr
irdma_reg_user_mr_type_qp
irdma_reg_user_mr_type_cq

These functions will be used in the dmabuf feature.
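
For reference, a rough sketch of how a dmabuf registration path could
reuse these helpers (the function name and the pinned-umem setup below
are illustrative only, not part of this series):

	static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
						      u64 len, u64 virt, int fd,
						      int access,
						      struct ib_udata *udata)
	{
		struct ib_umem_dmabuf *umem_dmabuf;
		struct irdma_mr *iwmr;
		int err;

		/* Pin the dmabuf pages so the HW can DMA to/from them. */
		umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len,
							fd, access);
		if (IS_ERR(umem_dmabuf))
			return ERR_CAST(umem_dmabuf);

		/* Reuse the helpers split out by this series. */
		iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt,
					IRDMA_MEMREG_TYPE_MEM);
		if (IS_ERR(iwmr)) {
			ib_umem_release(&umem_dmabuf->umem);
			return ERR_CAST(iwmr);
		}

		err = irdma_reg_user_mr_type_mem(iwmr, access);
		if (err) {
			irdma_free_iwmr(iwmr);
			ib_umem_release(&umem_dmabuf->umem);
			return ERR_PTR(err);
		}

		return &iwmr->ibmr;
	}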

Zhu Yanjun (4):
  RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  RDMA/irdma: Split mr alloc and free into new functions
  RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq

 drivers/infiniband/hw/irdma/verbs.c | 264 +++++++++++++++++-----------
 1 file changed, 165 insertions(+), 99 deletions(-)

-- 
2.31.1



* [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  2023-01-12  0:06 [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
@ 2023-01-12  0:06 ` Zhu Yanjun
  2023-01-15 11:20   ` Leon Romanovsky
  2023-01-16  2:56   ` yanjun.zhu
  2023-01-12  0:06 ` [PATCHv2 for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions Zhu Yanjun
                   ` (3 subsequent siblings)
  4 siblings, 2 replies; 12+ messages in thread
From: Zhu Yanjun @ 2023-01-12  0:06 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

The source code related to IRDMA_MEMREG_TYPE_MEM is split out
into a new function, irdma_reg_user_mr_type_mem.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 81 +++++++++++++++++------------
 1 file changed, 49 insertions(+), 32 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index f4674ecf9c8c..b67c14aac0f2 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2745,6 +2745,53 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	return ret;
 }
 
+static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
+{
+	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+	int err;
+	bool use_pbles;
+	u32 stag;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+
+	use_pbles = (iwmr->page_cnt != 1);
+
+	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+	if (err)
+		return err;
+
+	if (use_pbles) {
+		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
+						iwmr->page_size);
+		if (err) {
+			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+			iwpbl->pbl_allocated = false;
+		}
+	}
+
+	stag = irdma_create_stag(iwdev);
+	if (!stag) {
+		err = -ENOMEM;
+		goto free_pble;
+	}
+
+	iwmr->stag = stag;
+	iwmr->ibmr.rkey = stag;
+	iwmr->ibmr.lkey = stag;
+	err = irdma_hwreg_mr(iwdev, iwmr, access);
+	if (err) {
+		irdma_free_stag(iwdev, stag);
+		goto free_pble;
+	}
+
+	return 0;
+
+free_pble:
+	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
+		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2761,12 +2808,11 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_ucontext *ucontext;
-	struct irdma_pble_alloc *palloc;
 	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 total, stag = 0;
+	u32 total;
 	u8 shadow_pgcnt = 1;
 	bool use_pbles = false;
 	unsigned long flags;
@@ -2817,7 +2863,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	}
 	iwmr->len = region->length;
 	iwpbl->user_base = virt;
-	palloc = &iwpbl->pble_alloc;
 	iwmr->type = req.reg_type;
 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
@@ -2863,36 +2908,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
-		use_pbles = (iwmr->page_cnt != 1);
-
-		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+		err = irdma_reg_user_mr_type_mem(iwmr, access);
 		if (err)
 			goto error;
 
-		if (use_pbles) {
-			err = irdma_check_mr_contiguous(palloc,
-							iwmr->page_size);
-			if (err) {
-				irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
-				iwpbl->pbl_allocated = false;
-			}
-		}
-
-		stag = irdma_create_stag(iwdev);
-		if (!stag) {
-			err = -ENOMEM;
-			goto error;
-		}
-
-		iwmr->stag = stag;
-		iwmr->ibmr.rkey = stag;
-		iwmr->ibmr.lkey = stag;
-		err = irdma_hwreg_mr(iwdev, iwmr, access);
-		if (err) {
-			irdma_free_stag(iwdev, stag);
-			goto error;
-		}
-
 		break;
 	default:
 		goto error;
@@ -2903,8 +2922,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	return &iwmr->ibmr;
 
 error:
-	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
-		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
 	ib_umem_release(region);
 	kfree(iwmr);
 
-- 
2.31.1



* [PATCHv2 for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions
  2023-01-12  0:06 [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
  2023-01-12  0:06 ` [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
@ 2023-01-12  0:06 ` Zhu Yanjun
  2023-01-12  0:06 ` [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Zhu Yanjun @ 2023-01-12  0:06 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

In the function irdma_reg_user_mr, the MR allocation and free
paths will be used by other functions. As such, the source code
related to MR allocation and free is split out into new functions.
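
For instance, a future caller could pair the two helpers roughly like
this (sketch only, with the middle of the flow elided):

	iwmr = irdma_alloc_iwmr(region, pd, virt, IRDMA_MEMREG_TYPE_MEM);
	if (IS_ERR(iwmr))
		return ERR_CAST(iwmr);

	/* ... register the memory with the HW ... */

	irdma_free_iwmr(iwmr);	/* on the error path */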

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 74 ++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 28 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index b67c14aac0f2..f4712276b920 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2792,6 +2792,48 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
 	return err;
 }
 
+static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+					 struct ib_pd *pd, u64 virt,
+					 enum irdma_memreg_type reg_type)
+{
+	struct irdma_mr *iwmr;
+	struct irdma_pbl *iwpbl;
+	struct irdma_device *iwdev = to_iwdev(pd->device);
+	unsigned long pgsz_bitmap;
+
+	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+	if (!iwmr)
+		return ERR_PTR(-ENOMEM);
+
+	iwpbl = &iwmr->iwpbl;
+	iwpbl->iwmr = iwmr;
+	iwmr->region = region;
+	iwmr->ibmr.pd = pd;
+	iwmr->ibmr.device = pd->device;
+	iwmr->ibmr.iova = virt;
+	iwmr->type = reg_type;
+
+	pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
+		iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
+
+	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
+	if (unlikely(!iwmr->page_size)) {
+		kfree(iwmr);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	iwmr->len = region->length;
+	iwpbl->user_base = virt;
+	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+	return iwmr;
+}
+
+static void irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+	kfree(iwmr);
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2837,34 +2879,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return ERR_PTR(-EFAULT);
 	}
 
-	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
-	if (!iwmr) {
+	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
+	if (IS_ERR(iwmr)) {
 		ib_umem_release(region);
-		return ERR_PTR(-ENOMEM);
+		return (struct ib_mr *)iwmr;
 	}
 
 	iwpbl = &iwmr->iwpbl;
-	iwpbl->iwmr = iwmr;
-	iwmr->region = region;
-	iwmr->ibmr.pd = pd;
-	iwmr->ibmr.device = pd->device;
-	iwmr->ibmr.iova = virt;
-	iwmr->page_size = PAGE_SIZE;
-
-	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
-		iwmr->page_size = ib_umem_find_best_pgsz(region,
-							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
-							 virt);
-		if (unlikely(!iwmr->page_size)) {
-			kfree(iwmr);
-			ib_umem_release(region);
-			return ERR_PTR(-EOPNOTSUPP);
-		}
-	}
-	iwmr->len = region->length;
-	iwpbl->user_base = virt;
-	iwmr->type = req.reg_type;
-	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
@@ -2917,13 +2938,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		goto error;
 	}
 
-	iwmr->type = req.reg_type;
-
 	return &iwmr->ibmr;
-
 error:
 	ib_umem_release(region);
-	kfree(iwmr);
+	irdma_free_iwmr(iwmr);
 
 	return ERR_PTR(err);
 }
-- 
2.31.1



* [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  2023-01-12  0:06 [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
  2023-01-12  0:06 ` [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
  2023-01-12  0:06 ` [PATCHv2 for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions Zhu Yanjun
@ 2023-01-12  0:06 ` Zhu Yanjun
  2023-01-15 11:23   ` Leon Romanovsky
  2023-01-16  2:58   ` yanjun.zhu
  2023-01-12  0:06 ` [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
  2023-01-12 16:42 ` [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Saleem, Shiraz
  4 siblings, 2 replies; 12+ messages in thread
From: Zhu Yanjun @ 2023-01-12  0:06 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the source code related to QP handling into a new function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
 1 file changed, 34 insertions(+), 14 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index f4712276b920..74dd1972c325 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2834,6 +2834,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
 	kfree(iwmr);
 }
 
+static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+				     struct ib_udata *udata,
+				     struct irdma_mr *iwmr)
+{
+	u32 total;
+	int err;
+	u8 shadow_pgcnt = 1;
+	bool use_pbles;
+	unsigned long flags;
+	struct irdma_ucontext *ucontext;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+
+	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	total = req.sq_pages + req.rq_pages;
+	use_pbles = (total > 2);
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2889,23 +2922,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
-		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-		total = req.sq_pages + req.rq_pages;
-		use_pbles = (total > 2);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
 		if (err)
 			goto error;
 
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-- 
2.31.1



* [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
  2023-01-12  0:06 [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
                   ` (2 preceding siblings ...)
  2023-01-12  0:06 ` [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
@ 2023-01-12  0:06 ` Zhu Yanjun
  2023-01-15 11:28   ` Leon Romanovsky
  2023-01-16  3:03   ` yanjun.zhu
  2023-01-12 16:42 ` [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Saleem, Shiraz
  4 siblings, 2 replies; 12+ messages in thread
From: Zhu Yanjun @ 2023-01-12  0:06 UTC (permalink / raw)
  To: mustafa.ismail, shiraz.saleem, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

From: Zhu Yanjun <yanjun.zhu@linux.dev>

Split the source code related to CQ handling into a new function.

Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
 drivers/infiniband/hw/irdma/verbs.c | 63 +++++++++++++++++------------
 1 file changed, 37 insertions(+), 26 deletions(-)

diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 74dd1972c325..3902c74d59f2 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
 	return err;
 }
 
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+				     struct ib_udata *udata,
+				     struct irdma_mr *iwmr)
+{
+	int err;
+	u8 shadow_pgcnt = 1;
+	bool use_pbles;
+	struct irdma_ucontext *ucontext;
+	unsigned long flags;
+	u32 total;
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+
+	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+		shadow_pgcnt = 0;
+	total = req.cq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	use_pbles = (req.cq_pages > 1);
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+	return err;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 {
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
-	struct irdma_ucontext *ucontext;
-	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 total;
-	u8 shadow_pgcnt = 1;
-	bool use_pbles = false;
-	unsigned long flags;
-	int err = -EINVAL;
+	int err;
 
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
@@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return (struct ib_mr *)iwmr;
 	}
 
-	iwpbl = &iwmr->iwpbl;
-
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
@@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
-		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-			shadow_pgcnt = 0;
-		total = req.cq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-
-		use_pbles = (req.cq_pages > 1);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
 		if (err)
 			goto error;
-
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
 		err = irdma_reg_user_mr_type_mem(iwmr, access);
@@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	default:
+		err = -EINVAL;
 		goto error;
 	}
 
-- 
2.31.1



* RE: [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function
  2023-01-12  0:06 [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
                   ` (3 preceding siblings ...)
  2023-01-12  0:06 ` [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
@ 2023-01-12 16:42 ` Saleem, Shiraz
  4 siblings, 0 replies; 12+ messages in thread
From: Saleem, Shiraz @ 2023-01-12 16:42 UTC (permalink / raw)
  To: Zhu, Yanjun, Ismail, Mustafa, jgg, leon, linux-rdma; +Cc: Zhu Yanjun

> Subject: [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr
> function
> 
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> V1->V2: Thanks to Shiraz Saleem.
>         1) Remove the unnecessary variable initializations;
>         2) Get iwdev by to_iwdev;
>         3) Use the label free_pble to handle errors;
>         4) Validate the page size before rdma_umem_for_each_dma_block.
> 
> Split the shared source code into several new functions for future use.
> There are no bug fixes or new features in this series.
> 
> The new functions are as below:
> 
> irdma_reg_user_mr_type_mem
> irdma_alloc_iwmr
> irdma_free_iwmr
> irdma_reg_user_mr_type_qp
> irdma_reg_user_mr_type_cq
> 
> These functions will be used in the dmabuf feature.
> 
> Zhu Yanjun (4):
>   RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
>   RDMA/irdma: Split mr alloc and free into new functions
>   RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
>   RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
> 
>  drivers/infiniband/hw/irdma/verbs.c | 264 +++++++++++++++++-----------
>  1 file changed, 165 insertions(+), 99 deletions(-)
> 
> --

Thanks!

Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>


* Re: [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  2023-01-12  0:06 ` [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
@ 2023-01-15 11:20   ` Leon Romanovsky
  2023-01-16  2:56   ` yanjun.zhu
  1 sibling, 0 replies; 12+ messages in thread
From: Leon Romanovsky @ 2023-01-15 11:20 UTC (permalink / raw)
  To: Zhu Yanjun; +Cc: mustafa.ismail, shiraz.saleem, jgg, linux-rdma, Zhu Yanjun

On Wed, Jan 11, 2023 at 07:06:14PM -0500, Zhu Yanjun wrote:
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> The source code related to IRDMA_MEMREG_TYPE_MEM is split out
> into a new function, irdma_reg_user_mr_type_mem.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 81 +++++++++++++++++------------
>  1 file changed, 49 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index f4674ecf9c8c..b67c14aac0f2 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2745,6 +2745,53 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
>  	return ret;
>  }
>  
> +static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
> +{
> +	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
> +	int err;
> +	bool use_pbles;
> +	u32 stag;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +
> +	use_pbles = (iwmr->page_cnt != 1);
> +
> +	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
> +	if (err)
> +		return err;
> +
> +	if (use_pbles) {
> +		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
> +						iwmr->page_size);
> +		if (err) {
> +			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
> +			iwpbl->pbl_allocated = false;
> +		}
> +	}
> +
> +	stag = irdma_create_stag(iwdev);
> +	if (!stag) {
> +		err = -ENOMEM;
> +		goto free_pble;
> +	}
> +
> +	iwmr->stag = stag;
> +	iwmr->ibmr.rkey = stag;
> +	iwmr->ibmr.lkey = stag;
> +	err = irdma_hwreg_mr(iwdev, iwmr, access);
> +	if (err) {
> +		irdma_free_stag(iwdev, stag);
> +		goto free_pble;

Please add new goto label and put irdma_free_stag() there.
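
I.e. roughly (the extra label name below is illustrative):

	err = irdma_hwreg_mr(iwdev, iwmr, access);
	if (err)
		goto err_hwreg;

	return 0;

err_hwreg:
	irdma_free_stag(iwdev, stag);
free_pble:
	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);

	return err;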

Thanks


* Re: [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  2023-01-12  0:06 ` [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
@ 2023-01-15 11:23   ` Leon Romanovsky
  2023-01-16  2:58   ` yanjun.zhu
  1 sibling, 0 replies; 12+ messages in thread
From: Leon Romanovsky @ 2023-01-15 11:23 UTC (permalink / raw)
  To: Zhu Yanjun; +Cc: mustafa.ismail, shiraz.saleem, jgg, linux-rdma, Zhu Yanjun

On Wed, Jan 11, 2023 at 07:06:16PM -0500, Zhu Yanjun wrote:
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the source code related to QP handling into a new function.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
>  1 file changed, 34 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index f4712276b920..74dd1972c325 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2834,6 +2834,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
>  	kfree(iwmr);
>  }
>  
> +static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
> +				     struct ib_udata *udata,
> +				     struct irdma_mr *iwmr)
> +{
> +	u32 total;
> +	int err;
> +	u8 shadow_pgcnt = 1;

It is constant, you don't need variable for that.

> +	bool use_pbles;
> +	unsigned long flags;
> +	struct irdma_ucontext *ucontext;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
> +
> +	total = req.sq_pages + req.rq_pages + shadow_pgcnt;
> +	if (total > iwmr->page_cnt)
> +		return -EINVAL;
> +
> +	total = req.sq_pages + req.rq_pages;
> +	use_pbles = (total > 2);

There is no need in brackets here.

> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +	if (err)
> +		return err;
> +
> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +					     ibucontext);
> +	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
> +	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
> +	iwpbl->on_list = true;
> +	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
> +
> +	return err;

return 0;
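
Folding the three nits above together, the touched lines would then
read (sketch only):

	total = req.sq_pages + req.rq_pages + 1;	/* one shadow page */
	if (total > iwmr->page_cnt)
		return -EINVAL;

	use_pbles = req.sq_pages + req.rq_pages > 2;
	...
	return 0;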

> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2889,23 +2922,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
> -		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
> -		if (total > iwmr->page_cnt) {
> -			err = -EINVAL;
> -			goto error;
> -		}
> -		total = req.sq_pages + req.rq_pages;
> -		use_pbles = (total > 2);
> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
>  		if (err)
>  			goto error;
>  
> -		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> -						     ibucontext);
> -		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
> -		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
> -		iwpbl->on_list = true;
> -		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_CQ:
>  		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> -- 
> 2.31.1
> 


* Re: [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
  2023-01-12  0:06 ` [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
@ 2023-01-15 11:28   ` Leon Romanovsky
  2023-01-16  3:03   ` yanjun.zhu
  1 sibling, 0 replies; 12+ messages in thread
From: Leon Romanovsky @ 2023-01-15 11:28 UTC (permalink / raw)
  To: Zhu Yanjun; +Cc: mustafa.ismail, shiraz.saleem, jgg, linux-rdma, Zhu Yanjun

On Wed, Jan 11, 2023 at 07:06:17PM -0500, Zhu Yanjun wrote:
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> Split the source code related to CQ handling into a new function.
> 
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
>  drivers/infiniband/hw/irdma/verbs.c | 63 +++++++++++++++++------------
>  1 file changed, 37 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
> index 74dd1972c325..3902c74d59f2 100644
> --- a/drivers/infiniband/hw/irdma/verbs.c
> +++ b/drivers/infiniband/hw/irdma/verbs.c
> @@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>  	return err;
>  }
>  
> +static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
> +				     struct ib_udata *udata,
> +				     struct irdma_mr *iwmr)
> +{
> +	int err;
> +	u8 shadow_pgcnt = 1;
> +	bool use_pbles;
> +	struct irdma_ucontext *ucontext;
> +	unsigned long flags;
> +	u32 total;
> +	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
> +	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);

It will be nice to see more structured variable initialization.

I'm not going to insist on it, but IMHO netdev reverse Christmas
tree rule looks more appealing than this random list.
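
E.g., ordered longest-to-shortest:

	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_ucontext *ucontext;
	unsigned long flags;
	u8 shadow_pgcnt = 1;
	bool use_pbles;
	u32 total;
	int err;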

> +
> +	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> +		shadow_pgcnt = 0;
> +	total = req.cq_pages + shadow_pgcnt;
> +	if (total > iwmr->page_cnt)
> +		return -EINVAL;
> +
> +	use_pbles = (req.cq_pages > 1);
> +	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +	if (err)
> +		return err;
> +
> +	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> +					     ibucontext);
> +	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> +	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> +	iwpbl->on_list = true;
> +	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
> +
> +	return err;

return 0;

> +}
> +
>  /**
>   * irdma_reg_user_mr - Register a user memory region
>   * @pd: ptr of pd
> @@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  {
>  #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
>  	struct irdma_device *iwdev = to_iwdev(pd->device);
> -	struct irdma_ucontext *ucontext;
> -	struct irdma_pbl *iwpbl;
>  	struct irdma_mr *iwmr;
>  	struct ib_umem *region;
>  	struct irdma_mem_reg_req req;
> -	u32 total;
> -	u8 shadow_pgcnt = 1;
> -	bool use_pbles = false;
> -	unsigned long flags;
> -	int err = -EINVAL;
> +	int err;
>  
>  	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>  		return ERR_PTR(-EINVAL);
> @@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  		return (struct ib_mr *)iwmr;
>  	}
>  
> -	iwpbl = &iwmr->iwpbl;
> -
>  	switch (req.reg_type) {
>  	case IRDMA_MEMREG_TYPE_QP:
>  		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
> @@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  
>  		break;
>  	case IRDMA_MEMREG_TYPE_CQ:
> -		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
> -			shadow_pgcnt = 0;
> -		total = req.cq_pages + shadow_pgcnt;
> -		if (total > iwmr->page_cnt) {
> -			err = -EINVAL;
> -			goto error;
> -		}
> -
> -		use_pbles = (req.cq_pages > 1);
> -		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
> +		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
>  		if (err)
>  			goto error;
> -
> -		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
> -						     ibucontext);
> -		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
> -		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
> -		iwpbl->on_list = true;
> -		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>  		break;
>  	case IRDMA_MEMREG_TYPE_MEM:
>  		err = irdma_reg_user_mr_type_mem(iwmr, access);
> @@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>  
>  		break;
>  	default:
> +		err = -EINVAL;
>  		goto error;
>  	}
>  
> -- 
> 2.31.1
> 


* Re: [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem
  2023-01-12  0:06 ` [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
  2023-01-15 11:20   ` Leon Romanovsky
@ 2023-01-16  2:56   ` yanjun.zhu
  1 sibling, 0 replies; 12+ messages in thread
From: yanjun.zhu @ 2023-01-16  2:56 UTC (permalink / raw)
  To: Leon Romanovsky, Zhu Yanjun
  Cc: mustafa.ismail, shiraz.saleem, jgg, linux-rdma

January 15, 2023 7:20 PM, "Leon Romanovsky" <leon@kernel.org> wrote:

> On Wed, Jan 11, 2023 at 07:06:14PM -0500, Zhu Yanjun wrote:
> 
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>> 
>> The source code related to IRDMA_MEMREG_TYPE_MEM is split out
>> into a new function, irdma_reg_user_mr_type_mem.
>> 
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>> drivers/infiniband/hw/irdma/verbs.c | 81 +++++++++++++++++------------
>> 1 file changed, 49 insertions(+), 32 deletions(-)
>> 
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index f4674ecf9c8c..b67c14aac0f2 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2745,6 +2745,53 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
>> return ret;
>> }
>> 
>> +static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
>> +{
>> + struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
>> + int err;
>> + bool use_pbles;
>> + u32 stag;
>> + struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> +
>> + use_pbles = (iwmr->page_cnt != 1);
>> +
>> + err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
>> + if (err)
>> + return err;
>> +
>> + if (use_pbles) {
>> + err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
>> + iwmr->page_size);
>> + if (err) {
>> + irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
>> + iwpbl->pbl_allocated = false;
>> + }
>> + }
>> +
>> + stag = irdma_create_stag(iwdev);
>> + if (!stag) {
>> + err = -ENOMEM;
>> + goto free_pble;
>> + }
>> +
>> + iwmr->stag = stag;
>> + iwmr->ibmr.rkey = stag;
>> + iwmr->ibmr.lkey = stag;
>> + err = irdma_hwreg_mr(iwdev, iwmr, access);
>> + if (err) {
>> + irdma_free_stag(iwdev, stag);
>> + goto free_pble;
> 
> Please add new goto label and put irdma_free_stag() there.

Got it. 

Zhu Yanjun

> 
> Thanks


* Re: [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp
  2023-01-12  0:06 ` [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
  2023-01-15 11:23   ` Leon Romanovsky
@ 2023-01-16  2:58   ` yanjun.zhu
  1 sibling, 0 replies; 12+ messages in thread
From: yanjun.zhu @ 2023-01-16  2:58 UTC (permalink / raw)
  To: Leon Romanovsky, Zhu Yanjun
  Cc: mustafa.ismail, shiraz.saleem, jgg, linux-rdma

January 15, 2023 7:23 PM, "Leon Romanovsky" <leon@kernel.org> wrote:

> On Wed, Jan 11, 2023 at 07:06:16PM -0500, Zhu Yanjun wrote:
> 
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>> 
>> Split the source code related to QP handling into a new function.
>> 
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>> drivers/infiniband/hw/irdma/verbs.c | 48 ++++++++++++++++++++---------
>> 1 file changed, 34 insertions(+), 14 deletions(-)
>> 
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index f4712276b920..74dd1972c325 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2834,6 +2834,39 @@ static void irdma_free_iwmr(struct irdma_mr *iwmr)
>> kfree(iwmr);
>> }
>> 
>> +static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>> + struct ib_udata *udata,
>> + struct irdma_mr *iwmr)
>> +{
>> + u32 total;
>> + int err;
>> + u8 shadow_pgcnt = 1;
> 
> It is constant, you don't need variable for that.

Got it. The variable is removed.

> 
>> + bool use_pbles;
>> + unsigned long flags;
>> + struct irdma_ucontext *ucontext;
>> + struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> + struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
>> +
>> + total = req.sq_pages + req.rq_pages + shadow_pgcnt;
>> + if (total > iwmr->page_cnt)
>> + return -EINVAL;
>> +
>> + total = req.sq_pages + req.rq_pages;
>> + use_pbles = (total > 2);
> 
> There is no need in brackets here.

The brackets are removed in the latest commit.

> 
>> + err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> + if (err)
>> + return err;
>> +
>> + ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> + ibucontext);
>> + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
>> + list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
>> + iwpbl->on_list = true;
>> + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>> +
>> + return err;
> 
> return 0;

Got it.

Zhu Yanjun

> 
>> +}
>> +
>> /**
>> * irdma_reg_user_mr - Register a user memory region
>> * @pd: ptr of pd
>> @@ -2889,23 +2922,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64
>> len,
>> 
>> switch (req.reg_type) {
>> case IRDMA_MEMREG_TYPE_QP:
>> - total = req.sq_pages + req.rq_pages + shadow_pgcnt;
>> - if (total > iwmr->page_cnt) {
>> - err = -EINVAL;
>> - goto error;
>> - }
>> - total = req.sq_pages + req.rq_pages;
>> - use_pbles = (total > 2);
>> - err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> + err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
>> if (err)
>> goto error;
>> 
>> - ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> - ibucontext);
>> - spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
>> - list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
>> - iwpbl->on_list = true;
>> - spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
>> break;
>> case IRDMA_MEMREG_TYPE_CQ:
>> if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
>> --
>> 2.31.1


* Re: [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
  2023-01-12  0:06 ` [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
  2023-01-15 11:28   ` Leon Romanovsky
@ 2023-01-16  3:03   ` yanjun.zhu
  1 sibling, 0 replies; 12+ messages in thread
From: yanjun.zhu @ 2023-01-16  3:03 UTC (permalink / raw)
  To: Leon Romanovsky, Zhu Yanjun
  Cc: mustafa.ismail, shiraz.saleem, jgg, linux-rdma

January 15, 2023 7:28 PM, "Leon Romanovsky" <leon@kernel.org> wrote:

> On Wed, Jan 11, 2023 at 07:06:17PM -0500, Zhu Yanjun wrote:
> 
>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>> 
>> Split the source code related to CQ handling into a new function.
>> 
>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>> ---
>> drivers/infiniband/hw/irdma/verbs.c | 63 +++++++++++++++++------------
>> 1 file changed, 37 insertions(+), 26 deletions(-)
>> 
>> diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
>> index 74dd1972c325..3902c74d59f2 100644
>> --- a/drivers/infiniband/hw/irdma/verbs.c
>> +++ b/drivers/infiniband/hw/irdma/verbs.c
>> @@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
>> return err;
>> }
>> 
>> +static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
>> + struct ib_udata *udata,
>> + struct irdma_mr *iwmr)
>> +{
>> + int err;
>> + u8 shadow_pgcnt = 1;
>> + bool use_pbles;
>> + struct irdma_ucontext *ucontext;
>> + unsigned long flags;
>> + u32 total;
>> + struct irdma_pbl *iwpbl = &iwmr->iwpbl;
>> + struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
> 
> It will be nice to see more structured variable initialization.
> 
> I'm not going to insist on it, but IMHO netdev reverse Christmas
> tree rule looks more appealing than this random list.

Got it. The variable declarations are reordered following the
netdev reverse Christmas tree rule in the latest commits.

> 
>> +
>> + if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
>> + shadow_pgcnt = 0;
>> + total = req.cq_pages + shadow_pgcnt;
>> + if (total > iwmr->page_cnt)
>> + return -EINVAL;
>> +
>> + use_pbles = (req.cq_pages > 1);
>> + err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> + if (err)
>> + return err;
>> +
>> + ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> + ibucontext);
>> + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> + list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> + iwpbl->on_list = true;
>> + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>> +
>> + return err;
> 
> return 0;

I will send out the latest commits very soon.

Zhu Yanjun

> 
>> +}
>> +
>> /**
>> * irdma_reg_user_mr - Register a user memory region
>> * @pd: ptr of pd
>> @@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64
>> len,
>> {
>> #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
>> struct irdma_device *iwdev = to_iwdev(pd->device);
>> - struct irdma_ucontext *ucontext;
>> - struct irdma_pbl *iwpbl;
>> struct irdma_mr *iwmr;
>> struct ib_umem *region;
>> struct irdma_mem_reg_req req;
>> - u32 total;
>> - u8 shadow_pgcnt = 1;
>> - bool use_pbles = false;
>> - unsigned long flags;
>> - int err = -EINVAL;
>> + int err;
>> 
>> if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
>> return ERR_PTR(-EINVAL);
>> @@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>> return (struct ib_mr *)iwmr;
>> }
>> 
>> - iwpbl = &iwmr->iwpbl;
>> -
>> switch (req.reg_type) {
>> case IRDMA_MEMREG_TYPE_QP:
>> err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
>> @@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>> 
>> break;
>> case IRDMA_MEMREG_TYPE_CQ:
>> - if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
>> - shadow_pgcnt = 0;
>> - total = req.cq_pages + shadow_pgcnt;
>> - if (total > iwmr->page_cnt) {
>> - err = -EINVAL;
>> - goto error;
>> - }
>> -
>> - use_pbles = (req.cq_pages > 1);
>> - err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
>> + err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
>> if (err)
>> goto error;
>> -
>> - ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
>> - ibucontext);
>> - spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
>> - list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
>> - iwpbl->on_list = true;
>> - spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
>> break;
>> case IRDMA_MEMREG_TYPE_MEM:
>> err = irdma_reg_user_mr_type_mem(iwmr, access);
>> @@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
>> 
>> break;
>> default:
>> + err = -EINVAL;
>> goto error;
>> }
>> 
>> --
>> 2.31.1


end of thread, other threads:[~2023-01-16  3:04 UTC | newest]

Thread overview: 12+ messages
2023-01-12  0:06 [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Zhu Yanjun
2023-01-12  0:06 ` [PATCHv2 for-next 1/4] RDMA/irdma: Split MEM handler into irdma_reg_user_mr_type_mem Zhu Yanjun
2023-01-15 11:20   ` Leon Romanovsky
2023-01-16  2:56   ` yanjun.zhu
2023-01-12  0:06 ` [PATCHv2 for-next 2/4] RDMA/irdma: Split mr alloc and free into new functions Zhu Yanjun
2023-01-12  0:06 ` [PATCHv2 for-next 3/4] RDMA/irdma: Split QP handler into irdma_reg_user_mr_type_qp Zhu Yanjun
2023-01-15 11:23   ` Leon Romanovsky
2023-01-16  2:58   ` yanjun.zhu
2023-01-12  0:06 ` [PATCHv2 for-next 4/4] RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq Zhu Yanjun
2023-01-15 11:28   ` Leon Romanovsky
2023-01-16  3:03   ` yanjun.zhu
2023-01-12 16:42 ` [PATCHv2 for-next 0/4] RDMA/irdma: Refactor irdma_reg_user_mr function Saleem, Shiraz
