linux-rdma.vger.kernel.org archive mirror
* [PATCH v4 for-next] RDMA/rxe: Fix error paths in MR alloc routines
@ 2022-08-04  4:37 Bob Pearson
  2022-08-04  4:42 ` Bob Pearson
  2022-08-05  7:08 ` lizhijian
  0 siblings, 2 replies; 5+ messages in thread
From: Bob Pearson @ 2022-08-04  4:37 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, jhack, linux-rdma; +Cc: Bob Pearson

Currently the rxe driver has incorrect code in the error paths of its
MR allocation routines. The PD and umem are always freed in
rxe_mr_cleanup(), but in some error paths they have already been freed
or were never set. This patch makes sure that the PD is always set and
checks whether umem is set before freeing it in rxe_mr_cleanup(). The
umem and map tables are freed if an error occurs in an MR allocation
call.
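
In short, the calling convention after this patch is (condensed sketch;
rxe_mr_init_xxx() stands in for the rxe_mr_init_user()/rxe_mr_init_fast()
calls shown in the diff below):

	rxe_get(pd);
	mr->ibmr.pd = ibpd;		/* set before any failure point */

	err = rxe_mr_init_xxx(...);	/* may fail with umem/map unset */
	if (err) {
		rxe_cleanup(mr);	/* cleanup can now rely on pd */
		return ERR_PTR(err);
	}

while rxe_mr_cleanup() only releases what was actually set up:

	rxe_put(mr_pd(mr));		/* pd is always valid here */
	if (mr->umem)			/* umem may never have been set */
		ib_umem_release(mr->umem);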

Reported-by: Li Zhijian <lizhijian@fujitsu.com>
Link: https://lore.kernel.org/linux-rdma/11dafa5f-c52d-16c1-fe37-2cd45ab20474@fujitsu.com/
Fixes: 3902b429ca14 ("Implement invalidate MW operations")
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
v4:
  Added setting mr->ibmr.pd back so that an -ENOMEM error no longer
  causes rxe_put(mr_pd(mr)) in rxe_mr_cleanup() to segfault because pd
  was never set in the error path.
v3:
  rebased to apply to current for-next after
  	Revert "RDMA/rxe: Create duplicate mapping tables for FMRs"
v2:
  Moved setting mr->umem until after the checks to avoid passing an
  ERR_PTR to ib_umem_release(); see the sketch after this changelog.
  Cleaned up the umem and map allocations if an error occurs in the
  alloc mr calls.
  Rebased to current for-next.
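
For reference, the hazard the v2 change avoids looks like this in
rxe_mr_init_user() (condensed from the hunk below):

	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
	if (IS_ERR(umem))		/* on failure this is ERR_PTR(-errno), */
		return PTR_ERR(umem);	/* not NULL, so it must never be stored */
	...
	mr->umem = umem;		/* only ever assigned a valid pointer, so
					 * rxe_mr_cleanup() can release it safely */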

 drivers/infiniband/sw/rxe/rxe_loc.h   |  6 +-
 drivers/infiniband/sw/rxe/rxe_mr.c    | 93 +++++++++++----------------
 drivers/infiniband/sw/rxe/rxe_verbs.c | 57 +++++++---------
 3 files changed, 62 insertions(+), 94 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 22f6cc31d1d6..c2a5c8814a48 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 /* rxe_mr.c */
 u8 rxe_get_next_key(u32 last_key);
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+void rxe_mr_init_dma(int access, struct rxe_mr *mr);
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr);
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 		enum rxe_mr_copy_dir dir);
 int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 850b80f5ad8b..3814f8d3c2a9 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -67,20 +67,24 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
 
 static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
 {
-	int i;
-	int num_map;
 	struct rxe_map **map = mr->map;
+	int num_map;
+	int i;
 
 	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
 
 	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
 	if (!mr->map)
-		goto err1;
+		return -ENOMEM;
 
 	for (i = 0; i < num_map; i++) {
 		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
-		if (!mr->map[i])
-			goto err2;
+		if (!mr->map[i]) {
+			for (i--; i >= 0; i--)
+				kfree(mr->map[i]);
+			kfree(mr->map);
+			return -ENOMEM;
+		}
 	}
 
 	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
@@ -93,55 +97,40 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
 	mr->max_buf = num_map * RXE_BUF_PER_MAP;
 
 	return 0;
-
-err2:
-	for (i--; i >= 0; i--)
-		kfree(mr->map[i]);
-
-	kfree(mr->map);
-err1:
-	return -ENOMEM;
 }
 
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
+void rxe_mr_init_dma(int access, struct rxe_mr *mr)
 {
 	rxe_mr_init(access, mr);
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->access = access;
 	mr->state = RXE_MR_STATE_VALID;
 	mr->type = IB_MR_TYPE_DMA;
 }
 
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
-	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf = NULL;
-	struct ib_umem		*umem;
-	struct sg_page_iter	sg_iter;
-	int			num_buf;
-	void			*vaddr;
+	struct rxe_phys_buf *buf = NULL;
+	struct sg_page_iter sg_iter;
+	struct rxe_map **map;
+	struct ib_umem *umem;
+	int num_buf;
+	void *vaddr;
 	int err;
-	int i;
 
-	umem = ib_umem_get(pd->ibpd.device, start, length, access);
-	if (IS_ERR(umem)) {
-		pr_warn("%s: Unable to pin memory region err = %d\n",
-			__func__, (int)PTR_ERR(umem));
-		err = PTR_ERR(umem);
-		goto err_out;
-	}
+	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
+	if (IS_ERR(umem))
+		return PTR_ERR(umem);
 
 	num_buf = ib_umem_num_pages(umem);
 
 	rxe_mr_init(access, mr);
 
-	err = rxe_mr_alloc(mr, num_buf);
+	err = -ENOMEM; //err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("%s: Unable to allocate memory for map\n",
-				__func__);
-		goto err_release_umem;
+		ib_umem_release(umem);
+		return err;
 	}
 
 	mr->page_shift = PAGE_SHIFT;
@@ -152,7 +141,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	if (length > 0) {
 		buf = map[0]->buf;
 
-		for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
+		for_each_sgtable_page(&umem->sgt_append.sgt, &sg_iter, 0) {
 			if (num_buf >= RXE_BUF_PER_MAP) {
 				map++;
 				buf = map[0]->buf;
@@ -161,21 +150,22 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("%s: Unable to get virtual address\n",
-						__func__);
-				err = -ENOMEM;
-				goto err_cleanup_map;
+				int i;
+
+				for (i = 0; i < mr->num_map; i++)
+					kfree(mr->map[i]);
+				kfree(mr->map);
+				ib_umem_release(umem);
+				return -ENOMEM;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
 			buf->size = PAGE_SIZE;
 			num_buf++;
 			buf++;
-
 		}
 	}
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->umem = umem;
 	mr->access = access;
 	mr->length = length;
@@ -186,18 +176,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	mr->type = IB_MR_TYPE_USER;
 
 	return 0;
-
-err_cleanup_map:
-	for (i = 0; i < mr->num_map; i++)
-		kfree(mr->map[i]);
-	kfree(mr->map);
-err_release_umem:
-	ib_umem_release(umem);
-err_out:
-	return err;
 }
 
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 {
 	int err;
 
@@ -206,17 +187,13 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 
 	err = rxe_mr_alloc(mr, max_pages);
 	if (err)
-		goto err1;
+		return err;
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->max_buf = max_pages;
 	mr->state = RXE_MR_STATE_FREE;
 	mr->type = IB_MR_TYPE_MEM_REG;
 
 	return 0;
-
-err1:
-	return err;
 }
 
 static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
@@ -630,7 +607,9 @@ void rxe_mr_cleanup(struct rxe_pool_elem *elem)
 	int i;
 
 	rxe_put(mr_pd(mr));
-	ib_umem_release(mr->umem);
+
+	if (mr->umem)
+		ib_umem_release(mr->umem);
 
 	if (mr->map) {
 		for (i = 0; i < mr->num_map; i++)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index e264cf69bf55..4ab32df13bd6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -903,45 +903,39 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
-	rxe_mr_init_dma(pd, access, mr);
+	mr->ibmr.pd = ibpd;
+
+	rxe_mr_init_dma(access, mr);
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
 }
 
-static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
-				     u64 start,
-				     u64 length,
-				     u64 iova,
-				     int access, struct ib_udata *udata)
+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
+				     u64 length, u64 iova, int access,
+				     struct ib_udata *udata)
 {
-	int err;
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
 	struct rxe_pd *pd = to_rpd(ibpd);
 	struct rxe_mr *mr;
+	int err;
 
 	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err2;
-	}
-
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
 
-	err = rxe_mr_init_user(pd, start, length, iova, access, mr);
-	if (err)
-		goto err3;
+	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
+	if (err) {
+		rxe_cleanup(mr);
+		return ERR_PTR(err);
+	}
 
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
-
-err3:
-	rxe_put(pd);
-	rxe_cleanup(mr);
-err2:
-	return ERR_PTR(err);
 }
 
 static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
@@ -956,26 +950,21 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 		return ERR_PTR(-EINVAL);
 
 	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err1;
-	}
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
 
-	err = rxe_mr_init_fast(pd, max_num_sg, mr);
-	if (err)
-		goto err2;
+	err = rxe_mr_init_fast(max_num_sg, mr);
+	if (err) {
+		rxe_cleanup(mr);
+		return ERR_PTR(err);
+	}
 
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
-
-err2:
-	rxe_put(pd);
-	rxe_cleanup(mr);
-err1:
-	return ERR_PTR(err);
 }
 
 static int rxe_set_page(struct ib_mr *ibmr, u64 addr)

base-commit: 6b822d408b58c3c4f26dae93245c6b7d8b39e0f9
-- 
2.34.1



* Re: [PATCH v4 for-next] RDMA/rxe: Fix error paths in MR alloc routines
  2022-08-04  4:37 [PATCH v4 for-next] RDMA/rxe: Fix error paths in MR alloc routines Bob Pearson
@ 2022-08-04  4:42 ` Bob Pearson
  2022-08-05  7:08 ` lizhijian
  1 sibling, 0 replies; 5+ messages in thread
From: Bob Pearson @ 2022-08-04  4:42 UTC (permalink / raw)
  To: jgg, zyjzyj2000, lizhijian, jhack, linux-rdma

On 8/3/22 23:37, Bob Pearson wrote:

Li,

It wasn't umem; it was pd not getting set in the rdma-core routines when
the forced error occurred. Then, when I called rxe_put(pd), it
segfaulted. I fixed it by putting back the code that sets pd early,
before an error can occur.

Bob
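
Roughly the failing sequence before this v4 change (a sketch, assuming
mr_pd() simply follows mr->ibmr.pd):

	err = rxe_mr_init_user(...);	/* forced error; mr->ibmr.pd still NULL */
	rxe_cleanup(mr);		/* reaches rxe_mr_cleanup(), whose
					 * rxe_put(mr_pd(mr)) follows the NULL
					 * pd and oopses */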


* Re: [PATCH v4 for-next] RDMA/rxe: Fix error paths in MR alloc routines
  2022-08-04  4:37 [PATCH v4 for-next] RDMA/rxe: Fix error paths in MR alloc routines Bob Pearson
  2022-08-04  4:42 ` Bob Pearson
@ 2022-08-05  7:08 ` lizhijian
  2022-08-05 17:48   ` Bob Pearson
  1 sibling, 1 reply; 5+ messages in thread
From: lizhijian @ 2022-08-05  7:08 UTC (permalink / raw)
  To: Bob Pearson, jgg, zyjzyj2000, jhack, linux-rdma

Bob

It does fix the panic.

TBH I'm not a big fan of the style of these patches; you didn't make the smallest possible patch.
You made a lot of extra cleanups and coding-style changes, which look fine to me logically,
but they make the patch massive and demand more attention from reviewers.

I'd prefer the smallest fix plus a separate cleanup if needed; the decision is up to the maintainers :)



On 04/08/2022 12:37, Bob Pearson wrote:
[...]
> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> index 850b80f5ad8b..3814f8d3c2a9 100644
> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> @@ -67,20 +67,24 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
>   
>   static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)

For this subroutine, nothing changes logically; it was only restyled.


>   {
> -	int i;
> -	int num_map;
>   	struct rxe_map **map = mr->map;
> +	int num_map;
> +	int i;
>   
>   	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
>   
>   	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
>   	if (!mr->map)
> -		goto err1;
> +		return -ENOMEM;
>   
>   	for (i = 0; i < num_map; i++) {
>   		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
> -		if (!mr->map[i])
> -			goto err2;
> +		if (!mr->map[i]) {
> +			for (i--; i >= 0; i--)
> +				kfree(mr->map[i]);
> +			kfree(mr->map);
> +			return -ENOMEM;
> +		}
>   	}
>   
>   	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
> @@ -93,55 +97,40 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
>   	mr->max_buf = num_map * RXE_BUF_PER_MAP;
>   
>   	return 0;
> -
> -err2:
> -	for (i--; i >= 0; i--)
> -		kfree(mr->map[i]);
> -
> -	kfree(mr->map);
> -err1:
> -	return -ENOMEM;
>   }
>   
> -void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> +void rxe_mr_init_dma(int access, struct rxe_mr *mr)
>   {
>   	rxe_mr_init(access, mr);
>   
> -	mr->ibmr.pd = &pd->ibpd;
>   	mr->access = access;
>   	mr->state = RXE_MR_STATE_VALID;
>   	mr->type = IB_MR_TYPE_DMA;
>   }
>   
> -int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> +int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>   		     int access, struct rxe_mr *mr)
>   {
> -	struct rxe_map		**map;
> -	struct rxe_phys_buf	*buf = NULL;
> -	struct ib_umem		*umem;
> -	struct sg_page_iter	sg_iter;
> -	int			num_buf;
> -	void			*vaddr;
> +	struct rxe_phys_buf *buf = NULL;
> +	struct sg_page_iter sg_iter;
> +	struct rxe_map **map;
> +	struct ib_umem *umem;
> +	int num_buf;
> +	void *vaddr;

Your indentation style is fine by me, but it makes me nervous to have to check whether there is any logical change.



>   	int err;
> -	int i;
>   
> -	umem = ib_umem_get(pd->ibpd.device, start, length, access);
> -	if (IS_ERR(umem)) {
> -		pr_warn("%s: Unable to pin memory region err = %d\n",
> -			__func__, (int)PTR_ERR(umem));
> -		err = PTR_ERR(umem);
> -		goto err_out;
I'm in favor of the original 'goto' style, and of keeping the warning.

> -	}
> +	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
> +	if (IS_ERR(umem))
> +		return PTR_ERR(umem);
>   
>   	num_buf = ib_umem_num_pages(umem);
>   
>   	rxe_mr_init(access, mr);
>   
> -	err = rxe_mr_alloc(mr, num_buf);
> +	err = -ENOMEM; //err = rxe_mr_alloc(mr, num_buf);

This is a dirty change; it looks like leftover error-injection code from debugging.



>   	if (err) {
> -		pr_warn("%s: Unable to allocate memory for map\n",
> -				__func__);
> -		goto err_release_umem;
> +		ib_umem_release(umem);
Ditto; goto is not too bad, especially since another place does the same cleanup.

Same opinion on the rest of the changes.

Thanks
Zhijian


* Re: [PATCH v4 for-next] RDMA/rxe: Fix error paths in MR alloc routines
  2022-08-05  7:08 ` lizhijian
@ 2022-08-05 17:48   ` Bob Pearson
  2022-08-05 18:21     ` Jason Gunthorpe
  0 siblings, 1 reply; 5+ messages in thread
From: Bob Pearson @ 2022-08-05 17:48 UTC (permalink / raw)
  To: lizhijian, jgg, zyjzyj2000, jhack, linux-rdma

On 8/5/22 02:08, lizhijian@fujitsu.com wrote:
> Bob
> 
> It does fix the panic.
> 
> TBH I'm not a big fan of the style of these patches; you didn't make the smallest possible patch.
> You made a lot of extra cleanups and coding-style changes, which look fine to me logically,
> but they make the patch massive and demand more attention from reviewers.
> 
> I'd prefer the smallest fix plus a separate cleanup if needed; the decision is up to the maintainers :)
> 
> 
> 
Li,

OK, I'll split it up. I'm ambivalent about gotos: some days I like them and some days I don't, for no logical reason.
In this particular case I was concerned with making absolutely sure that the cleanups balanced out,
so I touched all that code in passing. Now that it works, I can reduce the change.

Bob


* Re: [PATCH v4 for-next] RDMA/rxe: Fix error paths in MR alloc routines
  2022-08-05 17:48   ` Bob Pearson
@ 2022-08-05 18:21     ` Jason Gunthorpe
  0 siblings, 0 replies; 5+ messages in thread
From: Jason Gunthorpe @ 2022-08-05 18:21 UTC (permalink / raw)
  To: Bob Pearson; +Cc: lizhijian, zyjzyj2000, jhack, linux-rdma

On Fri, Aug 05, 2022 at 12:48:32PM -0500, Bob Pearson wrote:
> On 8/5/22 02:08, lizhijian@fujitsu.com wrote:
> > Bob
> > 
> > It does fix the panic.
> > 
> > TBH I'm not a big fan of the style of these patches; you didn't make the smallest possible patch.
> > You made a lot of extra cleanups and coding-style changes, which look fine to me logically,
> > but they make the patch massive and demand more attention from reviewers.
> > 
> > I'd prefer the smallest fix plus a separate cleanup if needed; the decision is up to the maintainers :)
> > 
> > 
> > 
> Li,
> 
> OK, I'll split it up. I'm ambivalent about gotos: some days I like them and some days I don't, for no logical reason.
> In this particular case I was concerned with making absolutely sure that the cleanups balanced out,
> so I touched all that code in passing. Now that it works, I can reduce the change.

Kernel style is goto unwind on error; you should use it freely and
never duplicate error unwind in branches.

Jason
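
A minimal, generic illustration of the convention Jason describes (not rxe
code; struct foo and the register_thing()/start_thing() helpers are made up
for the example):

	static int foo_setup(struct foo *f)
	{
		int err;

		f->buf = kzalloc(SZ_4K, GFP_KERNEL);
		if (!f->buf)
			return -ENOMEM;		/* nothing to unwind yet */

		err = register_thing(f);	/* hypothetical step that can fail */
		if (err)
			goto err_free_buf;

		err = start_thing(f);		/* another hypothetical step */
		if (err)
			goto err_unregister;

		return 0;

	err_unregister:
		unregister_thing(f);		/* unwind in reverse order ... */
	err_free_buf:
		kfree(f->buf);			/* ... without duplicating it in
						 * the branches above */
		return err;
	}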

