* [PATCH for-next] RDMA/rxe: Fix error paths in MR alloc routines
@ 2022-07-15 18:24 Bob Pearson
  2022-07-18  2:03 ` lizhijian
  2022-07-21 19:15 ` Jason Gunthorpe
From: Bob Pearson @ 2022-07-15 18:24 UTC (permalink / raw)
  To: lizhijian, jgg, zyjzyj2000, linux-rdma; +Cc: Bob Pearson

Currently the rxe driver has incorrect code in error paths for
allocating MR objects. The PD and umem are always freed in
rxe_mr_cleanup() but in some error paths they are already
freed or never set. This patch makes sure that the PD is always
set and checks to see if umem is set before freeing it in
rxe_mr_cleanup().

Reported-by: Li Zhijian <lizhijian@fujitsu.com>
Link: https://lore.kernel.org/linux-rdma/11dafa5f-c52d-16c1-fe37-2cd45ab20474@fujitsu.com/
Fixes: 3902b429ca14 ("Implement invalidate MW operations")
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h   |  6 ++--
 drivers/infiniband/sw/rxe/rxe_mr.c    | 36 +++++++-------------
 drivers/infiniband/sw/rxe/rxe_verbs.c | 47 +++++++++++----------------
 3 files changed, 34 insertions(+), 55 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 0e022ae1b8a5..8969918275f9 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 /* rxe_mr.c */
 u8 rxe_get_next_key(u32 last_key);
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+void rxe_mr_init_dma(int access, struct rxe_mr *mr);
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr);
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 		enum rxe_mr_copy_dir dir);
 int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 9a5c2af6a56f..674af4c36c49 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -151,17 +151,16 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
 	return -ENOMEM;
 }
 
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
+void rxe_mr_init_dma(int access, struct rxe_mr *mr)
 {
 	rxe_mr_init(access, mr);
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->access = access;
 	mr->state = RXE_MR_STATE_VALID;
 	mr->type = IB_MR_TYPE_DMA;
 }
 
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
 	struct rxe_map_set	*set;
@@ -173,12 +172,11 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	void			*vaddr;
 	int err;
 
-	umem = ib_umem_get(pd->ibpd.device, start, length, access);
+	mr->umem = umem = ib_umem_get(&rxe->ib_dev, start, length, access);
 	if (IS_ERR(umem)) {
 		pr_warn("%s: Unable to pin memory region err = %d\n",
 			__func__, (int)PTR_ERR(umem));
-		err = PTR_ERR(umem);
-		goto err_out;
+		return PTR_ERR(umem);
 	}
 
 	num_buf = ib_umem_num_pages(umem);
@@ -189,7 +187,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	if (err) {
 		pr_warn("%s: Unable to allocate memory for map\n",
 				__func__);
-		goto err_release_umem;
+		return err;
 	}
 
 	set = mr->cur_map_set;
@@ -213,8 +211,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 			if (!vaddr) {
 				pr_warn("%s: Unable to get virtual address\n",
 						__func__);
-				err = -ENOMEM;
-				goto err_release_umem;
+				return -ENOMEM;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
@@ -224,8 +221,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 		}
 	}
 
-	mr->ibmr.pd = &pd->ibpd;
-	mr->umem = umem;
 	mr->access = access;
 	mr->state = RXE_MR_STATE_VALID;
 	mr->type = IB_MR_TYPE_USER;
@@ -236,14 +231,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	set->offset = ib_umem_offset(umem);
 
 	return 0;
-
-err_release_umem:
-	ib_umem_release(umem);
-err_out:
-	return err;
 }
 
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 {
 	int err;
 
@@ -252,17 +242,13 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 
 	err = rxe_mr_alloc(mr, max_pages, 1);
 	if (err)
-		goto err1;
+		return err;
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->max_buf = max_pages;
 	mr->state = RXE_MR_STATE_FREE;
 	mr->type = IB_MR_TYPE_MEM_REG;
 
 	return 0;
-
-err1:
-	return err;
 }
 
 static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
@@ -695,10 +681,12 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 void rxe_mr_cleanup(struct rxe_pool_elem *elem)
 {
 	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+	struct rxe_pd *pd = mr_pd(mr);
 
-	rxe_put(mr_pd(mr));
+	rxe_put(pd);
 
-	ib_umem_release(mr->umem);
+	if (mr->umem)
+		ib_umem_release(mr->umem);
 
 	if (mr->cur_map_set)
 		rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 151c6280abd5..173172a83c74 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -903,7 +903,9 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
-	rxe_mr_init_dma(pd, access, mr);
+	mr->ibmr.pd = ibpd;
+
+	rxe_mr_init_dma(access, mr);
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
@@ -921,27 +923,21 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	struct rxe_mr *mr;
 
 	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err2;
-	}
-
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
 
-	err = rxe_mr_init_user(pd, start, length, iova, access, mr);
-	if (err)
-		goto err3;
+	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
+	if (err) {
+		rxe_cleanup(mr);
+		return ERR_PTR(err);
+	}
 
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
-
-err3:
-	rxe_put(pd);
-	rxe_cleanup(mr);
-err2:
-	return ERR_PTR(err);
 }
 
 static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
@@ -956,26 +952,21 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 		return ERR_PTR(-EINVAL);
 
 	mr = rxe_alloc(&rxe->mr_pool);
-	if (!mr) {
-		err = -ENOMEM;
-		goto err1;
-	}
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
 
-	err = rxe_mr_init_fast(pd, max_num_sg, mr);
-	if (err)
-		goto err2;
+	err = rxe_mr_init_fast(max_num_sg, mr);
+	if (err) {
+		rxe_cleanup(mr);
+		return ERR_PTR(err);
+	}
 
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
-
-err2:
-	rxe_put(pd);
-	rxe_cleanup(mr);
-err1:
-	return ERR_PTR(err);
 }
 
 /* build next_map_set from scatterlist

base-commit: 2635d2a8d4664b665bc12e15eee88e9b1b40ae7b
-- 
2.34.1



* Re: [PATCH for-next] RDMA/rxe: Fix error paths in MR alloc routines
  2022-07-15 18:24 [PATCH for-next] RDMA/rxe: Fix error paths in MR alloc routines Bob Pearson
@ 2022-07-18  2:03 ` lizhijian
  2022-07-21 19:15 ` Jason Gunthorpe
From: lizhijian @ 2022-07-18  2:03 UTC (permalink / raw)
  To: Bob Pearson, jgg, zyjzyj2000, linux-rdma



On 16/07/2022 02:24, Bob Pearson wrote:
> Currently the rxe driver has incorrect code in error paths for
> allocating MR objects. The PD and umem are always freed in
> rxe_mr_cleanup() but in some error paths they are already
> freed or never set. This patch makes sure that the PD is always
> set and checks to see if umem is set before freeing it in
> rxe_mr_cleanup().
>
> Reported-by: Li Zhijian <lizhijian@fujitsu.com>
> Link: https://lore.kernel.org/linux-rdma/11dafa5f-c52d-16c1-fe37-2cd45ab20474@fujitsu.com/
> Fixes: 3902b429ca14 ("Implement invalidate MW operations")
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>   drivers/infiniband/sw/rxe/rxe_loc.h   |  6 ++--
>   drivers/infiniband/sw/rxe/rxe_mr.c    | 36 +++++++-------------
>   drivers/infiniband/sw/rxe/rxe_verbs.c | 47 +++++++++++----------------
>   3 files changed, 34 insertions(+), 55 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
> index 0e022ae1b8a5..8969918275f9 100644
> --- a/drivers/infiniband/sw/rxe/rxe_loc.h
> +++ b/drivers/infiniband/sw/rxe/rxe_loc.h
> @@ -64,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
>   
>   /* rxe_mr.c */
>   u8 rxe_get_next_key(u32 last_key);
> -void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
> -int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> +void rxe_mr_init_dma(int access, struct rxe_mr *mr);
> +int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>   		     int access, struct rxe_mr *mr);
> -int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
> +int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
>   int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
>   		enum rxe_mr_copy_dir dir);
>   int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> index 9a5c2af6a56f..674af4c36c49 100644
> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> @@ -151,17 +151,16 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
>   	return -ENOMEM;
>   }
>   
> -void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> +void rxe_mr_init_dma(int access, struct rxe_mr *mr)
>   {
>   	rxe_mr_init(access, mr);
>   
> -	mr->ibmr.pd = &pd->ibpd;
>   	mr->access = access;
>   	mr->state = RXE_MR_STATE_VALID;
>   	mr->type = IB_MR_TYPE_DMA;
>   }
>   
> -int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> +int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
>   		     int access, struct rxe_mr *mr)
>   {
>   	struct rxe_map_set	*set;
> @@ -173,12 +172,11 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>   	void			*vaddr;
>   	int err;
>   
> -	umem = ib_umem_get(pd->ibpd.device, start, length, access);
> +	mr->umem = umem = ib_umem_get(&rxe->ib_dev, start, length, access);
>   	if (IS_ERR(umem)) {

In this case mr->umem is not NULL either (it holds an ERR_PTR), which will confuse the release code added below:

+	if (mr->umem)
+		ib_umem_release(mr->umem);
  

IMHO, by convention we should follow the rule that a routine destroys/frees the objects it allocated itself when it hits an error.
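
For illustration only, that convention might look roughly like this in
rxe_mr_init_user() (a rough sketch, not part of the posted patch -- the
routine releases the umem it pinned instead of leaving it to
rxe_mr_cleanup()):

	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);		/* nothing to unwind yet */

	err = rxe_mr_alloc(mr, ib_umem_num_pages(umem), 0);
	if (err)
		goto err_release_umem;		/* free what we pinned here */

	/* ... walk the umem and fill the map set as before ... */

	mr->umem = umem;			/* publish only on success */
	return 0;

err_release_umem:
	ib_umem_release(umem);
	return err;
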
@Jason, Yanjun, what's your opinion?

Thanks
Zhijian



>   		pr_warn("%s: Unable to pin memory region err = %d\n",
>   			__func__, (int)PTR_ERR(umem));
> -		err = PTR_ERR(umem);
> -		goto err_out;
> +		return PTR_ERR(umem);




>   	}
>   
>   	num_buf = ib_umem_num_pages(umem);
> @@ -189,7 +187,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>   	if (err) {
>   		pr_warn("%s: Unable to allocate memory for map\n",
>   				__func__);
> -		goto err_release_umem;
> +		return err;
>   	}
>   
>   	set = mr->cur_map_set;
> @@ -213,8 +211,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>   			if (!vaddr) {
>   				pr_warn("%s: Unable to get virtual address\n",
>   						__func__);
> -				err = -ENOMEM;
> -				goto err_release_umem;
> +				return -ENOMEM;
>   			}
>   
>   			buf->addr = (uintptr_t)vaddr;
> @@ -224,8 +221,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>   		}
>   	}
>   
> -	mr->ibmr.pd = &pd->ibpd;
> -	mr->umem = umem;
>   	mr->access = access;
>   	mr->state = RXE_MR_STATE_VALID;
>   	mr->type = IB_MR_TYPE_USER;
> @@ -236,14 +231,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>   	set->offset = ib_umem_offset(umem);
>   
>   	return 0;
> -
> -err_release_umem:
> -	ib_umem_release(umem);
> -err_out:
> -	return err;
>   }
>   
> -int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
> +int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
>   {
>   	int err;
>   
> @@ -252,17 +242,13 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
>   
>   	err = rxe_mr_alloc(mr, max_pages, 1);
>   	if (err)
> -		goto err1;
> +		return err;
>   
> -	mr->ibmr.pd = &pd->ibpd;
>   	mr->max_buf = max_pages;
>   	mr->state = RXE_MR_STATE_FREE;
>   	mr->type = IB_MR_TYPE_MEM_REG;
>   
>   	return 0;
> -
> -err1:
> -	return err;
>   }
>   
>   static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
> @@ -695,10 +681,12 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
>   void rxe_mr_cleanup(struct rxe_pool_elem *elem)
>   {
>   	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
> +	struct rxe_pd *pd = mr_pd(mr);
>   
> -	rxe_put(mr_pd(mr));
> +	rxe_put(pd);
>   
> -	ib_umem_release(mr->umem);
> +	if (mr->umem)
> +		ib_umem_release(mr->umem);
>   
>   	if (mr->cur_map_set)
>   		rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
> diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
> index 151c6280abd5..173172a83c74 100644
> --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
> +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
> @@ -903,7 +903,9 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
>   		return ERR_PTR(-ENOMEM);
>   
>   	rxe_get(pd);
> -	rxe_mr_init_dma(pd, access, mr);
> +	mr->ibmr.pd = ibpd;
> +
> +	rxe_mr_init_dma(access, mr);
>   	rxe_finalize(mr);
>   
>   	return &mr->ibmr;
> @@ -921,27 +923,21 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
>   	struct rxe_mr *mr;
>   
>   	mr = rxe_alloc(&rxe->mr_pool);
> -	if (!mr) {
> -		err = -ENOMEM;
> -		goto err2;
> -	}
> -
> +	if (!mr)
> +		return ERR_PTR(-ENOMEM);
>   
>   	rxe_get(pd);
> +	mr->ibmr.pd = ibpd;
>   
> -	err = rxe_mr_init_user(pd, start, length, iova, access, mr);
> -	if (err)
> -		goto err3;
> +	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
> +	if (err) {
> +		rxe_cleanup(mr);
> +		return ERR_PTR(err);
> +	}
>   
>   	rxe_finalize(mr);
>   
>   	return &mr->ibmr;
> -
> -err3:
> -	rxe_put(pd);
> -	rxe_cleanup(mr);
> -err2:
> -	return ERR_PTR(err);
>   }
>   
>   static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
> @@ -956,26 +952,21 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
>   		return ERR_PTR(-EINVAL);
>   
>   	mr = rxe_alloc(&rxe->mr_pool);
> -	if (!mr) {
> -		err = -ENOMEM;
> -		goto err1;
> -	}
> +	if (!mr)
> +		return ERR_PTR(-ENOMEM);
>   
>   	rxe_get(pd);
> +	mr->ibmr.pd = ibpd;
>   
> -	err = rxe_mr_init_fast(pd, max_num_sg, mr);
> -	if (err)
> -		goto err2;
> +	err = rxe_mr_init_fast(max_num_sg, mr);
> +	if (err) {
> +		rxe_cleanup(mr);
> +		return ERR_PTR(err);
> +	}
>   
>   	rxe_finalize(mr);
>   
>   	return &mr->ibmr;
> -
> -err2:
> -	rxe_put(pd);
> -	rxe_cleanup(mr);
> -err1:
> -	return ERR_PTR(err);
>   }
>   
>   /* build next_map_set from scatterlist
>
> base-commit: 2635d2a8d4664b665bc12e15eee88e9b1b40ae7b


* Re: [PATCH for-next] RDMA/rxe: Fix error paths in MR alloc routines
  2022-07-15 18:24 [PATCH for-next] RDMA/rxe: Fix error paths in MR alloc routines Bob Pearson
  2022-07-18  2:03 ` lizhijian
@ 2022-07-21 19:15 ` Jason Gunthorpe
  2022-07-21 20:28   ` Bob Pearson
From: Jason Gunthorpe @ 2022-07-21 19:15 UTC (permalink / raw)
  To: Bob Pearson; +Cc: lizhijian, zyjzyj2000, linux-rdma

On Fri, Jul 15, 2022 at 01:24:15PM -0500, Bob Pearson wrote:
> @@ -173,12 +172,11 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  	void			*vaddr;
>  	int err;
>  
> -	umem = ib_umem_get(pd->ibpd.device, start, length, access);
> +	mr->umem = umem = ib_umem_get(&rxe->ib_dev, start, length, access);
>  	if (IS_ERR(umem)) {

So this puts an err_ptr into mr->umem and then later the cleanup:

	if (mr->umem)
		ib_umem_release(mr->umem);

Will not like it very much.

I'm OK with the idea of initializing structures that are cleaned up by
their single master cleanup, but you have to do it in a way that
doesn't put corrupted data into the object.
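
One hypothetical way to do that (a sketch only) is to assign mr->umem
after the error check, so the object never holds an ERR_PTR:

	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
	if (IS_ERR(umem)) {
		pr_warn("%s: Unable to pin memory region err = %d\n",
			__func__, (int)PTR_ERR(umem));
		return PTR_ERR(umem);	/* mr->umem is still NULL */
	}
	mr->umem = umem;		/* safe for rxe_mr_cleanup() now */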

Jason


* Re: [PATCH for-next] RDMA/rxe: Fix error paths in MR alloc routines
  2022-07-21 19:15 ` Jason Gunthorpe
@ 2022-07-21 20:28   ` Bob Pearson
From: Bob Pearson @ 2022-07-21 20:28 UTC (permalink / raw)
  To: Jason Gunthorpe; +Cc: lizhijian, zyjzyj2000, linux-rdma

On 7/21/22 14:15, Jason Gunthorpe wrote:
> On Fri, Jul 15, 2022 at 01:24:15PM -0500, Bob Pearson wrote:
>> @@ -173,12 +172,11 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>  	void			*vaddr;
>>  	int err;
>>  
>> -	umem = ib_umem_get(pd->ibpd.device, start, length, access);
>> +	mr->umem = umem = ib_umem_get(&rxe->ib_dev, start, length, access);
>>  	if (IS_ERR(umem)) {
> 
> So this puts an err_ptr into mr->umem and then later the cleanup:
> 
> 	if (mr->umem)
> 		ib_umem_release(mr->umem);
> 
> Will not like it very much.
> 
> I'm OK with the idea of initializing structures that are cleaned up by
> their single master cleanup, but you have to do it in a way that
> doesn't put corrupted data into the object..
> 
> Jason

I fixed this patch but it collides with the map set revert Li
sent in.

Bob

