All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
@ 2021-07-04 22:35 Bob Pearson
  2021-07-05  2:09 ` yangx.jy
  2021-07-05  3:42 ` Zhu Yanjun
  0 siblings, 2 replies; 9+ messages in thread
From: Bob Pearson @ 2021-07-04 22:35 UTC (permalink / raw)
  To: jgg, zyjzyj2000, linux-rdma, haakon.brugge; +Cc: Bob Pearson

In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
free the memory at mr->map. This patch adds code to do that.
This error only occurs if page_address() returns a zero (NULL) address,
which should never happen for 64-bit architectures.

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4de235..f49baff9ca3d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
 int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
-	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf = NULL;
-	struct ib_umem		*umem;
-	struct sg_page_iter	sg_iter;
-	int			num_buf;
-	void			*vaddr;
+	struct rxe_map **map;
+	struct rxe_phys_buf *buf = NULL;
+	struct ib_umem *umem;
+	struct sg_page_iter sg_iter;
+	int num_buf;
+	void *vaddr;
 	int err;
+	int i;
 
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}
 
 	mr->umem = umem;
@@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+				__func__);
+		goto err_release_umem;
 	}
 
 	mr->page_shift = PAGE_SHIFT;
 	mr->page_mask = PAGE_SIZE - 1;
 
-	num_buf			= 0;
+	num_buf = 0;
 	map = mr->map;
 	if (length > 0) {
 		buf = map[0]->buf;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+						__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	return 0;
 
-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }
 
-- 
2.30.2


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-04 22:35 [PATCH for-next] RDMA/rxe: Fix memory leak in error path code Bob Pearson
@ 2021-07-05  2:09 ` yangx.jy
  2021-07-05  3:42 ` Zhu Yanjun
  1 sibling, 0 replies; 9+ messages in thread
From: yangx.jy @ 2021-07-05  2:09 UTC (permalink / raw)
  To: Bob Pearson; +Cc: jgg, zyjzyj2000, linux-rdma, haakon.brugge

On 2021/7/5 6:35, Bob Pearson wrote:
> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> free the memory at mr->map. This patch adds code to do that.
> This error only occurs if page_address() fails to return a non zero address
> which should never happen for 64 bit architectures.
Hi Bob,

Thanks for your quick fix.

It looks good to me.
Reviewed-by: Xiao Yang <yangx.jy@fujitsu.com>

Best Regards,
Xiao Yang
> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>  1 file changed, 24 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> index 6aabcb4de235..f49baff9ca3d 100644
> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  		     int access, struct rxe_mr *mr)
>  {
> -	struct rxe_map		**map;
> -	struct rxe_phys_buf	*buf = NULL;
> -	struct ib_umem		*umem;
> -	struct sg_page_iter	sg_iter;
> -	int			num_buf;
> -	void			*vaddr;
> +	struct rxe_map **map;
> +	struct rxe_phys_buf *buf = NULL;
> +	struct ib_umem *umem;
> +	struct sg_page_iter sg_iter;
> +	int num_buf;
> +	void *vaddr;
>  	int err;
> +	int i;
>  
>  	umem = ib_umem_get(pd->ibpd.device, start, length, access);
>  	if (IS_ERR(umem)) {
> -		pr_warn("err %d from rxe_umem_get\n",
> -			(int)PTR_ERR(umem));
> +		pr_warn("%s: Unable to pin memory region err = %d\n",
> +			__func__, (int)PTR_ERR(umem));
>  		err = PTR_ERR(umem);
> -		goto err1;
> +		goto err_out;
>  	}
>  
>  	mr->umem = umem;
> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  
>  	err = rxe_mr_alloc(mr, num_buf);
>  	if (err) {
> -		pr_warn("err %d from rxe_mr_alloc\n", err);
> -		ib_umem_release(umem);
> -		goto err1;
> +		pr_warn("%s: Unable to allocate memory for map\n",
> +				__func__);
> +		goto err_release_umem;
>  	}
>  
>  	mr->page_shift = PAGE_SHIFT;
>  	mr->page_mask = PAGE_SIZE - 1;
>  
> -	num_buf			= 0;
> +	num_buf = 0;
>  	map = mr->map;
>  	if (length > 0) {
>  		buf = map[0]->buf;
> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  
>  			vaddr = page_address(sg_page_iter_page(&sg_iter));
>  			if (!vaddr) {
> -				pr_warn("null vaddr\n");
> -				ib_umem_release(umem);
> +				pr_warn("%s: Unable to get virtual address\n",
> +						__func__);
>  				err = -ENOMEM;
> -				goto err1;
> +				goto err_cleanup_map;
>  			}
>  
>  			buf->addr = (uintptr_t)vaddr;
> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>  
>  	return 0;
>  
> -err1:
> +err_cleanup_map:
> +	for (i = 0; i < mr->num_map; i++)
> +		kfree(mr->map[i]);
> +	kfree(mr->map);
> +err_release_umem:
> +	ib_umem_release(umem);
> +err_out:
>  	return err;
>  }
>  

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-04 22:35 [PATCH for-next] RDMA/rxe: Fix memory leak in error path code Bob Pearson
  2021-07-05  2:09 ` yangx.jy
@ 2021-07-05  3:42 ` Zhu Yanjun
  2021-07-05  8:16   ` Haakon Bugge
  1 sibling, 1 reply; 9+ messages in thread
From: Zhu Yanjun @ 2021-07-05  3:42 UTC (permalink / raw)
  To: Bob Pearson; +Cc: Jason Gunthorpe, RDMA mailing list, haakon.brugge

On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>
> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> free the memory at mr->map. This patch adds code to do that.
> This error only occurs if page_address() fails to return a non zero address
> which should never happen for 64 bit architectures.

If this will never happen for 64 bit architectures, is it possible to
exclude 64 bit architecture with some MACROs or others?

Thanks,

Zhu Yanjun

>
> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> ---
>  drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>  1 file changed, 24 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> index 6aabcb4de235..f49baff9ca3d 100644
> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>  int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>                      int access, struct rxe_mr *mr)
>  {
> -       struct rxe_map          **map;
> -       struct rxe_phys_buf     *buf = NULL;
> -       struct ib_umem          *umem;
> -       struct sg_page_iter     sg_iter;
> -       int                     num_buf;
> -       void                    *vaddr;
> +       struct rxe_map **map;
> +       struct rxe_phys_buf *buf = NULL;
> +       struct ib_umem *umem;
> +       struct sg_page_iter sg_iter;
> +       int num_buf;
> +       void *vaddr;
>         int err;
> +       int i;
>
>         umem = ib_umem_get(pd->ibpd.device, start, length, access);
>         if (IS_ERR(umem)) {
> -               pr_warn("err %d from rxe_umem_get\n",
> -                       (int)PTR_ERR(umem));
> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> +                       __func__, (int)PTR_ERR(umem));
>                 err = PTR_ERR(umem);
> -               goto err1;
> +               goto err_out;
>         }
>
>         mr->umem = umem;
> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>
>         err = rxe_mr_alloc(mr, num_buf);
>         if (err) {
> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> -               ib_umem_release(umem);
> -               goto err1;
> +               pr_warn("%s: Unable to allocate memory for map\n",
> +                               __func__);
> +               goto err_release_umem;
>         }
>
>         mr->page_shift = PAGE_SHIFT;
>         mr->page_mask = PAGE_SIZE - 1;
>
> -       num_buf                 = 0;
> +       num_buf = 0;
>         map = mr->map;
>         if (length > 0) {
>                 buf = map[0]->buf;
> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>
>                         vaddr = page_address(sg_page_iter_page(&sg_iter));
>                         if (!vaddr) {
> -                               pr_warn("null vaddr\n");
> -                               ib_umem_release(umem);
> +                               pr_warn("%s: Unable to get virtual address\n",
> +                                               __func__);
>                                 err = -ENOMEM;
> -                               goto err1;
> +                               goto err_cleanup_map;
>                         }
>
>                         buf->addr = (uintptr_t)vaddr;
> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>
>         return 0;
>
> -err1:
> +err_cleanup_map:
> +       for (i = 0; i < mr->num_map; i++)
> +               kfree(mr->map[i]);
> +       kfree(mr->map);
> +err_release_umem:
> +       ib_umem_release(umem);
> +err_out:
>         return err;
>  }
>
> --
> 2.30.2
>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-05  3:42 ` Zhu Yanjun
@ 2021-07-05  8:16   ` Haakon Bugge
  2021-07-05  8:35     ` Zhu Yanjun
  0 siblings, 1 reply; 9+ messages in thread
From: Haakon Bugge @ 2021-07-05  8:16 UTC (permalink / raw)
  To: Zhu Yanjun; +Cc: Bob Pearson, Jason Gunthorpe, OFED mailing list, haakon.brugge



> On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> 
> On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>> 
>> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
>> free the memory at mr->map. This patch adds code to do that.
>> This error only occurs if page_address() fails to return a non zero address
>> which should never happen for 64 bit architectures.
> 
> If this will never happen for 64 bit architectures, is it possible to
> exclude 64 bit architecture with some MACROs or others?
> 
> Thanks,
> 
> Zhu Yanjun
> 
>> 
>> Fixes: 8700e3e7c485 ("Soft RoCE driver")
>> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
>> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
>> ---
>> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>> 1 file changed, 24 insertions(+), 17 deletions(-)
>> 
>> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
>> index 6aabcb4de235..f49baff9ca3d 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
>> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
>> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>                     int access, struct rxe_mr *mr)
>> {
>> -       struct rxe_map          **map;
>> -       struct rxe_phys_buf     *buf = NULL;
>> -       struct ib_umem          *umem;
>> -       struct sg_page_iter     sg_iter;
>> -       int                     num_buf;
>> -       void                    *vaddr;
>> +       struct rxe_map **map;
>> +       struct rxe_phys_buf *buf = NULL;
>> +       struct ib_umem *umem;
>> +       struct sg_page_iter sg_iter;
>> +       int num_buf;
>> +       void *vaddr;

This white-space stripping must be another issue, not related to the memleak?

>>        int err;
>> +       int i;
>> 
>>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
>>        if (IS_ERR(umem)) {
>> -               pr_warn("err %d from rxe_umem_get\n",
>> -                       (int)PTR_ERR(umem));
>> +               pr_warn("%s: Unable to pin memory region err = %d\n",
>> +                       __func__, (int)PTR_ERR(umem));
>>                err = PTR_ERR(umem);
>> -               goto err1;
>> +               goto err_out;
>>        }
>> 
>>        mr->umem = umem;
>> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>        err = rxe_mr_alloc(mr, num_buf);
>>        if (err) {
>> -               pr_warn("err %d from rxe_mr_alloc\n", err);
>> -               ib_umem_release(umem);
>> -               goto err1;
>> +               pr_warn("%s: Unable to allocate memory for map\n",
>> +                               __func__);
>> +               goto err_release_umem;
>>        }
>> 
>>        mr->page_shift = PAGE_SHIFT;
>>        mr->page_mask = PAGE_SIZE - 1;
>> 
>> -       num_buf                 = 0;
>> +       num_buf = 0;

White-space change.

Otherwise:

Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>


Thxs, Håkon



>>        map = mr->map;
>>        if (length > 0) {
>>                buf = map[0]->buf;
>> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
>>                        if (!vaddr) {
>> -                               pr_warn("null vaddr\n");
>> -                               ib_umem_release(umem);
>> +                               pr_warn("%s: Unable to get virtual address\n",
>> +                                               __func__);
>>                                err = -ENOMEM;
>> -                               goto err1;
>> +                               goto err_cleanup_map;
>>                        }
>> 
>>                        buf->addr = (uintptr_t)vaddr;
>> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>> 
>>        return 0;
>> 
>> -err1:
>> +err_cleanup_map:
>> +       for (i = 0; i < mr->num_map; i++)
>> +               kfree(mr->map[i]);
>> +       kfree(mr->map);
>> +err_release_umem:
>> +       ib_umem_release(umem);
>> +err_out:
>>        return err;
>> }
>> 
>> --
>> 2.30.2
>> 


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-05  8:16   ` Haakon Bugge
@ 2021-07-05  8:35     ` Zhu Yanjun
  2021-07-05 15:40       ` Robert Pearson
  0 siblings, 1 reply; 9+ messages in thread
From: Zhu Yanjun @ 2021-07-05  8:35 UTC (permalink / raw)
  To: Haakon Bugge
  Cc: Bob Pearson, Jason Gunthorpe, OFED mailing list, haakon.brugge

On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
>
>
>
> > On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> >
> > On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> >>
> >> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> >> free the memory at mr->map. This patch adds code to do that.
> >> This error only occurs if page_address() fails to return a non zero address
> >> which should never happen for 64 bit architectures.
> >
> > If this will never happen for 64 bit architectures, is it possible to
> > exclude 64 bit architecture with some MACROs or others?
> >
> > Thanks,
> >
> > Zhu Yanjun
> >
> >>
> >> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> >> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> >> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> >> ---
> >> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
> >> 1 file changed, 24 insertions(+), 17 deletions(-)
> >>
> >> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> >> index 6aabcb4de235..f49baff9ca3d 100644
> >> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> >> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> >> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> >> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>                     int access, struct rxe_mr *mr)
> >> {
> >> -       struct rxe_map          **map;
> >> -       struct rxe_phys_buf     *buf = NULL;
> >> -       struct ib_umem          *umem;
> >> -       struct sg_page_iter     sg_iter;
> >> -       int                     num_buf;
> >> -       void                    *vaddr;
> >> +       struct rxe_map **map;
> >> +       struct rxe_phys_buf *buf = NULL;
> >> +       struct ib_umem *umem;
> >> +       struct sg_page_iter sg_iter;
> >> +       int num_buf;
> >> +       void *vaddr;
>
> This white-space stripping must be another issue, not related to the memleak?
>
> >>        int err;
> >> +       int i;
> >>
> >>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
> >>        if (IS_ERR(umem)) {
> >> -               pr_warn("err %d from rxe_umem_get\n",
> >> -                       (int)PTR_ERR(umem));
> >> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> >> +                       __func__, (int)PTR_ERR(umem));
> >>                err = PTR_ERR(umem);
> >> -               goto err1;
> >> +               goto err_out;
> >>        }
> >>
> >>        mr->umem = umem;
> >> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>
> >>        err = rxe_mr_alloc(mr, num_buf);
> >>        if (err) {
> >> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> >> -               ib_umem_release(umem);
> >> -               goto err1;
> >> +               pr_warn("%s: Unable to allocate memory for map\n",
> >> +                               __func__);
> >> +               goto err_release_umem;
> >>        }
> >>
> >>        mr->page_shift = PAGE_SHIFT;
> >>        mr->page_mask = PAGE_SIZE - 1;
> >>
> >> -       num_buf                 = 0;
> >> +       num_buf = 0;
>
> White-space change.

Yeah. It seems that some white-space changes in this commit.

Zhu Yanjun

>
> Otherwise:
>
> Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
>
>
> Thxs, Håkon
>
>
>
> >>        map = mr->map;
> >>        if (length > 0) {
> >>                buf = map[0]->buf;
> >> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>
> >>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
> >>                        if (!vaddr) {
> >> -                               pr_warn("null vaddr\n");
> >> -                               ib_umem_release(umem);
> >> +                               pr_warn("%s: Unable to get virtual address\n",
> >> +                                               __func__);
> >>                                err = -ENOMEM;
> >> -                               goto err1;
> >> +                               goto err_cleanup_map;
> >>                        }
> >>
> >>                        buf->addr = (uintptr_t)vaddr;
> >> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> >>
> >>        return 0;
> >>
> >> -err1:
> >> +err_cleanup_map:
> >> +       for (i = 0; i < mr->num_map; i++)
> >> +               kfree(mr->map[i]);
> >> +       kfree(mr->map);
> >> +err_release_umem:
> >> +       ib_umem_release(umem);
> >> +err_out:
> >>        return err;
> >> }
> >>
> >> --
> >> 2.30.2
> >>
>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-05  8:35     ` Zhu Yanjun
@ 2021-07-05 15:40       ` Robert Pearson
  2021-07-05 15:41         ` Robert Pearson
                           ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Robert Pearson @ 2021-07-05 15:40 UTC (permalink / raw)
  To: Zhu Yanjun
  Cc: Haakon Bugge, Jason Gunthorpe, OFED mailing list, haakon.brugge

Jason has been asking for patches to pass clang-format-patch so I've
been cleaning up the code near functional changes since it doesn't
like extra spaces such as for vertical alignment.

If I could figure out how ib_umem_works there is a chance that it
would fail if it couldn't map all the user space virtual memory into
kernel virtual addresses. But so far I have failed. It's fairly
complex.

Bob

On Mon, Jul 5, 2021 at 3:35 AM Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
>
> On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
> >
> >
> >
> > > On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> > >
> > > On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> > >>
> > >> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> > >> free the memory at mr->map. This patch adds code to do that.
> > >> This error only occurs if page_address() fails to return a non zero address
> > >> which should never happen for 64 bit architectures.
> > >
> > > If this will never happen for 64 bit architectures, is it possible to
> > > exclude 64 bit architecture with some MACROs or others?
> > >
> > > Thanks,
> > >
> > > Zhu Yanjun
> > >
> > >>
> > >> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> > >> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> > >> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> > >> ---
> > >> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
> > >> 1 file changed, 24 insertions(+), 17 deletions(-)
> > >>
> > >> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> > >> index 6aabcb4de235..f49baff9ca3d 100644
> > >> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> > >> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> > >> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> > >> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>                     int access, struct rxe_mr *mr)
> > >> {
> > >> -       struct rxe_map          **map;
> > >> -       struct rxe_phys_buf     *buf = NULL;
> > >> -       struct ib_umem          *umem;
> > >> -       struct sg_page_iter     sg_iter;
> > >> -       int                     num_buf;
> > >> -       void                    *vaddr;
> > >> +       struct rxe_map **map;
> > >> +       struct rxe_phys_buf *buf = NULL;
> > >> +       struct ib_umem *umem;
> > >> +       struct sg_page_iter sg_iter;
> > >> +       int num_buf;
> > >> +       void *vaddr;
> >
> > This white-space stripping must be another issue, not related to the memleak?
> >
> > >>        int err;
> > >> +       int i;
> > >>
> > >>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
> > >>        if (IS_ERR(umem)) {
> > >> -               pr_warn("err %d from rxe_umem_get\n",
> > >> -                       (int)PTR_ERR(umem));
> > >> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> > >> +                       __func__, (int)PTR_ERR(umem));
> > >>                err = PTR_ERR(umem);
> > >> -               goto err1;
> > >> +               goto err_out;
> > >>        }
> > >>
> > >>        mr->umem = umem;
> > >> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>
> > >>        err = rxe_mr_alloc(mr, num_buf);
> > >>        if (err) {
> > >> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> > >> -               ib_umem_release(umem);
> > >> -               goto err1;
> > >> +               pr_warn("%s: Unable to allocate memory for map\n",
> > >> +                               __func__);
> > >> +               goto err_release_umem;
> > >>        }
> > >>
> > >>        mr->page_shift = PAGE_SHIFT;
> > >>        mr->page_mask = PAGE_SIZE - 1;
> > >>
> > >> -       num_buf                 = 0;
> > >> +       num_buf = 0;
> >
> > White-space change.
>
> Yeah. It seems that some white-space changes in this commit.
>
> Zhu Yanjun
>
> >
> > Otherwise:
> >
> > Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
> >
> >
> > Thxs, Håkon
> >
> >
> >
> > >>        map = mr->map;
> > >>        if (length > 0) {
> > >>                buf = map[0]->buf;
> > >> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>
> > >>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
> > >>                        if (!vaddr) {
> > >> -                               pr_warn("null vaddr\n");
> > >> -                               ib_umem_release(umem);
> > >> +                               pr_warn("%s: Unable to get virtual address\n",
> > >> +                                               __func__);
> > >>                                err = -ENOMEM;
> > >> -                               goto err1;
> > >> +                               goto err_cleanup_map;
> > >>                        }
> > >>
> > >>                        buf->addr = (uintptr_t)vaddr;
> > >> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > >>
> > >>        return 0;
> > >>
> > >> -err1:
> > >> +err_cleanup_map:
> > >> +       for (i = 0; i < mr->num_map; i++)
> > >> +               kfree(mr->map[i]);
> > >> +       kfree(mr->map);
> > >> +err_release_umem:
> > >> +       ib_umem_release(umem);
> > >> +err_out:
> > >>        return err;
> > >> }
> > >>
> > >> --
> > >> 2.30.2
> > >>
> >

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-05 15:40       ` Robert Pearson
@ 2021-07-05 15:41         ` Robert Pearson
  2021-07-05 15:54         ` Jason Gunthorpe
  2021-07-05 16:16         ` Haakon Bugge
  2 siblings, 0 replies; 9+ messages in thread
From: Robert Pearson @ 2021-07-05 15:41 UTC (permalink / raw)
  To: Zhu Yanjun
  Cc: Haakon Bugge, Jason Gunthorpe, OFED mailing list, haakon.brugge

Sorry that was ib_umem_get().

On Mon, Jul 5, 2021 at 10:40 AM Robert Pearson <rpearsonhpe@gmail.com> wrote:
>
> Jason has been asking for patches to pass clang-format-patch so I've
> been cleaning up the code near functional changes since it doesn't
> like extra spaces such as for vertical alignment.
>
> If I could figure out how ib_umem_works there is a chance that it
> would fail if it couldn't map all the user space virtual memory into
> kernel virtual addresses. But so far I have failed. It's fairly
> complex.
>
> Bob
>
> On Mon, Jul 5, 2021 at 3:35 AM Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> >
> > On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
> > >
> > >
> > >
> > > > On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
> > > >
> > > > On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
> > > >>
> > > >> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
> > > >> free the memory at mr->map. This patch adds code to do that.
> > > >> This error only occurs if page_address() fails to return a non zero address
> > > >> which should never happen for 64 bit architectures.
> > > >
> > > > If this will never happen for 64 bit architectures, is it possible to
> > > > exclude 64 bit architecture with some MACROs or others?
> > > >
> > > > Thanks,
> > > >
> > > > Zhu Yanjun
> > > >
> > > >>
> > > >> Fixes: 8700e3e7c485 ("Soft RoCE driver")
> > > >> Reported by: Haakon Bugge <haakon.bugge@oracle.com>
> > > >> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
> > > >> ---
> > > >> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
> > > >> 1 file changed, 24 insertions(+), 17 deletions(-)
> > > >>
> > > >> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
> > > >> index 6aabcb4de235..f49baff9ca3d 100644
> > > >> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
> > > >> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
> > > >> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
> > > >> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>                     int access, struct rxe_mr *mr)
> > > >> {
> > > >> -       struct rxe_map          **map;
> > > >> -       struct rxe_phys_buf     *buf = NULL;
> > > >> -       struct ib_umem          *umem;
> > > >> -       struct sg_page_iter     sg_iter;
> > > >> -       int                     num_buf;
> > > >> -       void                    *vaddr;
> > > >> +       struct rxe_map **map;
> > > >> +       struct rxe_phys_buf *buf = NULL;
> > > >> +       struct ib_umem *umem;
> > > >> +       struct sg_page_iter sg_iter;
> > > >> +       int num_buf;
> > > >> +       void *vaddr;
> > >
> > > This white-space stripping must be another issue, not related to the memleak?
> > >
> > > >>        int err;
> > > >> +       int i;
> > > >>
> > > >>        umem = ib_umem_get(pd->ibpd.device, start, length, access);
> > > >>        if (IS_ERR(umem)) {
> > > >> -               pr_warn("err %d from rxe_umem_get\n",
> > > >> -                       (int)PTR_ERR(umem));
> > > >> +               pr_warn("%s: Unable to pin memory region err = %d\n",
> > > >> +                       __func__, (int)PTR_ERR(umem));
> > > >>                err = PTR_ERR(umem);
> > > >> -               goto err1;
> > > >> +               goto err_out;
> > > >>        }
> > > >>
> > > >>        mr->umem = umem;
> > > >> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>
> > > >>        err = rxe_mr_alloc(mr, num_buf);
> > > >>        if (err) {
> > > >> -               pr_warn("err %d from rxe_mr_alloc\n", err);
> > > >> -               ib_umem_release(umem);
> > > >> -               goto err1;
> > > >> +               pr_warn("%s: Unable to allocate memory for map\n",
> > > >> +                               __func__);
> > > >> +               goto err_release_umem;
> > > >>        }
> > > >>
> > > >>        mr->page_shift = PAGE_SHIFT;
> > > >>        mr->page_mask = PAGE_SIZE - 1;
> > > >>
> > > >> -       num_buf                 = 0;
> > > >> +       num_buf = 0;
> > >
> > > White-space change.
> >
> > Yeah. It seems that some white-space changes in this commit.
> >
> > Zhu Yanjun
> >
> > >
> > > Otherwise:
> > >
> > > Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
> > >
> > >
> > > Thxs, Håkon
> > >
> > >
> > >
> > > >>        map = mr->map;
> > > >>        if (length > 0) {
> > > >>                buf = map[0]->buf;
> > > >> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>
> > > >>                        vaddr = page_address(sg_page_iter_page(&sg_iter));
> > > >>                        if (!vaddr) {
> > > >> -                               pr_warn("null vaddr\n");
> > > >> -                               ib_umem_release(umem);
> > > >> +                               pr_warn("%s: Unable to get virtual address\n",
> > > >> +                                               __func__);
> > > >>                                err = -ENOMEM;
> > > >> -                               goto err1;
> > > >> +                               goto err_cleanup_map;
> > > >>                        }
> > > >>
> > > >>                        buf->addr = (uintptr_t)vaddr;
> > > >> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
> > > >>
> > > >>        return 0;
> > > >>
> > > >> -err1:
> > > >> +err_cleanup_map:
> > > >> +       for (i = 0; i < mr->num_map; i++)
> > > >> +               kfree(mr->map[i]);
> > > >> +       kfree(mr->map);
> > > >> +err_release_umem:
> > > >> +       ib_umem_release(umem);
> > > >> +err_out:
> > > >>        return err;
> > > >> }
> > > >>
> > > >> --
> > > >> 2.30.2
> > > >>
> > >

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-05 15:40       ` Robert Pearson
  2021-07-05 15:41         ` Robert Pearson
@ 2021-07-05 15:54         ` Jason Gunthorpe
  2021-07-05 16:16         ` Haakon Bugge
  2 siblings, 0 replies; 9+ messages in thread
From: Jason Gunthorpe @ 2021-07-05 15:54 UTC (permalink / raw)
  To: Robert Pearson; +Cc: Zhu Yanjun, Haakon Bugge, OFED mailing list, haakon.brugge

On Mon, Jul 05, 2021 at 10:40:14AM -0500, Robert Pearson wrote:
> Jason has been asking for patches to pass clang-format-patch so I've
> been cleaning up the code near functional changes since it doesn't
> like extra spaces such as for vertical alignment.

don't mix things though, new code should be closer to the standard
style, but don't mix significant style cleanups with bug fixes

> If I could figure out how ib_umem works, there is a chance that it
> would fail if it couldn't map all the user space virtual memory into
> kernel virtual addresses. But so far I have failed. It's fairly
> complex.

It can fail for lots of reasons?

Jason

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
  2021-07-05 15:40       ` Robert Pearson
  2021-07-05 15:41         ` Robert Pearson
  2021-07-05 15:54         ` Jason Gunthorpe
@ 2021-07-05 16:16         ` Haakon Bugge
  2 siblings, 0 replies; 9+ messages in thread
From: Haakon Bugge @ 2021-07-05 16:16 UTC (permalink / raw)
  To: Robert Pearson
  Cc: Zhu Yanjun, Jason Gunthorpe, OFED mailing list, haakon.brugge



> On 5 Jul 2021, at 17:40, Robert Pearson <rpearsonhpe@gmail.com> wrote:
> 
> Jason has been asking for patches to pass clang-format-patch so I've
> been cleaning up the code near functional changes since it doesn't
> like extra spaces such as for vertical alignment.

One of my former colleagues "trained" me on this, and almost wrote "not related to commit" on my forehead :-)

My preference is that you make one commit with style changes, another with functional changes. If the latter is reverted, Jason would still be happy about the style, right? And, it makes the process of reviewing simpler (at least for me).

> If I could figure out how ib_umem works, there is a chance that it
> would fail if it couldn't map all the user space virtual memory into
> kernel virtual addresses. But so far I have failed. It's fairly
> complex.

;-)


Thxs, Håkon

> 
> Bob
> 
> On Mon, Jul 5, 2021 at 3:35 AM Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
>> 
>> On Mon, Jul 5, 2021 at 4:16 PM Haakon Bugge <haakon.bugge@oracle.com> wrote:
>>> 
>>> 
>>> 
>>>> On 5 Jul 2021, at 05:42, Zhu Yanjun <zyjzyj2000@gmail.com> wrote:
>>>> 
>>>> On Mon, Jul 5, 2021 at 6:37 AM Bob Pearson <rpearsonhpe@gmail.com> wrote:
>>>>> 
>>>>> In rxe_mr_init_user() in rxe_mr.c at the third error the driver fails to
>>>>> free the memory at mr->map. This patch adds code to do that.
>>>>> This error only occurs if page_address() fails to return a non zero address
>>>>> which should never happen for 64 bit architectures.
>>>> 
>>>> If this will never happen for 64 bit architectures, is it possible to
>>>> exclude 64 bit architecture with some MACROs or others?
>>>> 
>>>> Thanks,
>>>> 
>>>> Zhu Yanjun
>>>> 
>>>>> 
>>>>> Fixes: 8700e3e7c485 ("Soft RoCE driver")
>>>>> Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
>>>>> Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
>>>>> ---
>>>>> drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
>>>>> 1 file changed, 24 insertions(+), 17 deletions(-)
>>>>> 
>>>>> diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
>>>>> index 6aabcb4de235..f49baff9ca3d 100644
>>>>> --- a/drivers/infiniband/sw/rxe/rxe_mr.c
>>>>> +++ b/drivers/infiniband/sw/rxe/rxe_mr.c
>>>>> @@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
>>>>> int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>>                    int access, struct rxe_mr *mr)
>>>>> {
>>>>> -       struct rxe_map          **map;
>>>>> -       struct rxe_phys_buf     *buf = NULL;
>>>>> -       struct ib_umem          *umem;
>>>>> -       struct sg_page_iter     sg_iter;
>>>>> -       int                     num_buf;
>>>>> -       void                    *vaddr;
>>>>> +       struct rxe_map **map;
>>>>> +       struct rxe_phys_buf *buf = NULL;
>>>>> +       struct ib_umem *umem;
>>>>> +       struct sg_page_iter sg_iter;
>>>>> +       int num_buf;
>>>>> +       void *vaddr;
>>> 
>>> This white-space stripping must be another issue, not related to the memleak?
>>> 
>>>>>       int err;
>>>>> +       int i;
>>>>> 
>>>>>       umem = ib_umem_get(pd->ibpd.device, start, length, access);
>>>>>       if (IS_ERR(umem)) {
>>>>> -               pr_warn("err %d from rxe_umem_get\n",
>>>>> -                       (int)PTR_ERR(umem));
>>>>> +               pr_warn("%s: Unable to pin memory region err = %d\n",
>>>>> +                       __func__, (int)PTR_ERR(umem));
>>>>>               err = PTR_ERR(umem);
>>>>> -               goto err1;
>>>>> +               goto err_out;
>>>>>       }
>>>>> 
>>>>>       mr->umem = umem;
>>>>> @@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>> 
>>>>>       err = rxe_mr_alloc(mr, num_buf);
>>>>>       if (err) {
>>>>> -               pr_warn("err %d from rxe_mr_alloc\n", err);
>>>>> -               ib_umem_release(umem);
>>>>> -               goto err1;
>>>>> +               pr_warn("%s: Unable to allocate memory for map\n",
>>>>> +                               __func__);
>>>>> +               goto err_release_umem;
>>>>>       }
>>>>> 
>>>>>       mr->page_shift = PAGE_SHIFT;
>>>>>       mr->page_mask = PAGE_SIZE - 1;
>>>>> 
>>>>> -       num_buf                 = 0;
>>>>> +       num_buf = 0;
>>> 
>>> White-space change.
>> 
>> Yeah. It seems that some white-space changes in this commit.
>> 
>> Zhu Yanjun
>> 
>>> 
>>> Otherwise:
>>> 
>>> Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
>>> 
>>> 
>>> Thxs, Håkon
>>> 
>>> 
>>> 
>>>>>       map = mr->map;
>>>>>       if (length > 0) {
>>>>>               buf = map[0]->buf;
>>>>> @@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>> 
>>>>>                       vaddr = page_address(sg_page_iter_page(&sg_iter));
>>>>>                       if (!vaddr) {
>>>>> -                               pr_warn("null vaddr\n");
>>>>> -                               ib_umem_release(umem);
>>>>> +                               pr_warn("%s: Unable to get virtual address\n",
>>>>> +                                               __func__);
>>>>>                               err = -ENOMEM;
>>>>> -                               goto err1;
>>>>> +                               goto err_cleanup_map;
>>>>>                       }
>>>>> 
>>>>>                       buf->addr = (uintptr_t)vaddr;
>>>>> @@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
>>>>> 
>>>>>       return 0;
>>>>> 
>>>>> -err1:
>>>>> +err_cleanup_map:
>>>>> +       for (i = 0; i < mr->num_map; i++)
>>>>> +               kfree(mr->map[i]);
>>>>> +       kfree(mr->map);
>>>>> +err_release_umem:
>>>>> +       ib_umem_release(umem);
>>>>> +err_out:
>>>>>       return err;
>>>>> }
>>>>> 
>>>>> --
>>>>> 2.30.2
>>>>> 
>>> 


^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2021-07-05 16:16 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-07-04 22:35 [PATCH for-next] RDMA/rxe: Fix memory leak in error path code Bob Pearson
2021-07-05  2:09 ` yangx.jy
2021-07-05  3:42 ` Zhu Yanjun
2021-07-05  8:16   ` Haakon Bugge
2021-07-05  8:35     ` Zhu Yanjun
2021-07-05 15:40       ` Robert Pearson
2021-07-05 15:41         ` Robert Pearson
2021-07-05 15:54         ` Jason Gunthorpe
2021-07-05 16:16         ` Haakon Bugge

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.