* [PATCH for-next] RDMA/rxe: Fix memory leak in error path code
@ 2021-07-04 22:35 Bob Pearson
From: Bob Pearson @ 2021-07-04 22:35 UTC (permalink / raw)
  To: jgg, zyjzyj2000, linux-rdma, haakon.brugge; +Cc: Bob Pearson

In rxe_mr_init_user() in rxe_mr.c, the third error path fails to free the
memory allocated at mr->map. This patch adds code to do that.
The error only occurs if page_address() returns NULL for one of the umem
pages, which should never happen on 64-bit architectures.
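
For reference, a minimal self-contained sketch of the goto-unwind idiom
the error paths are converted to (the struct and the ctx_init() helper
are hypothetical illustrations, not rxe driver code): each label
releases exactly what was acquired before the failing step, so a late
failure cannot leak an earlier allocation.

#include <errno.h>
#include <stdlib.h>

struct ctx {
	int **map;	/* array of per-chunk allocations */
	int num_map;
};

static int ctx_init(struct ctx *c, int num_map)
{
	int i, err;

	c->num_map = num_map;
	/* calloc() zeroes the array, so free(NULL) below is safe */
	c->map = calloc(num_map, sizeof(*c->map));
	if (!c->map) {
		err = -ENOMEM;
		goto err_out;		/* nothing acquired yet */
	}

	for (i = 0; i < num_map; i++) {
		c->map[i] = malloc(sizeof(**c->map));
		if (!c->map[i]) {
			err = -ENOMEM;
			goto err_cleanup_map;	/* undo partial allocation */
		}
	}

	return 0;

err_cleanup_map:
	for (i = 0; i < c->num_map; i++)
		free(c->map[i]);	/* no-op for untouched slots */
	free(c->map);
err_out:
	return err;
}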

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c | 41 +++++++++++++++++-------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4de235..f49baff9ca3d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -106,20 +106,21 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
 int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
-	struct rxe_map		**map;
-	struct rxe_phys_buf	*buf = NULL;
-	struct ib_umem		*umem;
-	struct sg_page_iter	sg_iter;
-	int			num_buf;
-	void			*vaddr;
+	struct rxe_map **map;
+	struct rxe_phys_buf *buf = NULL;
+	struct ib_umem *umem;
+	struct sg_page_iter sg_iter;
+	int num_buf;
+	void *vaddr;
 	int err;
+	int i;
 
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}
 
 	mr->umem = umem;
@@ -129,15 +130,15 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+				__func__);
+		goto err_release_umem;
 	}
 
 	mr->page_shift = PAGE_SHIFT;
 	mr->page_mask = PAGE_SIZE - 1;
 
-	num_buf			= 0;
+	num_buf = 0;
 	map = mr->map;
 	if (length > 0) {
 		buf = map[0]->buf;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+						__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	return 0;
 
-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }
 
-- 
2.30.2


