author:    Bob Pearson <rpearsonhpe@gmail.com>        2021-07-05 11:41:54 -0500
committer: Jason Gunthorpe <jgg@nvidia.com>           2021-07-15 14:44:12 -0300
commit:    b18c7da63fcb46e2f9a093cc18d7c219e13a887c
tree:      a5c6f22a30c4b51ceead98508031d9850d0be343 /drivers/infiniband/sw/rxe/rxe_mr.c
parent:    RDMA/irdma: Change the returned type to void
RDMA/rxe: Fix memory leak in error path code
In rxe_mr_init_user() at the third error the driver fails to free the
memory at mr->map. This patch adds code to do that. This error only
occurs if page_address() fails to return a non-zero address, which
should never happen for 64-bit architectures.

Fixes: 8700e3e7c485 ("Soft RoCE driver")
Link: https://lore.kernel.org/r/20210705164153.17652-1-rpearsonhpe@gmail.com
Reported-by: Haakon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Reviewed-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
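For reference, the pattern the patch adopts is the common kernel "unwind
ladder": each failure point jumps to a label that releases only what has
been acquired so far, and the labels fall through in reverse order of
acquisition. The bug fixed here was the third error path jumping to a
label that released the umem inline but never walked the ladder, leaking
mr->map and its entries. A minimal sketch of the idiom, with hypothetical
names (not the driver's code):

    #include <linux/slab.h>

    struct example {
            void **map;             /* array of chunks, like mr->map in rxe */
            int num_map;
    };

    /* Illustrative only: acquire n chunks, unwinding on failure. */
    static int example_alloc(struct example *ex, int n)
    {
            int i;

            ex->map = kmalloc_array(n, sizeof(*ex->map), GFP_KERNEL);
            if (!ex->map)
                    goto err_out;

            for (i = 0; i < n; i++) {
                    ex->map[i] = kzalloc(sizeof(long), GFP_KERNEL);
                    if (!ex->map[i])
                            goto err_cleanup_map;   /* frees chunks 0..i-1 */
            }
            ex->num_map = n;

            return 0;

    err_cleanup_map:
            while (--i >= 0)
                    kfree(ex->map[i]);
            kfree(ex->map);
    err_out:
            return -ENOMEM;
    }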
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_mr.c')
-rw-r--r--    drivers/infiniband/sw/rxe/rxe_mr.c    27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6aabcb4de235..be4bcb420fab 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -113,13 +113,14 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	int			num_buf;
 	void			*vaddr;
 	int err;
+	int i;
 
 	umem = ib_umem_get(pd->ibpd.device, start, length, access);
 	if (IS_ERR(umem)) {
-		pr_warn("err %d from rxe_umem_get\n",
-			(int)PTR_ERR(umem));
+		pr_warn("%s: Unable to pin memory region err = %d\n",
+			__func__, (int)PTR_ERR(umem));
 		err = PTR_ERR(umem);
-		goto err1;
+		goto err_out;
 	}
 
 	mr->umem = umem;
@@ -129,9 +130,9 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	err = rxe_mr_alloc(mr, num_buf);
 	if (err) {
-		pr_warn("err %d from rxe_mr_alloc\n", err);
-		ib_umem_release(umem);
-		goto err1;
+		pr_warn("%s: Unable to allocate memory for map\n",
+				__func__);
+		goto err_release_umem;
 	}
 
 	mr->page_shift = PAGE_SHIFT;
@@ -151,10 +152,10 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
-				pr_warn("null vaddr\n");
-				ib_umem_release(umem);
+				pr_warn("%s: Unable to get virtual address\n",
+						__func__);
 				err = -ENOMEM;
-				goto err1;
+				goto err_cleanup_map;
 			}
 
 			buf->addr = (uintptr_t)vaddr;
@@ -177,7 +178,13 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 
 	return 0;
 
-err1:
+err_cleanup_map:
+	for (i = 0; i < mr->num_map; i++)
+		kfree(mr->map[i]);
+	kfree(mr->map);
+err_release_umem:
+	ib_umem_release(umem);
+err_out:
 	return err;
 }
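With the patch applied, the tail of rxe_mr_init_user() reads as follows
(assembled from the hunks above). Each error label releases only what was
acquired before its failure point and falls through to the labels below
it, so the !vaddr path now frees the map entries, the map array, and the
pinned umem before returning:

    	return 0;

    err_cleanup_map:
    	for (i = 0; i < mr->num_map; i++)
    		kfree(mr->map[i]);
    	kfree(mr->map);
    err_release_umem:
    	ib_umem_release(umem);
    err_out:
    	return err;
    }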