author    Ilya Lesokhin <ilyal@mellanox.com>    2018-03-13 15:18:48 +0200
committer Doug Ledford <dledford@redhat.com>    2018-03-14 16:05:16 -0400
commit    c44ef998f25eaddcd78924f98e5baed602d933e6 (patch)
tree      79399095abe3f6c9b272a0f93d39d22abb5f6603 /drivers/infiniband/hw/mlx5/mr.c
parent    IB/mlx5: Only synchronize RCU once when removing mkeys (diff)
IB/mlx5: Maintain a single emergency page
The mlx5 driver needs to be able to issue invalidations to ODP MRs even if it
cannot allocate memory. To this end it preallocates emergency pages to use
when the situation arises. This flow should be rare enough that we don't need
to worry about contention, and therefore a single emergency page is good
enough.

Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
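The mlx5_ib_get_xlt_emergency_page() and mlx5_ib_put_xlt_emergency_page()
helpers called in the hunks below are defined outside this file and are not
part of the diff shown here. A minimal sketch of the idea, assuming the
helpers simply hand out one preallocated page under a mutex (the static
variable names are illustrative, not taken from the patch):

#include <linux/gfp.h>
#include <linux/mutex.h>

/* Sketch only: one page set aside at init time, serialized by a mutex.
 * The fallback path is rare, so a single page and a sleeping lock are
 * acceptable; the real definitions may differ.
 */
static unsigned long xlt_emergency_page;
static DEFINE_MUTEX(xlt_emergency_page_mutex);

unsigned long mlx5_ib_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return xlt_emergency_page;
}

void mlx5_ib_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}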
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 820f93439b0c..87e8b3339ddd 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -985,7 +985,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 {
 	struct mlx5_ib_dev *dev = mr->dev;
 	struct device *ddev = dev->ib_dev.dev.parent;
-	struct mlx5_ib_ucontext *uctx = NULL;
 	int size;
 	void *xlt;
 	dma_addr_t dma;
@@ -1001,6 +1000,7 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	size_t pages_to_map = 0;
 	size_t pages_iter = 0;
 	gfp_t gfp;
+	bool use_emergency_page = false;
 
 	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
 	 * so we need to align the offset and length accordingly
@@ -1027,12 +1027,11 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	}
 
 	if (!xlt) {
-		uctx = to_mucontext(mr->ibmr.pd->uobject->context);
 		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
+		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
 		size = PAGE_SIZE;
-		xlt = (void *)uctx->upd_xlt_page;
-		mutex_lock(&uctx->upd_xlt_page_mutex);
 		memset(xlt, 0, size);
+		use_emergency_page = true;
 	}
 	pages_iter = size / desc_size;
 	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
@@ -1096,8 +1095,8 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
 
 free_xlt:
-	if (uctx)
-		mutex_unlock(&uctx->upd_xlt_page_mutex);
+	if (use_emergency_page)
+		mlx5_ib_put_xlt_emergency_page();
 	else
 		free_pages((unsigned long)xlt, get_order(size));
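For the above to work, the emergency page has to be carved out once while
memory is still plentiful, typically at module load, and released on unload.
A rough sketch under the same assumptions as the earlier snippet (these
function names are hypothetical and not part of the patch):

/* Sketch only: preallocate the single emergency page so the ODP
 * invalidation path never has to hit the page allocator, and free it
 * when the driver goes away.
 */
static int mlx5_ib_alloc_xlt_emergency_page(void)
{
	xlt_emergency_page = __get_free_page(GFP_KERNEL);
	if (!xlt_emergency_page)
		return -ENOMEM;

	return 0;
}

static void mlx5_ib_free_xlt_emergency_page(void)
{
	free_page(xlt_emergency_page);
}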