author      Artemy Kovalyov <artemyko@mellanox.com>    2019-12-22 14:46:47 +0200
committer   Jason Gunthorpe <jgg@mellanox.com>         2020-01-03 17:00:13 -0400
commit      cbe4b8f0a5766a40563876932cba6c9bf28eb98a (patch)
tree        c147c63d13be73964e2e7086d6c2842edb58e4d4 /drivers/infiniband/hw/mlx5/odp.c
parent      IB/rdmavt: Correct comments in rdmavt_qp.h header (diff)
IB/mlx5: Unify ODP MR code paths to allow extra flexibility
Building an MR translation table in the ODP case requires additional flexibility, namely random access to DMA addresses. Make both direct and indirect ODP MRs use the same code path, separated from the non-ODP MR code path. With the restructuring, the correct page_shift is now used around __mlx5_ib_populate_pas().

Fixes: d2183c6f1958 ("RDMA/umem: Move page_shift from ib_umem to ib_odp_umem")
Link: https://lore.kernel.org/r/20191222124649.52300-2-leon@kernel.org
Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat
-rw-r--r--    drivers/infiniband/hw/mlx5/odp.c    |    42
1 file changed, 40 insertions, 2 deletions
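
To make the translation concrete before the diff: the sketch below is a minimal, userspace-only illustration of what the new umem_dma_to_mtt()/populate_mtt() helpers in this patch do, namely turning per-page DMA addresses (whose low bits carry the ODP access permissions) into big-endian MTT entries read at an arbitrary index. The ODP_*/MLX5_IB_MTT_* bit values, the mock dma_list contents, and the use of glibc's htobe64()/be64toh() in place of the kernel's cpu_to_be64() are assumptions made for the example, not the authoritative kernel definitions.

/*
 * Userspace sketch of the ODP DMA-address -> MTT-entry translation added
 * by this patch.  Bit values below are assumptions for illustration; the
 * kernel's own headers define the real ones and may differ.
 */
#include <endian.h>   /* htobe64()/be64toh(), glibc */
#include <stdint.h>
#include <stdio.h>

#define ODP_READ_ALLOWED_BIT    (1ULL << 0)   /* assumed bit position */
#define ODP_WRITE_ALLOWED_BIT   (1ULL << 1)   /* assumed bit position */
#define ODP_DMA_ADDR_MASK       (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
#define MLX5_IB_MTT_READ        (1ULL << 0)   /* assumed bit position */
#define MLX5_IB_MTT_WRITE       (1ULL << 1)   /* assumed bit position */

/* Mirrors the shape of umem_dma_to_mtt() in the diff below. */
static uint64_t umem_dma_to_mtt(uint64_t umem_dma)
{
        uint64_t mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

        if (umem_dma & ODP_READ_ALLOWED_BIT)
                mtt_entry |= MLX5_IB_MTT_READ;
        if (umem_dma & ODP_WRITE_ALLOWED_BIT)
                mtt_entry |= MLX5_IB_MTT_WRITE;

        return mtt_entry;
}

int main(void)
{
        /* Mock per-page dma_list: address in the high bits, permissions low. */
        uint64_t dma_list[] = {
                0x100000000ULL | ODP_READ_ALLOWED_BIT,
                0x100001000ULL | ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT,
                0x100002000ULL,                    /* no access bits set yet */
                0x100003000ULL | ODP_READ_ALLOWED_BIT,
        };
        uint64_t pas[4];
        size_t idx = 0, nentries = 4, i;

        /* As in populate_mtt(): random access at (idx + i), big-endian output. */
        for (i = 0; i < nentries; i++)
                pas[i] = htobe64(umem_dma_to_mtt(dma_list[idx + i]));

        for (i = 0; i < nentries; i++)
                printf("mtt[%zu] = 0x%016llx\n", i,
                       (unsigned long long)be64toh(pas[i]));
        return 0;
}

The loop reads entries directly at dma_list[idx + i], which is the "random access to DMA addresses" the commit message refers to; the non-ODP path, by contrast, builds its PAS array by walking the umem's scatterlist, which is why ODP MRs get a separate code path here.
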
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f924250f80c2..92da6c4f7ddd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -93,8 +93,8 @@ struct mlx5_pagefault {
 static u64 mlx5_imr_ksm_entries;
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
-                           struct mlx5_ib_mr *imr, int flags)
+static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+                         struct mlx5_ib_mr *imr, int flags)
 {
         struct mlx5_klm *end = pklm + nentries;
@@ -144,6 +144,44 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
         }
 }
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+        u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+        if (umem_dma & ODP_READ_ALLOWED_BIT)
+                mtt_entry |= MLX5_IB_MTT_READ;
+        if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+                mtt_entry |= MLX5_IB_MTT_WRITE;
+
+        return mtt_entry;
+}
+
+static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
+                         struct mlx5_ib_mr *mr, int flags)
+{
+        struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+        dma_addr_t pa;
+        size_t i;
+
+        if (flags & MLX5_IB_UPD_XLT_ZAP)
+                return;
+
+        for (i = 0; i < nentries; i++) {
+                pa = odp->dma_list[idx + i];
+                pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+        }
+}
+
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+                           struct mlx5_ib_mr *mr, int flags)
+{
+        if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+                populate_klm(xlt, idx, nentries, mr, flags);
+        } else {
+                populate_mtt(xlt, idx, nentries, mr, flags);
+        }
+}
+
 static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
 {
         struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);