author    Aharon Landau <aharonl@nvidia.com>  2022-04-12 10:24:01 +0300
committer Jason Gunthorpe <jgg@nvidia.com>    2022-04-25 11:53:00 -0300
commit    6f0689fdf19ed3aca3ee3910223ad27216640693 (patch)
tree      3812738a8124b3c25b339b61ade90a4142d2b86f /drivers/infiniband/hw/mlx5/umr.h
parent    RDMA/mlx5: Expose wqe posting helpers outside of wr.c (diff)
RDMA/mlx5: Introduce mlx5_umr_post_send_wait()
Introduce mlx5_umr_post_send_wait(), which uses a UMR-adjusted flow for posting WQEs. The next patches will gradually move UMR operations to this flow; once done, mlx5_ib_post_send_wait() will be removed.

mlx5_umr_post_send_wait() receives already-written WQE segments and only memcpys them into the SQ. This way, we avoid packing all the data into a WR just to unpack it into the WQE.

Link: https://lore.kernel.org/r/f027dd592fde62402b2d49efded8d1d22229d22b.1649747695.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
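As a rough sketch of the calling pattern the message describes (not part of this page's diff — the post/wait helper's signature and the set_*_seg() helpers below are assumptions; only struct mlx5r_umr_wqe is introduced by this patch, see the diff below):

/* Hedged sketch, not from this patch: the caller writes the WQE
 * segments up front and the post/wait helper only memcpys them into
 * the SQ.  The signature of mlx5r_umr_post_send_wait() and the
 * set_*_seg() helpers are assumptions for illustration.
 */
static int mlx5r_umr_example(struct mlx5_ib_dev *dev, u32 mkey)
{
	struct mlx5r_umr_wqe wqe = {};

	set_reg_umr_ctrl_seg(&wqe.ctrl_seg);	/* hypothetical helper */
	set_reg_mkey_seg(&wqe.mkey_seg);	/* hypothetical helper */
	set_reg_data_seg(&wqe.data_seg);	/* hypothetical helper */

	/* Post the prebuilt WQE and sleep until its completion arrives. */
	return mlx5r_umr_post_send_wait(dev, mkey, &wqe, true);
}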
Diffstat (limited to '')
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.h | 12 ++++++++++++
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index 0fe6cdd633d4..d984213caf60 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -79,4 +79,16 @@ int mlx5r_umr_set_umr_ctrl_seg(struct mlx5_ib_dev *dev,
 			       struct mlx5_wqe_umr_ctrl_seg *umr,
 			       const struct ib_send_wr *wr);
 
+struct mlx5r_umr_context {
+	struct ib_cqe cqe;
+	enum ib_wc_status status;
+	struct completion done;
+};
+
+struct mlx5r_umr_wqe {
+	struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
+	struct mlx5_mkey_seg mkey_seg;
+	struct mlx5_wqe_data_seg data_seg;
+};
+
 #endif /* _MLX5_IB_UMR_H */
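For context, the mlx5r_umr_context added above pairs an ib_cqe completion handler with a struct completion so the posting thread can sleep until the UMR WQE finishes. A minimal sketch of that pattern follows; the callback name and the posting step are illustrative assumptions, not shown in this diff:

/* Illustrative sketch, not part of this patch: only the struct
 * fields come from the diff above.
 */
#include <linux/completion.h>
#include <rdma/ib_verbs.h>

static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5r_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5r_umr_context, cqe);

	/* Record the completion status and wake the waiting poster. */
	context->status = wc->status;
	complete(&context->done);
}

static int mlx5r_umr_wait_example(void)
{
	struct mlx5r_umr_context umr_context;

	umr_context.cqe.done = mlx5r_umr_done;
	init_completion(&umr_context.done);

	/* ... copy the prebuilt WQE segments to the SQ with
	 * &umr_context.cqe as the wr_cqe, then ring the doorbell ...
	 */

	wait_for_completion(&umr_context.done);
	return umr_context.status == IB_WC_SUCCESS ? 0 : -EFAULT;
}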