author     Jason Gunthorpe <jgg@nvidia.com>   2022-09-01 11:20:56 -0300
committer  Jason Gunthorpe <jgg@nvidia.com>   2022-09-27 10:15:24 -0300
commit     72b2f7608a59727e7c2e5b11cff2749c2c080fac (patch)
tree       cdd1d28a40e6fa5611a92645c865d78c45435011 /drivers/infiniband/hw/mlx5/devx.c
parent     RDMA/mlx5: Add support for dmabuf to devx umem (diff)
RDMA/mlx5: Enable ATS support for MRs and umems
For mlx5, if ATS is enabled in the PCI config, the device will use ATS requests only for certain DMA operations. These have to be opted into by the SW side based on the mkey or umem settings.

ATS slows down PCI performance, so it should only be set in cases where it is needed. All of these cases revolve around optimizing PCI P2P transfers and avoiding bad cases where the bus just doesn't work.

Link: https://lore.kernel.org/r/4-v1-bd147097458e+ede-umem_dmabuf_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
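For context, the opt-in ends up as a single check when building the umem command: if the device supports ATS and the caller's access flags ask for it, the new "ats" bit is set in the umem mailbox (see the devx_umem_reg_cmd_alloc hunk below). The helper itself is not part of this diff; what follows is only a hedged sketch of what such a check might look like, and both the MLX5_CAP_GEN(ats) capability name and the use of IB_ACCESS_RELAXED_ORDERING as the opt-in signal are assumptions here, not something this patch states.

/*
 * Hedged sketch only -- the real mlx5_umem_needs_ats() is defined elsewhere
 * in the driver and is not shown in this diff.  The capability bit name and
 * the choice of IB_ACCESS_RELAXED_ORDERING as the userspace opt-in are
 * assumptions for illustration.
 */
static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
                                       struct ib_umem *umem, int access_flags)
{
        /* The device must advertise ATS support at all (assumed cap bit). */
        if (!MLX5_CAP_GEN(dev->mdev, ats))
                return false;

        /*
         * ATS costs PCI performance, so only enable it when the caller
         * explicitly asked for it via the access flags.
         */
        return access_flags & IB_ACCESS_RELAXED_ORDERING;
}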
Diffstat (limited to 'drivers/infiniband/hw/mlx5/devx.c')
-rw-r--r--   drivers/infiniband/hw/mlx5/devx.c   37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index a41e8d582f5b..2211a0be16f3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2160,26 +2160,17 @@ err:

 static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
                          struct uverbs_attr_bundle *attrs,
-                         struct devx_umem *obj)
+                         struct devx_umem *obj, u32 access_flags)
 {
         u64 addr;
         size_t size;
-        u32 access;
         int err;

         if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
             uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
                 return -EFAULT;

-        err = uverbs_get_flags32(&access, attrs,
-                                 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
-                                 IB_ACCESS_LOCAL_WRITE |
-                                 IB_ACCESS_REMOTE_WRITE |
-                                 IB_ACCESS_REMOTE_READ);
-        if (err)
-                return err;
-
-        err = ib_check_mr_access(&dev->ib_dev, access);
+        err = ib_check_mr_access(&dev->ib_dev, access_flags);
         if (err)
                 return err;

@@ -2193,12 +2184,12 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
                         return -EFAULT;

                 umem_dmabuf = ib_umem_dmabuf_get_pinned(
-                        &dev->ib_dev, addr, size, dmabuf_fd, access);
+                        &dev->ib_dev, addr, size, dmabuf_fd, access_flags);
                 if (IS_ERR(umem_dmabuf))
                         return PTR_ERR(umem_dmabuf);
                 obj->umem = &umem_dmabuf->umem;
         } else {
-                obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
+                obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
                 if (IS_ERR(obj->umem))
                         return PTR_ERR(obj->umem);
         }
@@ -2240,7 +2231,8 @@ static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
 static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
                                    struct uverbs_attr_bundle *attrs,
                                    struct devx_umem *obj,
-                                   struct devx_umem_reg_cmd *cmd)
+                                   struct devx_umem_reg_cmd *cmd,
+                                   int access)
 {
         unsigned long pgsz_bitmap;
         unsigned int page_size;
@@ -2289,6 +2281,9 @@ static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
         MLX5_SET(umem, umem, page_offset,
                  ib_umem_dma_offset(obj->umem, page_size));

+        if (mlx5_umem_needs_ats(dev, obj->umem, access))
+                MLX5_SET(umem, umem, ats, 1);
+
         mlx5_ib_populate_pas(obj->umem, page_size, mtt,
                              (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
                              MLX5_IB_MTT_READ);
@@ -2306,20 +2301,30 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
         struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
                 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
         struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+        int access_flags;
         int err;

         if (!c->devx_uid)
                 return -EINVAL;

+        err = uverbs_get_flags32(&access_flags, attrs,
+                                 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+                                 IB_ACCESS_LOCAL_WRITE |
+                                 IB_ACCESS_REMOTE_WRITE |
+                                 IB_ACCESS_REMOTE_READ |
+                                 IB_ACCESS_RELAXED_ORDERING);
+        if (err)
+                return err;
+
         obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
         if (!obj)
                 return -ENOMEM;

-        err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
+        err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
         if (err)
                 goto err_obj_free;

-        err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
+        err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
         if (err)
                 goto err_umem_release;
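From userspace, the newly accepted access bits reach this handler through the ordinary devx umem registration path in rdma-core. A minimal usage sketch follows, under the assumption (not stated by this patch) that passing IBV_ACCESS_RELAXED_ORDERING is what lets the kernel consider ATS for the umem:

#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Hedged usage sketch: register a devx umem with the access flags now
 * accepted by MLX5_IB_METHOD_DEVX_UMEM_REG.  Whether relaxed ordering is
 * the ATS opt-in signal is an assumption for illustration only. */
static struct mlx5dv_devx_umem *reg_devx_umem(struct ibv_context *ctx,
                                              void *buf, size_t len)
{
        uint32_t access = IBV_ACCESS_LOCAL_WRITE |
                          IBV_ACCESS_REMOTE_READ |
                          IBV_ACCESS_REMOTE_WRITE |
                          IBV_ACCESS_RELAXED_ORDERING;

        /* mlx5dv_devx_umem_reg() issues the DEVX_UMEM_REG method handled
         * above; the flags land in the uverbs_get_flags32() call added by
         * this patch. */
        return mlx5dv_devx_umem_reg(ctx, buf, len, access);
}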