author    Christoph Hellwig <hch@lst.de>    2020-11-06 19:19:38 +0100
committer Jason Gunthorpe <jgg@nvidia.com>  2020-11-17 15:22:07 -0400
commit    5a7a9e038b032137ae9c45d5429f18a2ffdf7d42 (patch)
tree      39f0d285c032b578c125978129add2e57803e5ab /drivers/infiniband/sw
parent    rds: stop using dmapool (diff)
RDMA/core: remove use of dma_virt_ops
Use the ib_dma_* helpers to skip the DMA translation instead. This removes the last user of dma_virt_ops and keeps the weird layering violation inside the RDMA core instead of burdening the DMA mapping subsystem with it. This also means the software RDMA drivers now don't have to mess with DMA parameters that are not relevant to them at all, and that in the future we can use PCI P2P transfers even for software RDMA, as there is no longer a first fake layer of DMA mapping that the P2P DMA support would have to bypass.

Link: https://lore.kernel.org/r/20201106181941.1878556-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
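The ib_dma_* fallback the message refers to lives in the core part of this series (include/rdma/ib_verbs.h), outside the drivers/infiniband/sw diff shown below. A minimal sketch of the idea, assuming the helper shape introduced by that companion change rather than quoting it verbatim:

/*
 * Sketch: when a software RDMA device registers without a DMA-capable
 * parent (dma_device == NULL), the ib_dma_* wrappers hand back the kernel
 * virtual address instead of going through the DMA mapping subsystem.
 */
static inline bool ib_uses_virt_dma(struct ib_device *dev)
{
	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}

static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)cpu_addr;	/* no translation needed */
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

With this in place the three software providers (rdmavt, rxe, siw) no longer need DMA_VIRT_OPS, a dma_parms structure, or a coerced DMA mask, which is exactly what the hunks below remove.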
Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--  drivers/infiniband/sw/rdmavt/Kconfig   | 1
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c      | 6
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c      | 8
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig      | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  | 7
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h  | 1
-rw-r--r--  drivers/infiniband/sw/siw/Kconfig      | 1
-rw-r--r--  drivers/infiniband/sw/siw/siw.h        | 1
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c   | 7
9 files changed, 2 insertions, 31 deletions
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index c8e268082952..0df48b3a6b56 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -4,6 +4,5 @@ config INFINIBAND_RDMAVT
depends on INFINIBAND_VIRT_DMA
depends on X86_64
depends on PCI
- select DMA_VIRT_OPS
help
This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 8490fdb9c91e..90fc234f489a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -324,8 +324,6 @@ static void __rvt_free_mr(struct rvt_mr *mr)
* @acc: access flags
*
* Return: the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the functions in
- * struct dma_virt_ops.
*/
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
@@ -766,7 +764,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
/*
* We use LKEY == zero for kernel virtual addresses
- * (see rvt_get_dma_mr() and dma_virt_ops).
+ * (see rvt_get_dma_mr()).
*/
if (sge->lkey == 0) {
struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
@@ -877,7 +875,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
/*
* We use RKEY == zero for kernel virtual addresses
- * (see rvt_get_dma_mr() and dma_virt_ops).
+ * (see rvt_get_dma_mr()).
*/
rcu_read_lock();
if (rkey == 0) {
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 5bd817490b1f..49cec85a372a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -525,7 +525,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
int rvt_register_device(struct rvt_dev_info *rdi)
{
int ret = 0, i;
- u64 dma_mask;
if (!rdi)
return -EINVAL;
@@ -580,13 +579,6 @@ int rvt_register_device(struct rvt_dev_info *rdi)
/* Completion queues */
spin_lock_init(&rdi->n_cqs_lock);
- /* DMA Operations */
- rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
- ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
- if (ret)
- goto bail_wss;
-
/* Protection Domain */
spin_lock_init(&rdi->n_pds_lock);
rdi->n_pds_allocated = 0;
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 8810bfa68049..452149066792 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -5,7 +5,6 @@ config RDMA_RXE
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRYPTO_CRC32
- select DMA_VIRT_OPS
help
This driver implements the InfiniBand RDMA transport over
the Linux network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index bbc89002c456..a2bd91aaa5de 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1128,7 +1128,6 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int err;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- u64 dma_mask;
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
@@ -1139,12 +1138,6 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
- dev->dev.dma_parms = &rxe->dma_parms;
- dma_set_max_seg_size(&dev->dev, UINT_MAX);
- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
- err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
- if (err)
- return err;
dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 57967fc39c04..79e0a5a878da 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -351,7 +351,6 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
- struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index 3450ba5081df..1b5105cbabae 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -2,7 +2,6 @@ config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
depends on INET && INFINIBAND && LIBCRC32C
depends on INFINIBAND_VIRT_DMA
- select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index e9753831ac3f..adda78996219 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -69,7 +69,6 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
- struct device_dma_parameters dma_parms;
struct net_device *netdev;
struct siw_dev_cap attrs;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index ac6756227677..d9de062852c4 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -306,7 +306,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
struct siw_device *sdev = NULL;
struct ib_device *base_dev;
struct device *parent = netdev->dev.parent;
- u64 dma_mask;
int rv;
if (!parent) {
@@ -361,12 +360,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
*/
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
- base_dev->dev.dma_parms = &sdev->dma_parms;
- dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
- if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
- goto error;
-
base_dev->num_comp_vectors = num_possible_cpus();
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);