Diffstat (limited to 'drivers/infiniband/sw/rxe')
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig      |  3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_cq.c     |  5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c     |  1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c    | 18
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_queue.h  | 94
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c    |  3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c   |  5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c  | 67
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h  |  2
9 files changed, 77 insertions(+), 121 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index a0c6c7dfc181..452149066792 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -2,10 +2,9 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
- depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
+ depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRYPTO_CRC32
- select DMA_VIRT_OPS
help
This driver implements the InfiniBand RDMA transport over
the Linux network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 43394c3f29d4..b315ebf041ac 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -123,11 +123,6 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
- /* make sure all changes to the CQ are written before we update the
- * producer pointer
- */
- smp_wmb();
-
advance_producer(cq->queue);
spin_unlock_irqrestore(&cq->cq_lock, flags);
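
The smp_wmb() is not simply dropped here: with this series, advance_producer() (see the rxe_queue.h hunk below) publishes the index with smp_store_release(), which already orders the memcpy of the CQE before the index update becomes visible. A minimal before/after sketch of that equivalence, for illustration only (slot and next stand in for producer_addr(cq->queue) and the masked next index):

	/* before: explicit barrier between payload store and index store */
	memcpy(slot, cqe, sizeof(*cqe));
	smp_wmb();
	q->buf->producer_index = next;

	/* after: the release store itself orders the payload first */
	memcpy(slot, cqe, sizeof(*cqe));
	smp_store_release(&q->buf->producer_index, next);
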
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index d2ce852447c1..6e8c41567ba0 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -31,7 +31,6 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
return 0;
case RXE_MEM_TYPE_MR:
- case RXE_MEM_TYPE_FMR:
if (iova < mem->iova ||
length > mem->length ||
iova > mem->iova + mem->length - length)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 34bef7d8e6b4..c4b06ced30a7 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -8,7 +8,6 @@
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
-#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
@@ -20,18 +19,6 @@
static struct rxe_recv_sockets recv_sockets;
-struct device *rxe_dma_device(struct rxe_dev *rxe)
-{
- struct net_device *ndev;
-
- ndev = rxe->ndev;
-
- if (is_vlan_dev(ndev))
- ndev = vlan_dev_real_dev(ndev);
-
- return ndev->dev.parent;
-}
-
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
int err;
@@ -166,14 +153,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct udphdr *udph;
struct net_device *ndev = skb->dev;
- struct net_device *rdev = ndev;
struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
- if (!rxe && is_vlan_dev(rdev)) {
- rdev = vlan_dev_real_dev(ndev);
- rxe = rxe_get_dev_from_net(rdev);
- }
if (!rxe)
goto drop;
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 7d434a6837a7..2902ca7b288c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -7,9 +7,11 @@
#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H
+/* for definition of shared struct rxe_queue_buf */
+#include <uapi/rdma/rdma_user_rxe.h>
+
/* implements a simple circular buffer that can optionally be
* shared between user space and the kernel and can be resized
-
* the requested element size is rounded up to a power of 2
* and the number of elements in the buffer is also rounded
* up to a power of 2. Since the queue is empty when the
@@ -17,28 +19,6 @@
* of the queue is one less than the number of element slots
*/
-/* this data structure is shared between user space and kernel
- * space for those cases where the queue is shared. It contains
- * the producer and consumer indices. Is also contains a copy
- * of the queue size parameters for user space to use but the
- * kernel must use the parameters in the rxe_queue struct
- * this MUST MATCH the corresponding librxe struct
- * for performance reasons arrange to have producer and consumer
- * pointers in separate cache lines
- * the kernel should always mask the indices to avoid accessing
- * memory outside of the data area
- */
-struct rxe_queue_buf {
- __u32 log2_elem_size;
- __u32 index_mask;
- __u32 pad_1[30];
- __u32 producer_index;
- __u32 pad_2[31];
- __u32 consumer_index;
- __u32 pad_3[31];
- __u8 data[];
-};
-
struct rxe_queue {
struct rxe_dev *rxe;
struct rxe_queue_buf *buf;
@@ -46,7 +26,7 @@ struct rxe_queue {
size_t buf_size;
size_t elem_size;
unsigned int log2_elem_size;
- unsigned int index_mask;
+ u32 index_mask;
};
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
@@ -76,26 +56,56 @@ static inline int next_index(struct rxe_queue *q, int index)
static inline int queue_empty(struct rxe_queue *q)
{
- return ((q->buf->producer_index - q->buf->consumer_index)
- & q->index_mask) == 0;
+ u32 prod;
+ u32 cons;
+
+ /* make sure all changes to queue complete before
+ * testing queue empty
+ */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ /* same */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+
+ return ((prod - cons) & q->index_mask) == 0;
}
static inline int queue_full(struct rxe_queue *q)
{
- return ((q->buf->producer_index + 1 - q->buf->consumer_index)
- & q->index_mask) == 0;
+ u32 prod;
+ u32 cons;
+
+ /* make sure all changes to queue complete before
+ * testing queue full
+ */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ /* same */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+
+ return ((prod + 1 - cons) & q->index_mask) == 0;
}
static inline void advance_producer(struct rxe_queue *q)
{
- q->buf->producer_index = (q->buf->producer_index + 1)
- & q->index_mask;
+ u32 prod;
+
+ prod = (q->buf->producer_index + 1) & q->index_mask;
+
+ /* make sure all changes to queue complete before
+ * changing producer index
+ */
+ smp_store_release(&q->buf->producer_index, prod);
}
static inline void advance_consumer(struct rxe_queue *q)
{
- q->buf->consumer_index = (q->buf->consumer_index + 1)
- & q->index_mask;
+ u32 cons;
+
+ cons = (q->buf->consumer_index + 1) & q->index_mask;
+
+ /* make sure all changes to queue complete before
+ * changing consumer index
+ */
+ smp_store_release(&q->buf->consumer_index, cons);
}
static inline void *producer_addr(struct rxe_queue *q)
@@ -112,12 +122,28 @@ static inline void *consumer_addr(struct rxe_queue *q)
static inline unsigned int producer_index(struct rxe_queue *q)
{
- return q->buf->producer_index;
+ u32 index;
+
+ /* make sure all changes to queue
+ * complete before getting producer index
+ */
+ index = smp_load_acquire(&q->buf->producer_index);
+ index &= q->index_mask;
+
+ return index;
}
static inline unsigned int consumer_index(struct rxe_queue *q)
{
- return q->buf->consumer_index;
+ u32 index;
+
+ /* make sure all changes to queue
+ * complete before getting consumer index
+ */
+ index = smp_load_acquire(&q->buf->consumer_index);
+ index &= q->index_mask;
+
+ return index;
}
static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
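
Taken together, the ring is now a single-producer/single-consumer queue in which each side plain-reads the index it owns and acquire-loads the other side's. A condensed sketch of how the new primitives pair up, reusing the header's own helpers (producer_addr(), consumer_addr()); this illustrates the pattern and is not code from the patch:

	/* producer: fill the slot, then release-publish the index so a
	 * consumer that acquire-loads producer_index also sees the data
	 */
	static void ring_push(struct rxe_queue *q, const void *elem)
	{
		u32 prod = q->buf->producer_index;	/* producer-owned */

		memcpy(producer_addr(q), elem, q->elem_size);
		smp_store_release(&q->buf->producer_index,
				  (prod + 1) & q->index_mask);
	}

	/* consumer: acquire-load the producer index, then read the slot */
	static int ring_pop(struct rxe_queue *q, void *elem)
	{
		u32 cons = q->buf->consumer_index;	/* consumer-owned */
		u32 prod = smp_load_acquire(&q->buf->producer_index);

		if (((prod - cons) & q->index_mask) == 0)
			return -EAGAIN;			/* queue empty */

		memcpy(elem, consumer_addr(q), q->elem_size);
		smp_store_release(&q->buf->consumer_index,
				  (cons + 1) & q->index_mask);
		return 0;
	}

Each index has exactly one writer, so plain loads of one's own index are safe; only the cross-side loads need acquire semantics, and both stores keep the index masked so a shared user-space mapping can never steer it outside the data area.
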
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index af3923bf0a36..d4917646641a 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -634,7 +634,8 @@ next_wqe:
}
if (unlikely(qp_type(qp) == IB_QPT_RC &&
- qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
+ psn_compare(qp->req.psn, (qp->comp.psn +
+ RXE_MAX_UNACKED_PSNS)) > 0)) {
qp->req.wait_psn = 1;
goto exit;
}
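
The plain `>` was broken by wraparound: PSNs are 24-bit sequence numbers, so once qp->req.psn wraps past zero it compares numerically smaller than qp->comp.psn even though it is logically ahead. psn_compare() uses the classic serial-number trick of sign-extending the 24-bit difference; a sketch of the idea (the driver's helper in rxe_verbs.h is of this shape):

	/* wraparound-safe 24-bit comparison: shift the difference into
	 * the top 24 bits so the sign bit reflects modular distance
	 */
	static inline int psn_compare(u32 psn_a, u32 psn_b)
	{
		s32 diff = (psn_a - psn_b) << 8;

		return diff;	/* >0: a after b, <0: a before b, 0: equal */
	}

Worked example: with psn_a = 0x000002 just past the wrap and psn_b = 0xfffffe, the difference mod 2^24 is 4, so (psn_a - psn_b) << 8 evaluates to 0x400 > 0 and a is correctly seen as four PSNs ahead, where a plain `>` would call it behind.
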
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c7e3b6a4af38..5a098083a9d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -872,11 +872,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
else
wc->network_hdr_type = RDMA_NETWORK_IPV6;
- if (is_vlan_dev(skb->dev)) {
- wc->wc_flags |= IB_WC_WITH_VLAN;
- wc->vlan_id = vlan_dev_vlan_id(skb->dev);
- }
-
if (pkt->mask & RXE_IMMDT_MASK) {
wc->wc_flags |= IB_WC_WITH_IMM;
wc->ex.imm_data = immdt_imm(pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index f9c832e82552..a031514e2f41 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -244,11 +244,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
- /* make sure all changes to the work queue are written before we
- * update the producer pointer
- */
- smp_wmb();
-
advance_producer(rq->queue);
return 0;
@@ -265,6 +260,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
struct rxe_srq *srq = to_rsrq(ibsrq);
struct rxe_create_srq_resp __user *uresp = NULL;
+ if (init->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
if (udata) {
if (udata->outlen < sizeof(*uresp))
return -EINVAL;
@@ -392,6 +390,9 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
uresp = udata->outbuf;
}
+ if (init->create_flags)
+ return ERR_PTR(-EOPNOTSUPP);
+
err = rxe_qp_chk_init(rxe, init);
if (err)
goto err1;
@@ -433,6 +434,9 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_qp *qp = to_rqp(ibqp);
+ if (mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err)
goto err1;
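
This check, and the srq_type and create_flags checks above, follow the convention the RDMA core has been converging on: features the driver does not implement are rejected up front with -EOPNOTSUPP, keeping -EINVAL for genuinely malformed input. Distilled to its shape:

	/* accept only the attribute bits this driver implements; an
	 * extension bit from newer user space is refused loudly
	 * instead of being silently ignored
	 */
	if (mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

The rxe_create_cq() hunk below applies the same reasoning, turning a nonzero attr->flags from -EINVAL into -EOPNOTSUPP.
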
@@ -624,12 +628,6 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
if (unlikely(err))
goto err1;
- /*
- * make sure all changes to the work queue are
- * written before we update the producer pointer
- */
- smp_wmb();
-
advance_producer(sq->queue);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -765,7 +763,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
if (attr->flags)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err)
@@ -1033,7 +1031,7 @@ static ssize_t parent_show(struct device *device,
struct rxe_dev *rxe =
rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
+ return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR_RO(parent);
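
sysfs_emit() is the preferred way to fill a sysfs show() buffer: it knows the buffer sysfs passes in is a page-aligned PAGE_SIZE page, warns if that assumption is violated, and caps output accordingly, whereas scnprintf(buf, PAGE_SIZE, ...) simply trusts the caller. A hypothetical attribute showing the shape (not from this patch):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* sysfs_emit() WARNs on a non-page-aligned buf and never
		 * writes more than PAGE_SIZE bytes
		 */
		return sysfs_emit(buf, "%d\n", 42);
	}
	static DEVICE_ATTR_RO(example);
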
@@ -1070,6 +1068,7 @@ static const struct ib_device_ops rxe_dev_ops = {
.create_cq = rxe_create_cq,
.create_qp = rxe_create_qp,
.create_srq = rxe_create_srq,
+ .create_user_ah = rxe_create_ah,
.dealloc_driver = rxe_dealloc,
.dealloc_pd = rxe_dealloc_pd,
.dealloc_ucontext = rxe_dealloc_ucontext,
@@ -1118,56 +1117,18 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int err;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- u64 dma_mask;
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
dev->num_comp_vectors = num_possible_cpus();
- dev->dev.parent = rxe_dma_device(rxe);
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
- dev->dev.dma_parms = &rxe->dma_parms;
- dma_set_max_seg_size(&dev->dev, UINT_MAX);
- dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
- err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
- if (err)
- return err;
- dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
- | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
- | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
- | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
- | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
- | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
- | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
- | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
- | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
- ;
+ dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
+ BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
ib_set_device_ops(dev, &rxe_dev_ops);
err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 3414b341b709..79e0a5a878da 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -273,7 +273,6 @@ enum rxe_mem_type {
RXE_MEM_TYPE_NONE,
RXE_MEM_TYPE_DMA,
RXE_MEM_TYPE_MR,
- RXE_MEM_TYPE_FMR,
RXE_MEM_TYPE_MW,
};
@@ -352,7 +351,6 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
- struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;