Diffstat (limited to 'drivers/infiniband/sw')
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.c     |  2
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c     |  2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c     | 84
-rw-r--r--  drivers/infiniband/sw/rdmavt/rc.c     |  9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c  |  8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c    |  2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_param.h |  7
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c    | 18
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h |  2
-rw-r--r--  drivers/infiniband/sw/siw/siw.h       | 26
-rw-r--r--  drivers/infiniband/sw/siw/siw_cm.c    |  7
-rw-r--r--  drivers/infiniband/sw/siw/siw_cq.c    |  2
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c  |  8
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c   |  4
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp.c    | 13
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_rx.c |  6
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_tx.c |  2
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 61
18 files changed, 131 insertions, 132 deletions
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 13d7f66eadab..5724cbbe38b1 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
if (cq->ip)
kref_put(&cq->ip->ref, rvt_release_mmap_info);
else
- vfree(cq->queue);
+ vfree(cq->kqueue);
}
/**
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index b9a76bf74857..72f6534fbb52 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags);
+ umem = ib_umem_get(pd->device, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
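Note: rdmavt here (and rxe_mr.c further down) pick up the new ib_umem_get() signature, which takes the ib_device directly instead of deriving it from ib_udata. A minimal, hypothetical caller sketch (demo_reg_user_mr and its error handling are illustrative, not part of the patch):

    static struct ib_mr *demo_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                          int access)
    {
            struct ib_umem *umem;

            /* resolve the pinning device from the PD, not from udata */
            umem = ib_umem_get(pd->device, start, length, access);
            if (IS_ERR(umem))
                    return ERR_CAST(umem);

            /* ... build driver MR state from the umem page list ... */

            ib_umem_release(umem);          /* teardown/error path */
            return ERR_PTR(-ENOMEM);        /* placeholder */
    }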
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 3cdf75d0c7a4..7858d499db03 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -61,6 +61,8 @@
#define RVT_RWQ_COUNT_THRESHOLD 16
static void rvt_rc_timeout(struct timer_list *t);
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type);
/*
* Convert the AETH RNR timeout code into the number of microseconds.
@@ -452,40 +454,41 @@ no_qp_table:
}
/**
- * free_all_qps - check for QPs still in use
+ * rvt_free_qp_cb - callback function to reset a qp
+ * @qp: the qp to reset
+ * @v: a 64-bit value
+ *
+ * This function resets the qp and removes it from the
+ * qp hash table.
+ */
+static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
+{
+ unsigned int *qp_inuse = (unsigned int *)v;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ /* Reset the qp and remove it from the qp hash list */
+ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
+
+ /* Increment the qp_inuse count */
+ (*qp_inuse)++;
+}
+
+/**
+ * rvt_free_all_qps - check for QPs still in use
* @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
+ * Return the number of QPs still in use.
*/
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
- unsigned long flags;
- struct rvt_qp *qp;
- unsigned n, qp_inuse = 0;
- spinlock_t *ql; /* work around too long line below */
-
- if (rdi->driver_f.free_all_qps)
- qp_inuse = rdi->driver_f.free_all_qps(rdi);
+ unsigned int qp_inuse = 0;
qp_inuse += rvt_mcast_tree_empty(rdi);
- if (!rdi->qp_dev)
- return qp_inuse;
-
- ql = &rdi->qp_dev->qpt_lock;
- spin_lock_irqsave(ql, flags);
- for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
- lockdep_is_held(ql));
- RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
+ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
- for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(ql)))
- qp_inuse++;
- }
- spin_unlock_irqrestore(ql, flags);
- synchronize_rcu();
return qp_inuse;
}
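Note: the open-coded walk of the QP hash table is replaced by the existing rvt_qp_iter() helper plus a per-QP callback, keeping the locking details inside rdmavt. The same pattern can carry any per-QP accounting; a hypothetical sketch (demo_* names are illustrative):

    static void demo_count_rc_cb(struct rvt_qp *qp, u64 v)
    {
            unsigned int *count = (unsigned int *)v;

            if (qp->ibqp.qp_type == IB_QPT_RC)
                    (*count)++;
    }

    static unsigned int demo_count_rc_qps(struct rvt_dev_info *rdi)
    {
            unsigned int count = 0;

            /* rvt_qp_iter() visits every QP on the device and calls the callback */
            rvt_qp_iter(rdi, (u64)&count, demo_count_rc_cb);
            return count;
    }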
@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
}
/**
- * rvt_reset_qp - initialize the QP state to the reset state
+ * _rvt_reset_qp - initialize the QP state to the reset state
* @qp: the QP to reset
* @type: the QP type
*
* r_lock, s_hlock, and s_lock are required to be held by the caller
*/
-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- enum ib_qp_type type)
+static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
__must_hold(&qp->s_lock)
__must_hold(&qp->s_hlock)
__must_hold(&qp->r_lock)
@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
lockdep_assert_held(&qp->s_lock);
}
+/**
+ * rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: the device info
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
+ * before calling _rvt_reset_qp().
+ */
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+{
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ _rvt_reset_qp(rdi, qp, type);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+}
+
/** rvt_free_qpn - Free a qpn from the bit map
* @qpt: QP table
* @qpn: queue pair number to free
@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
switch (new_state) {
case IB_QPS_RESET:
if (qp->state != IB_QPS_RESET)
- rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
break;
case IB_QPS_RTR:
@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_hlock);
- spin_lock(&qp->s_lock);
rvt_reset_qp(rdi, qp, ibqp->qp_type);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->s_hlock);
- spin_unlock_irq(&qp->r_lock);
wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */
diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
index 890d7b760d2e..977906cc0d11 100644
--- a/drivers/infiniband/sw/rdmavt/rc.c
+++ b/drivers/infiniband/sw/rdmavt/rc.c
@@ -195,7 +195,14 @@ void rvt_get_credit(struct rvt_qp *qp, u32 aeth)
}
EXPORT_SYMBOL(rvt_get_credit);
-/* rvt_restart_sge - rewind the sge state for a wqe */
+/**
+ * rvt_restart_sge - rewind the sge state for a wqe
+ * @ss: the sge state pointer
+ * @wqe: the wqe to rewind
+ * @len: the data length from the start of the wqe in bytes
+ *
+ * Returns the remaining data length.
+ */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
{
ss->sge = wqe->sg_list[0];
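Note: the added kernel-doc pins down the contract: the caller passes the number of bytes already consumed from the start of the WQE, and the helper rewinds the SGE state to that offset and returns what remains. A rough retry-path sketch (the byte math and local names are illustrative only):

    struct rvt_sge_state ss;
    u32 done = npkts_acked * pmtu;          /* bytes already delivered */
    u32 remaining;

    remaining = rvt_restart_sge(&ss, wqe, done);
    /* remaining == wqe->length - done; ss now points at the retry offset */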
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 116cafc9afcf..4bc88708b355 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
return COMPST_ERROR_RETRY;
@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
if (pkt) {
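Note: in this version of rxe, the second argument of rxe_run_task() chooses between deferring the task to its tasklet and running it inline; the completer now kicks the requester directly rather than scheduling it. Roughly (a paraphrase of the helper as assumed here, not part of the patch):

    void rxe_run_task(struct rxe_task *task, int sched)
    {
            if (sched)
                    tasklet_schedule(&task->tasklet);   /* defer to softirq */
            else
                    rxe_do_task((unsigned long)task);   /* run the state machine now */
    }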
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 35a2baf2f364..e83c7b518bfa 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access);
+ umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 353c6668249e..f59616b02477 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -34,6 +34,8 @@
#ifndef RXE_PARAM_H
#define RXE_PARAM_H
+#include <uapi/rdma/rdma_user_rxe.h>
+
static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
{
if (mtu < 256)
@@ -64,7 +66,6 @@ enum rxe_device_param {
RXE_PAGE_SIZE_CAP = 0xfffff000,
RXE_MAX_QP = 0x10000,
RXE_MAX_QP_WR = 0x4000,
- RXE_MAX_INLINE_DATA = 400,
RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
| IB_DEVICE_BAD_QKEY_CNTR
| IB_DEVICE_AUTO_PATH_MIG
@@ -77,6 +78,10 @@ enum rxe_device_param {
| IB_DEVICE_MEM_MGT_EXTENSIONS
| IB_DEVICE_ALLOW_USER_UNREG,
RXE_MAX_SGE = 32,
+ RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) +
+ sizeof(struct ib_sge) * RXE_MAX_SGE,
+ RXE_MAX_INLINE_DATA = RXE_MAX_WQE_SIZE -
+ sizeof(struct rxe_send_wqe),
RXE_MAX_SGE_RD = 32,
RXE_MAX_CQ = 16384,
RXE_MAX_LOG_CQE = 15,
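Note: the inline-data limit is now derived from the SGE array that already sits in every send WQE instead of being an unrelated constant. Assuming the usual 16-byte struct ib_sge (u64 addr, u32 length, u32 lkey), the numbers work out to:

    RXE_MAX_WQE_SIZE    = sizeof(struct rxe_send_wqe) + 32 * 16    /* header + SGE array */
    RXE_MAX_INLINE_DATA = RXE_MAX_WQE_SIZE - sizeof(struct rxe_send_wqe)
                        = 32 * 16 = 512 bytes                      /* up from the old 400 */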
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index e2c6d1cedf41..ec21f616ac98 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -237,19 +237,17 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
*/
qp->src_port = RXE_ROCE_V2_SPORT +
(hash_32_generic(qp_num(qp), 14) & 0x3fff);
-
qp->sq.max_wr = init->cap.max_send_wr;
- qp->sq.max_sge = init->cap.max_send_sge;
- qp->sq.max_inline = init->cap.max_inline_data;
- wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
- qp->sq.max_sge * sizeof(struct ib_sge),
- sizeof(struct rxe_send_wqe) +
- qp->sq.max_inline);
+ /* These caps are limited by rxe_qp_chk_cap() done by the caller */
+ wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+ init->cap.max_inline_data);
+ qp->sq.max_sge = init->cap.max_send_sge =
+ wqe_size / sizeof(struct ib_sge);
+ qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
+ wqe_size += sizeof(struct rxe_send_wqe);
- qp->sq.queue = rxe_queue_init(rxe,
- &qp->sq.max_wr,
- wqe_size);
+ qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
if (!qp->sq.queue)
return -ENOMEM;
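Note: the net effect is that both caps are rounded up to whichever of the two is larger before the queue element size is fixed. For a hypothetical request of max_send_sge = 4 and max_inline_data = 128 (again assuming a 16-byte struct ib_sge):

    wqe_size   = max(4 * 16, 128) = 128
    max_sge    = 128 / 16 = 8                   /* written back into init->cap */
    max_inline = 128
    wqe_size  += sizeof(struct rxe_send_wqe)    /* final per-WQE element size */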
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 95834206c80c..92de39c4a7c1 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -408,7 +408,7 @@ struct rxe_dev {
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* guard mmap_offset */
- int mmap_offset;
+ u64 mmap_offset;
atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index b939f489cd46..af5e9f8c0fcd 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -7,6 +7,7 @@
#define _SIW_H
#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
@@ -209,7 +210,6 @@ struct siw_cq {
u32 cq_put;
u32 cq_get;
u32 num_cqe;
- bool kernel_verbs;
struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
u32 id; /* For debugging only */
};
@@ -254,8 +254,8 @@ struct siw_srq {
u32 rq_get;
u32 num_rqe; /* max # of wqe's allowed */
struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
- char armed; /* inform user if limit hit */
- char kernel_verbs; /* '1' if kernel client */
+ bool armed:1; /* inform user if limit hit */
+ bool is_kernel_res:1; /* true if kernel client */
};
struct siw_qp_attrs {
@@ -418,13 +418,11 @@ struct siw_iwarp_tx {
};
struct siw_qp {
+ struct ib_qp base_qp;
struct siw_device *sdev;
- struct ib_qp *ib_qp;
struct kref ref;
- u32 qp_num;
struct list_head devq;
int tx_cpu;
- bool kernel_verbs;
struct siw_qp_attrs attrs;
struct siw_cep *cep;
@@ -472,11 +470,6 @@ struct siw_qp {
struct rcu_head rcu;
};
-struct siw_base_qp {
- struct ib_qp base_qp;
- struct siw_qp *qp;
-};
-
/* helper macros */
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
@@ -572,14 +565,9 @@ static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}
-static inline struct siw_base_qp *to_siw_base_qp(struct ib_qp *base_qp)
-{
- return container_of(base_qp, struct siw_base_qp, base_qp);
-}
-
static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
- return to_siw_base_qp(base_qp)->qp;
+ return container_of(base_qp, struct siw_qp, base_qp);
}
static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
@@ -624,7 +612,7 @@ static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
static inline u32 qp_id(struct siw_qp *qp)
{
- return qp->qp_num;
+ return qp->base_qp.qp_num;
}
static inline void siw_qp_get(struct siw_qp *qp)
@@ -735,7 +723,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
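Note: with struct ib_qp embedded directly in struct siw_qp, container_of() recovers the driver QP, and the core's restrack entry answers the kernel-vs-user question that the removed kernel_verbs/qp_num fields tracked by hand. A minimal illustration of the pattern for a hypothetical driver object:

    #include <rdma/ib_verbs.h>
    #include <rdma/restrack.h>

    struct demo_qp {
            struct ib_qp base_qp;           /* embedded core object */
            /* driver-private state ... */
    };

    static inline struct demo_qp *to_demo_qp(struct ib_qp *base_qp)
    {
            return container_of(base_qp, struct demo_qp, base_qp);
    }

    static inline bool demo_qp_is_kernel(struct demo_qp *qp)
    {
            /* restrack records whether the resource came from a kernel ULP
             * or through uverbs, so no private flag is needed */
            return rdma_is_kernel_res(&qp->base_qp.res);
    }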
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 3bccfef40e7e..c5651a96b196 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -29,7 +29,7 @@
* MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
*/
static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
-static const bool relaxed_ird_negotiation = 1;
+static const bool relaxed_ird_negotiation = true;
static void siw_cm_llp_state_change(struct sock *s);
static void siw_cm_llp_data_ready(struct sock *s);
@@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
- if (!cep) {
- WARN_ON(1);
+ if (!cep)
goto out;
- }
+
siw_dbg_cep(cep, "state: %d\n", cep->state);
switch (cep->state) {
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index d8db3bee9da7..d68e37859e73 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -65,7 +65,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
* reaped here, which do not hold a QP reference
* and do not qualify for memory extension verbs.
*/
- if (likely(cq->kernel_verbs)) {
+ if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
if (cqe->flags & SIW_WQE_REM_INVAL) {
wc->ex.invalidate_rkey = cqe->inval_stag;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index c147f0613d95..5cd40fb9e20c 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -244,7 +244,7 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
* siw_qp_id2obj() increments object reference count
*/
siw_qp_put(qp);
- return qp->ib_qp;
+ return &qp->base_qp;
}
return NULL;
}
@@ -388,6 +388,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
{ .max_segment_size = SZ_2G };
base_dev->num_comp_vectors = num_possible_cpus();
+ xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
+
ib_set_device_ops(base_dev, &siw_device_ops);
rv = ib_device_set_netdev(base_dev, netdev, 1);
if (rv)
@@ -415,9 +418,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
sdev->attrs.max_srq_sge = SIW_MAX_SGE;
- xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
- xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
-
INIT_LIST_HEAD(&sdev->cep_list);
INIT_LIST_HEAD(&sdev->qp_list);
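Note: initializing the XArrays earlier presumably guarantees they are valid before any later failure or lookup path touches them. The ID allocation siw performs in siw_qp_add() (below) is the stock XArray allocator pattern; a self-contained sketch:

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC1(demo_xa);   /* allocating XArray, IDs start at 1 */

    static int demo_store(void *object, u32 *out_id)
    {
            /* picks a free 32-bit ID and stores the entry under it */
            return xa_alloc(&demo_xa, out_id, object, xa_limit_32b, GFP_KERNEL);
    }

    static void *demo_lookup(u32 id)
    {
            return xa_load(&demo_xa, id);
    }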
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e99983f07663..e2061dc0b043 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -63,7 +63,7 @@ struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index)
static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
bool dirty)
{
- put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
+ unpin_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
}
void siw_umem_release(struct siw_umem *umem, bool dirty)
@@ -426,7 +426,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
while (nents) {
struct page **plist = &umem->page_chunk[i].plist[got];
- rv = get_user_pages(first_page_va, nents,
+ rv = pin_user_pages(first_page_va, nents,
foll_flags | FOLL_LONGTERM,
plist, NULL);
if (rv < 0)
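Note: siw moves to the pin_user_pages*() family, the interface intended for long-term pins that are eventually released with unpin_user_pages_dirty_lock(). The pairing, in sketch form (demo-only, error handling trimmed):

    struct page *pages[16];
    long pinned;

    /* FOLL_LONGTERM: the pin may outlive the system call */
    pinned = pin_user_pages(user_va, ARRAY_SIZE(pages),
                            FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
    if (pinned > 0) {
            /* ... DMA to/from the pinned pages ... */
            unpin_user_pages_dirty_lock(pages, pinned, true);   /* dirty + unpin */
    }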
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index b4317480cee7..875d36d4b1c6 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1070,8 +1070,8 @@ int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
cqe->imm_data = 0;
cqe->bytes = bytes;
- if (cq->kernel_verbs)
- cqe->base_qp = qp->ib_qp;
+ if (rdma_is_kernel_res(&cq->base_cq.res))
+ cqe->base_qp = &qp->base_qp;
else
cqe->qp_id = qp_id(qp);
@@ -1128,8 +1128,8 @@ int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
cqe->imm_data = 0;
cqe->bytes = bytes;
- if (cq->kernel_verbs) {
- cqe->base_qp = qp->ib_qp;
+ if (rdma_is_kernel_res(&cq->base_cq.res)) {
+ cqe->base_qp = &qp->base_qp;
if (inval_stag) {
cqe_flags |= SIW_WQE_REM_INVAL;
cqe->inval_stag = inval_stag;
@@ -1297,13 +1297,12 @@ void siw_rq_flush(struct siw_qp *qp)
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
{
- int rv = xa_alloc(&sdev->qp_xa, &qp->ib_qp->qp_num, qp, xa_limit_32b,
+ int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
GFP_KERNEL);
if (!rv) {
kref_init(&qp->ref);
qp->sdev = sdev;
- qp->qp_num = qp->ib_qp->qp_num;
siw_dbg_qp(qp, "new QP\n");
}
return rv;
@@ -1312,7 +1311,6 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
void siw_free_qp(struct kref *ref)
{
struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
- struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
struct siw_device *sdev = qp->sdev;
unsigned long flags;
@@ -1335,5 +1333,4 @@ void siw_free_qp(struct kref *ref)
atomic_dec(&sdev->num_qp);
siw_dbg_qp(qp, "free QP\n");
kfree_rcu(qp, rcu);
- kfree(siw_base_qp);
}
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index c0a887240325..9ccce2909ac4 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -68,7 +68,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
return -EFAULT;
}
if (srx->mpa_crc_hd) {
- if (rx_qp(srx)->kernel_verbs) {
+ if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
crypto_shash_update(srx->mpa_crc_hd,
(u8 *)(dest + pg_off), bytes);
kunmap_atomic(dest);
@@ -388,7 +388,7 @@ static struct siw_wqe *siw_rqe_get(struct siw_qp *qp)
struct siw_rqe *rqe2 = &srq->recvq[off];
if (!(rqe2->flags & SIW_WQE_VALID)) {
- srq->armed = 0;
+ srq->armed = false;
srq_event = true;
}
}
@@ -1264,7 +1264,7 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
if (wc_status == SIW_WC_SUCCESS)
wc_status = SIW_WC_GENERAL_ERR;
- } else if (qp->kernel_verbs &&
+ } else if (rdma_is_kernel_res(&qp->base_qp.res) &&
rx_type(wqe) == SIW_OP_READ_LOCAL_INV) {
/*
* Handle any STag invalidation request
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 5d97bba0ce6d..ae92c8080967 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -817,7 +817,7 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
}
} else {
wqe->bytes = wqe->sqe.sge[0].length;
- if (!qp->kernel_verbs) {
+ if (!rdma_is_kernel_res(&qp->base_qp.res)) {
if (wqe->bytes > SIW_MAX_INLINE) {
rv = -EINVAL;
goto tx_error;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 5fd6d6499b3d..07e30138aaa1 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -303,7 +303,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct ib_udata *udata)
{
struct siw_qp *qp = NULL;
- struct siw_base_qp *siw_base_qp = NULL;
struct ib_device *base_dev = pd->device;
struct siw_device *sdev = to_siw_dev(base_dev);
struct siw_ucontext *uctx =
@@ -357,26 +356,16 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = -EINVAL;
goto err_out;
}
- siw_base_qp = kzalloc(sizeof(*siw_base_qp), GFP_KERNEL);
- if (!siw_base_qp) {
- rv = -ENOMEM;
- goto err_out;
- }
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
rv = -ENOMEM;
goto err_out;
}
- siw_base_qp->qp = qp;
- qp->ib_qp = &siw_base_qp->base_qp;
-
init_rwsem(&qp->state_lock);
spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
spin_lock_init(&qp->orq_lock);
- qp->kernel_verbs = !udata;
-
rv = siw_qp_add(sdev, qp);
if (rv)
goto err_out;
@@ -389,10 +378,10 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
- if (qp->kernel_verbs)
- qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
- else
+ if (udata)
qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+ else
+ qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
if (qp->sendq == NULL) {
siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
@@ -419,13 +408,14 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
*/
qp->srq = to_siw_srq(attrs->srq);
qp->attrs.rq_size = 0;
- siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
+ siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
+ qp->base_qp.qp_num);
} else if (num_rqe) {
- if (qp->kernel_verbs)
- qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
- else
+ if (udata)
qp->recvq =
vmalloc_user(num_rqe * sizeof(struct siw_rqe));
+ else
+ qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
if (qp->recvq == NULL) {
siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
@@ -492,13 +482,11 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags);
- return qp->ib_qp;
+ return &qp->base_qp;
err_out_xa:
xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
- kfree(siw_base_qp);
-
if (qp) {
if (uctx) {
rdma_user_mmap_entry_remove(qp->sq_entry);
@@ -742,7 +730,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
unsigned long flags;
int rv = 0;
- if (wr && !qp->kernel_verbs) {
+ if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
*bad_wr = wr;
return -EINVAL;
@@ -939,7 +927,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
if (rv <= 0)
goto skip_direct_sending;
- if (qp->kernel_verbs) {
+ if (rdma_is_kernel_res(&qp->base_qp.res)) {
rv = siw_sq_start(qp);
} else {
qp->tx_ctx.in_syscall = 1;
@@ -984,8 +972,8 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
*bad_wr = wr;
return -EOPNOTSUPP; /* what else from errno.h? */
}
- if (!qp->kernel_verbs) {
- siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+ if (!rdma_is_kernel_res(&qp->base_qp.res)) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
*bad_wr = wr;
return -EINVAL;
}
@@ -1127,14 +1115,13 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
cq->base_cq.cqe = size;
cq->num_cqe = size;
- if (!udata) {
- cq->kernel_verbs = 1;
- cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
- sizeof(struct siw_cq_ctrl));
- } else {
+ if (udata)
cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
sizeof(struct siw_cq_ctrl));
- }
+ else
+ cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl));
+
if (cq->queue == NULL) {
rv = -ENOMEM;
goto err_out;
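Note: the reordered branches keep the existing rule: a queue that user space will mmap() is allocated with vmalloc_user() (zeroed and safe to remap to user space), while kernel-only consumers get plain vzalloc(). In sketch form (nr_elem/elem_size are illustrative):

    void *queue;

    if (udata)              /* user verbs: buffer will be mmap()ed */
            queue = vmalloc_user(nr_elem * elem_size);
    else                    /* kernel ULP: ordinary zeroed vmalloc */
            queue = vzalloc(nr_elem * elem_size);
    if (!queue)
            return -ENOMEM;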
@@ -1589,9 +1576,9 @@ int siw_create_srq(struct ib_srq *base_srq,
srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
srq->limit = attrs->srq_limit;
if (srq->limit)
- srq->armed = 1;
+ srq->armed = true;
- srq->kernel_verbs = !udata;
+ srq->is_kernel_res = !udata;
if (udata)
srq->recvq =
@@ -1671,9 +1658,9 @@ int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
rv = -EINVAL;
goto out;
}
- srq->armed = 1;
+ srq->armed = true;
} else {
- srq->armed = 0;
+ srq->armed = false;
}
srq->limit = attrs->srq_limit;
}
@@ -1745,7 +1732,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
unsigned long flags;
int rv = 0;
- if (unlikely(!srq->kernel_verbs)) {
+ if (unlikely(!srq->is_kernel_res)) {
siw_dbg_pd(base_srq->pd,
"[SRQ]: no kernel post_recv for mapped srq\n");
rv = -EINVAL;
@@ -1797,7 +1784,7 @@ out:
void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
struct ib_event event;
- struct ib_qp *base_qp = qp->ib_qp;
+ struct ib_qp *base_qp = &qp->base_qp;
/*
* Do not report asynchronous errors on QP which gets