Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c               |   6
-rw-r--r--  drivers/infiniband/core/counters.c          |  10
-rw-r--r--  drivers/infiniband/core/nldev.c             |   3
-rw-r--r--  drivers/infiniband/core/restrack.c          |  15
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c  |   8
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h  |  11
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.c          |  12
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c       |  76
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c            |   4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c           |   6
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h        |  14
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c             |   7
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c            |  17
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c             |  24
-rw-r--r--  drivers/infiniband/sw/siw/siw.h             |   8
-rw-r--r--  drivers/infiniband/sw/siw/siw_cm.c          | 113
-rw-r--r--  drivers/infiniband/sw/siw/siw_cq.c          |   5
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c         |  14
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.h         |   2
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp.c          |   2
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_rx.c       |  26
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_tx.c       |  80
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c       |  40
23 files changed, 263 insertions(+), 240 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c7985c24f914..0e3cf3461999 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
if (ret)
goto err;
- cma_configfs_init();
+ ret = cma_configfs_init();
+ if (ret)
+ goto err_ib;
return 0;
+err_ib:
+ ib_unregister_client(&cma_client);
err:
unregister_netdevice_notifier(&cma_nb);
ib_sa_unregister_client(&sa_client);
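
Note on the cma.c change: a failing cma_configfs_init() now unwinds the ib client registered just before it, instead of returning success with a half-initialized module. This is the standard kernel "goto ladder": each fallible init step jumps to a label that undoes exactly the steps already completed, in reverse order. A minimal userspace sketch of the idiom (step names are illustrative, not the cma functions):

    #include <stdio.h>

    static int step_a_init(void) { puts("a up"); return 0; }
    static void step_a_exit(void) { puts("a down"); }
    static int step_b_init(void) { puts("b up"); return 0; }
    static void step_b_exit(void) { puts("b down"); }
    static int step_c_init(void) { puts("c up"); return -1; /* simulate failure */ }

    static int module_init_sketch(void)
    {
        int ret;

        ret = step_a_init();
        if (ret)
            goto err;
        ret = step_b_init();
        if (ret)
            goto err_a;
        ret = step_c_init();
        if (ret)
            goto err_b;    /* new failure path: undo b, then a */
        return 0;

    err_b:
        step_b_exit();
    err_a:
        step_a_exit();
    err:
        return ret;
    }

    int main(void)
    {
        return module_init_sketch() ? 1 : 0;
    }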
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 61fcb3a31340..680ad27f497d 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
struct auto_mode_param *param = &counter->mode.param;
bool match = true;
- if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res))
+ if (!rdma_is_visible_in_pid_ns(&qp->res))
return false;
- /* Ensure that counter belong to right PID */
- if (!rdma_is_kernel_res(&counter->res) &&
- !rdma_is_kernel_res(&qp->res) &&
- (task_pid_vnr(counter->res.task) != current->pid))
+ /* Ensure that counter belongs to the right PID */
+ if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
return false;
if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
return qp;
err:
- rdma_restrack_put(&qp->res);
+ rdma_restrack_put(res);
return NULL;
}
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index cc08218f1ef7..7a7474000100 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
if (!names[i])
continue;
- curr = rdma_restrack_count(device, i,
- task_active_pid_ns(current));
+ curr = rdma_restrack_count(device, i);
ret = fill_res_info_entry(msg, names[i], curr);
if (ret)
goto err;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index bddff426ee0f..a07665f7ef8c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
* rdma_restrack_count() - the current usage of specific object
* @dev: IB device
* @type: actual type of object to operate
- * @ns: PID namespace
*/
-int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
- struct pid_namespace *ns)
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
{
struct rdma_restrack_root *rt = &dev->res[type];
struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
xa_lock(&rt->xa);
xas_for_each(&xas, e, U32_MAX) {
- if (ns == &init_pid_ns ||
- (!rdma_is_kernel_res(e) &&
- ns == task_active_pid_ns(e->task)))
- cnt++;
+ if (!rdma_is_visible_in_pid_ns(e))
+ continue;
+ cnt++;
}
xa_unlock(&rt->xa);
return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
*/
if (rdma_is_kernel_res(res))
return task_active_pid_ns(current) == &init_pid_ns;
- return task_active_pid_ns(current) == task_active_pid_ns(res->task);
+
+ /* PID 0 means that resource is not found in current namespace */
+ return task_pid_vnr(res->task);
}
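
The counters.c and restrack.c hunks above converge on one visibility rule, rdma_is_visible_in_pid_ns(): kernel-owned resources are visible only from the init PID namespace, and user-owned resources are visible when the owning task has a nonzero PID in the caller's namespace (task_pid_vnr() returns 0 for a task outside it). A simplified userspace model of the filter-then-count loop; the struct and helpers are stand-ins, not the kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct res_model {
        bool kernel;   /* kernel-owned resource */
        int  pid_vnr;  /* PID as seen from the caller's namespace; 0 = not visible */
    };

    static bool in_init_ns = false;  /* stand-in for the caller's namespace check */

    static bool is_visible(const struct res_model *r)
    {
        if (r->kernel)
            return in_init_ns;   /* kernel resources: init namespace only */
        return r->pid_vnr != 0;  /* PID 0 => not in this namespace */
    }

    int main(void)
    {
        struct res_model table[] = {
            { .kernel = true,  .pid_vnr = 0 },
            { .kernel = false, .pid_vnr = 42 },
            { .kernel = false, .pid_vnr = 0 },  /* owned in another namespace */
        };
        int cnt = 0;

        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (!is_visible(&table[i]))
                continue;
            cnt++;
        }
        printf("visible: %d\n", cnt);  /* prints 1: only the pid 42 entry */
        return 0;
    }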
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f..60c8f76aab33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
spin_unlock_irqrestore(&cmdq->lock, flags);
return -EBUSY;
}
+
+ size = req->cmd_size;
+ /* Change cmd_size to the number of 16-byte cmdq units.
+ * req->cmd_size is modified here.
+ */
+ bnxt_qplib_set_cmd_slots(req);
+
memset(resp, 0, sizeof(*resp));
crsqe->resp = (struct creq_qp_event *)resp;
crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
preq = (u8 *)req;
- size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
do {
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb642..dfeadc192e17 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
do { \
memset(&(req), 0, sizeof((req))); \
(req).opcode = CMDQ_BASE_OPCODE_##CMD; \
- (req).cmd_size = (sizeof((req)) + \
- BNXT_QPLIB_CMDQE_UNITS - 1) / \
- BNXT_QPLIB_CMDQE_UNITS; \
+ (req).cmd_size = sizeof((req)); \
(req).flags = cpu_to_le16(cmd_flags); \
} while (0)
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
BNXT_QPLIB_CMDQE_UNITS);
}
+/* Round cmd_size up to a whole number of CMDQE units */
+static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+ req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
+}
+
#define MAX_CMDQ_IDX(depth) ((depth) - 1)
static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
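
With this split, the CREATE/INIT macro stores cmd_size in bytes and bnxt_qplib_set_cmd_slots() converts it to 16-byte CMDQE slots exactly once, just before queuing, so __send_message() no longer scales an already-scaled value. The conversion is plain round-up division; a standalone check of the arithmetic (assuming the 16-byte unit size named in the first hunk's comment):

    #include <assert.h>

    #define CMDQE_UNITS 16u

    static unsigned int bytes_to_slots(unsigned int bytes)
    {
        return (bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;  /* round up */
    }

    int main(void)
    {
        assert(bytes_to_slots(1)  == 1);  /* a partial unit still needs a slot */
        assert(bytes_to_slots(16) == 1);
        assert(bytes_to_slots(17) == 2);
        assert(bytes_to_slots(34) == 3);  /* e.g. a 34-byte request */
        return 0;
    }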
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 93613e5def9b..986c12153e62 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
if (!data)
return -ENOMEM;
copy = min(len, datalen - 1);
- if (copy_from_user(data, buf, copy))
- return -EFAULT;
+ if (copy_from_user(data, buf, copy)) {
+ ret = -EFAULT;
+ goto free_data;
+ }
ret = debugfs_file_get(file->f_path.dentry);
if (unlikely(ret))
- return ret;
+ goto free_data;
ptr = data;
token = ptr;
for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
ret = len;
debugfs_file_put(file->f_path.dentry);
+free_data:
kfree(data);
return ret;
}
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
return -ENOMEM;
ret = debugfs_file_get(file->f_path.dentry);
if (unlikely(ret))
- return ret;
+ goto free_data;
bit = find_first_bit(fault->opcodes, bitsize);
while (bit < bitsize) {
zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
data[size - 1] = '\n';
data[size] = '\0';
ret = simple_read_from_buffer(buf, len, pos, data, size);
+free_data:
kfree(data);
return ret;
}
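
Both fault.c hunks fix the same leak class: data is allocated early, and the old early returns skipped the kfree(). Routing every failure through a single free_data: label keeps allocation and release in one place. A compact userspace sketch of the pattern, with stand-in names:

    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>

    static int parse_input(const char *src, size_t len)
    {
        char *data;
        int ret;

        data = calloc(1, len + 1);
        if (!data)
            return -ENOMEM;

        if (!src) {              /* stand-in for copy_from_user() failing */
            ret = -EFAULT;
            goto free_data;      /* was: return -EFAULT => leaked data */
        }
        memcpy(data, src, len);

        ret = (int)len;          /* success: report bytes consumed */
    free_data:
        free(data);
        return ret;
    }

    int main(void)
    {
        return parse_input("0x1,0x2", 7) > 0 ? 0 : 1;
    }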
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 996fc298207e..6141f4edc6bf 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
hfi1_kern_clear_hw_flow(priv->rcd, qp);
}
-static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet *packet, u8 rcv_type,
- u8 opcode)
+static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
{
struct rvt_qp *qp = packet->qp;
- struct hfi1_qp_priv *qpriv = qp->priv;
- u32 ipsn;
- struct ib_other_headers *ohdr = packet->ohdr;
- struct rvt_ack_entry *e;
- struct tid_rdma_request *req;
- struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
- u32 i;
if (rcv_type >= RHF_RCV_TYPE_IB)
goto done;
@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
if (rcv_type == RHF_RCV_TYPE_EAGER) {
hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
hfi1_schedule_send(qp);
- goto done_unlock;
- }
-
- /*
- * For TID READ response, error out QP after freeing the tid
- * resources.
- */
- if (opcode == TID_OP(READ_RESP)) {
- ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
- if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
- cmp_psn(ipsn, qp->s_psn) < 0) {
- hfi1_kern_read_tid_flow_free(qp);
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto done;
- }
- goto done_unlock;
- }
-
- /*
- * Error out the qp for TID RDMA WRITE
- */
- hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
- for (i = 0; i < rvt_max_atomic(rdi); i++) {
- e = &qp->s_ack_queue[i];
- if (e->opcode == TID_OP(WRITE_REQ)) {
- req = ack_to_tid_req(e);
- hfi1_kern_exp_rcv_clear_all(req);
- }
}
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
- goto done;
-done_unlock:
+ /* Since no payload is delivered, just drop the packet */
spin_unlock(&qp->s_lock);
done:
return true;
@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
u32 fpsn;
lockdep_assert_held(&qp->r_lock);
+ spin_lock(&qp->s_lock);
/* If the psn is out of valid range, drop the packet */
if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
cmp_psn(ibpsn, qp->s_psn) > 0)
- return ret;
+ goto s_unlock;
- spin_lock(&qp->s_lock);
/*
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
* requests and implicitly NAK RDMA read and atomic requests issued
@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
wqe = do_rc_completion(qp, wqe, ibp);
if (qp->s_acked == qp->s_tail)
- break;
+ goto s_unlock;
}
+ if (qp->s_acked == qp->s_tail)
+ goto s_unlock;
+
/* Handle the eflags for the request */
if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
goto s_unlock;
@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
if (lnh == HFI1_LRH_GRH)
goto r_unlock;
- if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
+ if (tid_rdma_tid_err(packet, rcv_type))
goto r_unlock;
}
@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
*/
spin_lock(&qp->s_lock);
qpriv = qp->priv;
+ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
+ qpriv->r_tid_tail == qpriv->r_tid_head)
+ goto unlock;
e = &qp->s_ack_queue[qpriv->r_tid_tail];
+ if (e->opcode != TID_OP(WRITE_REQ))
+ goto unlock;
req = ack_to_tid_req(e);
+ if (req->comp_seg == req->cur_seg)
+ goto unlock;
flow = &req->flows[req->clear_tail];
trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
struct rvt_swqe *wqe;
struct tid_rdma_request *req;
struct tid_rdma_flow *flow;
- u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
+ u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
unsigned long flags;
u16 fidx;
@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
ack_kpsn--;
}
+ if (unlikely(qp->s_acked == qp->s_tail))
+ goto ack_op_err;
+
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
/* Drop stale ACK/NAK */
- if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
+ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
+ cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
goto ack_op_err;
while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ done:
switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
IB_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
+ if (!req->flows)
+ break;
flow = &req->flows[req->acked_tail];
+ flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+ if (cmp_psn(psn, flpsn) > 0)
+ break;
trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
flow);
req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
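
Several of the checks added above (the stale-ACK test, the PSN window test in handle_read_kdeth_eflags(), the flpsn comparison) lean on cmp_psn(), which orders packet sequence numbers modulo wraparound: negative, zero, or positive like memcmp, with values more than half the space apart treated as "behind". A self-contained sketch of that serial-number comparison using 24-bit IB PSNs; it illustrates the comparison style, not the exact hfi1 helper or its internal PSN width:

    #include <assert.h>
    #include <stdint.h>

    #define PSN_MASK 0xFFFFFFu  /* 24-bit PSN space */

    /* < 0 if a precedes b, 0 if equal, > 0 if a follows b, with wraparound */
    static int cmp_psn_sketch(uint32_t a, uint32_t b)
    {
        int32_t d = (int32_t)((a - b) & PSN_MASK);
        if (d & 0x800000)   /* sign-extend from bit 23 */
            d -= 0x1000000;
        return d;
    }

    int main(void)
    {
        assert(cmp_psn_sketch(5, 3) > 0);
        assert(cmp_psn_sketch(3, 5) < 0);
        assert(cmp_psn_sketch(7, 7) == 0);
        /* wraparound: 0x000002 is "after" 0xFFFFFE */
        assert(cmp_psn_sketch(0x000002, 0xFFFFFE) > 0);
        return 0;
    }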
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
}
- kfree(tun_qp->tx_ring);
- tun_qp->tx_ring = NULL;
i = MLX4_NUM_TUNNEL_BUFS;
err:
while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
+ kfree(tun_qp->tx_ring);
+ tun_qp->tx_ring = NULL;
kfree(tun_qp->ring);
tun_qp->ring = NULL;
return -ENOMEM;

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 07aecba16019..c6bc4aee7638 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- if (MLX5_CAP_GEN(mdev, pg))
+ if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
}
@@ -6179,6 +6179,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
+ mlx5_ib_internal_fill_odp_caps(dev);
+
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
@@ -6603,8 +6605,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
- mlx5_ib_internal_fill_odp_caps(dev);
-
return mlx5_ib_odp_init_one(dev);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6abfbf3a69b7..c3ea299fe6c9 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1477,4 +1477,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
+
+static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
+ bool do_modify_atomic)
+{
+ if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return false;
+
+ if (do_modify_atomic &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return false;
+
+ return true;
+}
#endif /* MLX5_IB_H */
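
The new helper centralizes the "may UMR be used here" decision that the mr.c, odp.c, and qp.c hunks below all query: UMR is off entirely when entity-size modification is disabled, and off for a given operation when it would need atomic-capable translation updates on hardware that forbids them. A userspace truth-table check of the same logic, with the capability bits reduced to a plain struct (stand-ins for MLX5_CAP_GEN()):

    #include <stdbool.h>
    #include <stdio.h>

    struct caps {
        bool umr_modify_entity_size_disabled;
        bool atomic;
        bool umr_modify_atomic_disabled;
    };

    static bool can_use_umr(const struct caps *c, bool do_modify_atomic)
    {
        if (c->umr_modify_entity_size_disabled)
            return false;
        if (do_modify_atomic && c->atomic && c->umr_modify_atomic_disabled)
            return false;
        return true;
    }

    int main(void)
    {
        struct caps c = { .atomic = true, .umr_modify_atomic_disabled = true };

        /* atomic-capable HW with UMR-atomic disabled: no UMR for atomic MRs */
        printf("atomic MR: %d\n", can_use_umr(&c, true));   /* 0 */
        /* ...but non-atomic updates may still go through UMR */
        printf("plain MR:  %d\n", can_use_umr(&c, false));  /* 1 */
        return 0;
    }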
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b7da619614e4..1eff031ef048 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1311,9 +1311,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
- (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
- !MLX5_CAP_GEN(dev->mdev, atomic));
+ use_umr = mlx5_ib_can_use_umr(dev, true);
if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1466,7 +1464,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
goto err;
}
- if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+ if (!mlx5_ib_can_use_umr(dev, true) ||
+ (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
/*
* UMR can't be used - MKey needs to be replaced.
*/
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 905936423a03..14fe94bcc788 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
memset(caps, 0, sizeof(*caps));
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!MLX5_CAP_GEN(dev->mdev, pg) ||
+ !mlx5_ib_can_use_umr(dev, true))
return;
caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
MLX5_CAP_GEN(dev->mdev, null_mkey) &&
- MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
return;
@@ -1549,8 +1551,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret = 0;
- if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
- ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return ret;
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1560,9 +1564,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
}
}
- if (!MLX5_CAP_GEN(dev->mdev, pg))
- return ret;
-
ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
return ret;
@@ -1570,7 +1571,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{
- if (!MLX5_CAP_GEN(dev->mdev, pg))
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
return;
mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e96dcdfd6c3a..8937d72ddcf6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4161,7 +4161,7 @@ static u64 get_xlt_octo(u64 bytes)
MLX5_IB_UMR_OCTOWORD;
}
-static __be64 frwr_mkey_mask(void)
+static __be64 frwr_mkey_mask(bool atomic)
{
u64 result;
@@ -4174,10 +4174,12 @@ static __be64 frwr_mkey_mask(void)
MLX5_MKEY_MASK_LW |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_SMALL_FENCE |
MLX5_MKEY_MASK_FREE;
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
return cpu_to_be64(result);
}
@@ -4203,7 +4205,7 @@ static __be64 sig_mkey_mask(void)
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr, u8 flags)
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
@@ -4211,7 +4213,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
umr->flags = flags;
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->mkey_mask = frwr_mkey_mask();
+ umr->mkey_mask = frwr_mkey_mask(atomic);
}
static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4810,10 +4812,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
+ if (!mlx5_ib_can_use_umr(dev, atomic)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Fast update of %s for MR is disabled\n",
+ (MLX5_CAP_GEN(dev->mdev,
+ umr_modify_entity_size_disabled)) ?
+ "entity size" :
+ "atomic access");
+ return -EINVAL;
+ }
+
if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
mlx5_ib_warn(to_mdev(qp->ibqp.device),
"Invalid IB_SEND_INLINE send flag\n");
@@ -4825,7 +4839,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
if (umr_inline)
flags |= MLX5_UMR_INLINE;
- set_reg_umr_seg(*seg, mr, flags);
+ set_reg_umr_seg(*seg, mr, flags, atomic);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 77b1aabf6ff3..dba4535494ab 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -138,9 +138,9 @@ struct siw_umem {
};
struct siw_pble {
- u64 addr; /* Address of assigned user buffer */
- u64 size; /* Size of this entry */
- u64 pbl_off; /* Total offset from start of PBL */
+ dma_addr_t addr; /* Address of assigned buffer */
+ unsigned int size; /* Size of this entry */
+ unsigned long pbl_off; /* Total offset from start of PBL */
};
struct siw_pbl {
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
"MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
#define siw_dbg_cep(cep, fmt, ...) \
- ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \
+ ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
cep, __func__, ##__VA_ARGS__)
void siw_cq_flush(struct siw_cq *cq);
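
Throughout the siw hunks that follow, "%p" becomes "%pK" and several pointer prints are dropped outright. Plain %p has printed a hashed value since v4.15, but %pK is the documented specifier for possibly sensitive kernel pointers: it obeys the kptr_restrict sysctl, showing the real address only to sufficiently privileged readers and zeros otherwise. Kernel-style illustration, not standalone-compilable:

    pr_debug("cep at %p\n",  cep);   /* hashed value since v4.15 */
    pr_debug("cep at %pK\n", cep);   /* obeys kptr_restrict; may print zeros */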
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 9ce8a1b925d2..8c1931a57f4a 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
getname_local(cep->sock, &event.local_addr);
getname_peer(cep->sock, &event.remote_addr);
}
- siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n",
- cep->qp ? qp_id(cep->qp) : -1, id, reason, status);
+ siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
+ cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
return id->event_handler(id, &event);
}
@@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_get(new_cep);
new_s->sk->sk_user_data = new_cep;
- siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
-
if (siw_tcp_nagle == false) {
int val = 1;
@@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
cep = work->cep;
siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
- cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state);
+ cep->qp ? qp_id(cep->qp) : UINT_MAX,
+ work->type, cep->state);
siw_cep_set_inuse(cep);
@@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
}
if (release_cep) {
siw_dbg_cep(cep,
- "release: timer=%s, QP[%u], id 0x%p\n",
+ "release: timer=%s, QP[%u]\n",
cep->mpa_timer ? "y" : "n",
- cep->qp ? qp_id(cep->qp) : -1, cep->cm_id);
+ cep->qp ? qp_id(cep->qp) : UINT_MAX);
siw_cancel_mpatimer(cep);
@@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
else
delay = MPAREP_TIMEOUT;
}
- siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n",
- cep->qp ? qp_id(cep->qp) : -1, type, work, delay);
+ siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
+ cep->qp ? qp_id(cep->qp) : -1, type, delay);
queue_delayed_work(siw_cm_wq, &work->work, delay);
@@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
}
if (v4)
siw_dbg_qp(qp,
- "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
- id, pd_len,
+ "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
+ pd_len,
&((struct sockaddr_in *)(laddr))->sin_addr,
ntohs(((struct sockaddr_in *)(laddr))->sin_port),
&((struct sockaddr_in *)(raddr))->sin_addr,
ntohs(((struct sockaddr_in *)(raddr))->sin_port));
else
siw_dbg_qp(qp,
- "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
- id, pd_len,
+ "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
+ pd_len,
&((struct sockaddr_in6 *)(laddr))->sin6_addr,
ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
&((struct sockaddr_in6 *)(raddr))->sin6_addr,
@@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
if (rv >= 0) {
rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
if (!rv) {
- siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id,
- qp_id(qp));
+ siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
siw_cep_set_free(cep);
return 0;
}
}
error:
- siw_dbg_qp(qp, "failed: %d\n", rv);
+ siw_dbg(id->device, "failed: %d\n", rv);
if (cep) {
siw_socket_disassoc(s);
@@ -1540,7 +1538,8 @@ error:
} else if (s) {
sock_release(s);
}
- siw_qp_put(qp);
+ if (qp)
+ siw_qp_put(qp);
return rv;
}
@@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
siw_cancel_mpatimer(cep);
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
- siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
+ siw_dbg_cep(cep, "out of state\n");
siw_cep_set_free(cep);
siw_cep_put(cep);
@@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
up_write(&qp->state_lock);
goto error;
}
- siw_dbg_cep(cep, "id 0x%p\n", id);
+ siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
siw_dbg_cep(cep, "peer allows GSO on TX\n");
@@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
params->ird > sdev->attrs.max_ird) {
siw_dbg_cep(
cep,
- "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n",
- id, qp_id(qp), params->ord, sdev->attrs.max_ord,
+ "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
+ qp_id(qp), params->ord, sdev->attrs.max_ord,
params->ird, sdev->attrs.max_ird);
rv = -EINVAL;
up_write(&qp->state_lock);
@@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
if (params->private_data_len > max_priv_data) {
siw_dbg_cep(
cep,
- "id 0x%p, [QP %u]: private data length: %d (max %d)\n",
- id, qp_id(qp), params->private_data_len, max_priv_data);
+ "[QP %u]: private data length: %d (max %d)\n",
+ qp_id(qp), params->private_data_len, max_priv_data);
rv = -EINVAL;
up_write(&qp->state_lock);
goto error;
@@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
qp_attrs.flags = SIW_MPA_CRC;
qp_attrs.state = SIW_QP_STATE_RTS;
- siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp));
+ siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
/* Associate QP with CEP */
siw_cep_get(cep);
@@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
if (rv)
goto error;
- siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n",
- id, qp_id(qp), params->private_data_len);
+ siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
+ qp_id(qp), params->private_data_len);
rv = siw_send_mpareqrep(cep, params->private_data,
params->private_data_len);
@@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
siw_cancel_mpatimer(cep);
if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
- siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
+ siw_dbg_cep(cep, "out of state\n");
siw_cep_set_free(cep);
siw_cep_put(cep); /* put last reference */
return -ECONNRESET;
}
- siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state,
+ siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
pd_len);
if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
@@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
sizeof(s_val));
if (rv) {
- siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv);
+ siw_dbg(id->device, "setsockopt error: %d\n", rv);
goto error;
}
rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
sizeof(struct sockaddr_in) :
sizeof(struct sockaddr_in6));
if (rv) {
- siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv);
+ siw_dbg(id->device, "socket bind error: %d\n", rv);
goto error;
}
cep = siw_cep_alloc(sdev);
@@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
rv = siw_cm_alloc_work(cep, backlog);
if (rv) {
siw_dbg(id->device,
- "id 0x%p: alloc_work error %d, backlog %d\n", id,
+ "alloc_work error %d, backlog %d\n",
rv, backlog);
goto error;
}
rv = s->ops->listen(s, backlog);
if (rv) {
- siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv);
+ siw_dbg(id->device, "listen error %d\n", rv);
goto error;
}
cep->cm_id = id;
@@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
list_del(p);
- siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id,
- cep->state);
+ siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
siw_cep_set_inuse(cep);
@@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct net_device *dev = to_siw_dev(id->device)->netdev;
int rv = 0, listeners = 0;
- siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog);
+ siw_dbg(id->device, "backlog %d\n", backlog);
/*
* For each attached address of the interface, create a
@@ -1964,12 +1962,16 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in s_laddr, *s_raddr;
const struct in_ifaddr *ifa;
+ if (!in_dev) {
+ rv = -ENODEV;
+ goto out;
+ }
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
s_raddr = (struct sockaddr_in *)&id->remote_addr;
siw_dbg(id->device,
- "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n",
- id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
+ "laddr %pI4:%d, raddr %pI4:%d\n",
+ &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
&s_raddr->sin_addr, ntohs(s_raddr->sin_port));
rtnl_lock();
@@ -1993,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
*s_raddr = &to_sockaddr_in6(id->remote_addr);
+ if (!in6_dev) {
+ rv = -ENODEV;
+ goto out;
+ }
siw_dbg(id->device,
- "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n",
- id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
+ "laddr %pI6:%d, raddr %pI6:%d\n",
+ &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
&s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
- read_lock_bh(&in6_dev->lock);
+ rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- struct sockaddr_in6 bind_addr;
-
+ if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
+ continue;
if (ipv6_addr_any(&s_laddr->sin6_addr) ||
ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
- bind_addr.sin6_family = AF_INET6;
- bind_addr.sin6_port = s_laddr->sin6_port;
- bind_addr.sin6_flowinfo = 0;
- bind_addr.sin6_addr = ifp->addr;
- bind_addr.sin6_scope_id = dev->ifindex;
+ struct sockaddr_in6 bind_addr = {
+ .sin6_family = AF_INET6,
+ .sin6_port = s_laddr->sin6_port,
+ .sin6_flowinfo = 0,
+ .sin6_addr = ifp->addr,
+ .sin6_scope_id = dev->ifindex };
rv = siw_listen_address(id, backlog,
(struct sockaddr *)&bind_addr,
@@ -2017,28 +2024,26 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
listeners++;
}
}
- read_unlock_bh(&in6_dev->lock);
-
+ rtnl_unlock();
in6_dev_put(in6_dev);
} else {
- return -EAFNOSUPPORT;
+ rv = -EAFNOSUPPORT;
}
+out:
if (listeners)
rv = 0;
else if (!rv)
rv = -EINVAL;
- siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK");
+ siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
return rv;
}
int siw_destroy_listen(struct iw_cm_id *id)
{
- siw_dbg(id->device, "id 0x%p\n", id);
-
if (!id->provider_data) {
- siw_dbg(id->device, "id 0x%p: no cep(s)\n", id);
+ siw_dbg(id->device, "no cep(s)\n");
return 0;
}
siw_drop_listeners(id);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index e381ae9b7d62..d8db3bee9da7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
wc->wc_flags = IB_WC_WITH_INVALIDATE;
}
wc->qp = cqe->base_qp;
- siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n",
+ siw_dbg_cq(cq,
+ "idx %u, type %d, flags %2x, id 0x%pK\n",
cq->cq_get % cq->num_cqe, cqe->opcode,
- cqe->flags, (void *)cqe->id);
+ cqe->flags, (void *)(uintptr_t)cqe->id);
}
WRITE_ONCE(cqe->flags, 0);
cq->cq_get++;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..87a56039f0ef 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
*/
if (addr < mem->va || addr + len > mem->va + mem->len) {
siw_dbg_pd(pd, "MEM interval len %d\n", len);
- siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n",
- (unsigned long long)addr,
- (unsigned long long)(addr + len));
- siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n",
- (unsigned long long)mem->va,
- (unsigned long long)(mem->va + mem->len),
+ siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+ (void *)(uintptr_t)addr,
+ (void *)(uintptr_t)(addr + len));
+ siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+ (void *)(uintptr_t)mem->va,
+ (void *)(uintptr_t)(mem->va + mem->len),
mem->stag);
return -E_BASE_BOUNDS;
@@ -330,7 +330,7 @@ out:
* Optionally, provides remaining len within current element, and
* current PBL index for later resume at same element.
*/
-u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
{
int i = idx ? *idx : 0;
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index f43daf280891..db138c8423da 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -9,7 +9,7 @@
struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
void siw_umem_release(struct siw_umem *umem, bool dirty);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
-u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
+dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 0990307c5d2c..430314c8abd9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -949,7 +949,7 @@ skip_irq:
rv = -EINVAL;
goto out;
}
- wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
wqe->sqe.sge[0].lkey = 0;
wqe->sqe.num_sge = 1;
}
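
This one-line change is the theme of most of the remaining siw fixes: the work-queue structures carry addresses in u64 fields, and casting a pointer straight to (u64) warns on 32-bit builds where pointers are 4 bytes. Going through uintptr_t makes the round trip clean on both widths. A minimal sketch with a stand-in SGE struct:

    #include <assert.h>
    #include <stdint.h>

    struct sge_model {       /* stand-in for the wire-format SGE */
        uint64_t laddr;      /* address carried as a plain 64-bit value */
    };

    int main(void)
    {
        int payload = 42;
        struct sge_model sge;

        /* store: pointer -> uintptr_t -> u64 (widens cleanly on 32-bit) */
        sge.laddr = (uintptr_t)&payload;

        /* load: u64 -> uintptr_t -> pointer */
        int *p = (int *)(uintptr_t)sge.laddr;

        assert(*p == 42);
        return 0;
    }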
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index f87657a11657..c0a887240325 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
p = siw_get_upage(umem, dest_addr);
if (unlikely(!p)) {
- pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
+ pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
__func__, qp_id(rx_qp(srx)),
- (void *)dest_addr, (void *)umem->fp_addr);
+ (void *)(uintptr_t)dest_addr,
+ (void *)(uintptr_t)umem->fp_addr);
/* siw internal error */
srx->skb_copied += copied;
srx->skb_new -= copied;
@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
pg_off = dest_addr & ~PAGE_MASK;
bytes = min(len, (int)PAGE_SIZE - pg_off);
- siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
+ siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
dest = kmap_atomic(p);
rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
{
int rv;
- siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
+ siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
if (unlikely(rv)) {
- pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
+ pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
qp_id(rx_qp(srx)), __func__, len, kva, rv);
return rv;
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
while (len) {
int bytes;
- u64 buf_addr =
+ dma_addr_t buf_addr =
siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
if (!buf_addr)
break;
@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
mem_p = *mem;
if (mem_p->mem_obj == NULL)
rv = siw_rx_kva(srx,
- (void *)(sge->laddr + frx->sge_off),
- sge_bytes);
+ (void *)(uintptr_t)(sge->laddr + frx->sge_off),
+ sge_bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem,
sge->laddr + frx->sge_off, sge_bytes);
@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
if (mem->mem_obj == NULL)
rv = siw_rx_kva(srx,
- (void *)(srx->ddp_to + srx->fpdu_part_rcvd),
- bytes);
+ (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
+ bytes);
else if (!mem->is_pbl)
rv = siw_rx_umem(srx, mem->umem,
srx->ddp_to + srx->fpdu_part_rcvd, bytes);
@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
bytes = min(srx->fpdu_part_rem, srx->skb_new);
if (mem_p->mem_obj == NULL)
- rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed),
- bytes);
+ rv = siw_rx_kva(srx,
+ (void *)(uintptr_t)(sge->laddr + wqe->processed),
+ bytes);
else if (!mem_p->is_pbl)
rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
bytes);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index eb83fe183318..8e72f955921d 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
{
struct siw_pbl *pbl = mem->pbl;
u64 offset = addr - mem->va;
- u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+ dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
return virt_to_page(paddr);
@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
/*
* Copy short payload at provided destination payload address
*/
-static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
+static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
{
struct siw_wqe *wqe = &c_tx->wqe_active;
struct siw_sge *sge = &wqe->sqe.sge[0];
@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
return 0;
if (tx_flags(wqe) & SIW_WQE_INLINE) {
- memcpy((void *)paddr, &wqe->sqe.sge[1], bytes);
+ memcpy(paddr, &wqe->sqe.sge[1], bytes);
} else {
struct siw_mem *mem = wqe->mem[0];
if (!mem->mem_obj) {
/* Kernel client using kva */
- memcpy((void *)paddr, (void *)sge->laddr, bytes);
+ memcpy(paddr,
+ (const void *)(uintptr_t)sge->laddr, bytes);
} else if (c_tx->in_syscall) {
- if (copy_from_user((void *)paddr,
- (const void __user *)sge->laddr,
+ if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
bytes))
return -EFAULT;
} else {
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
buffer = kmap_atomic(p);
if (likely(PAGE_SIZE - off >= bytes)) {
- memcpy((void *)paddr, buffer + off, bytes);
+ memcpy(paddr, buffer + off, bytes);
kunmap_atomic(buffer);
} else {
unsigned long part = bytes - (PAGE_SIZE - off);
- memcpy((void *)paddr, buffer + off, part);
+ memcpy(paddr, buffer + off, part);
kunmap_atomic(buffer);
if (!mem->is_pbl)
@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
return -EFAULT;
buffer = kmap_atomic(p);
- memcpy((void *)(paddr + part), buffer,
+ memcpy(paddr + part, buffer,
bytes - part);
kunmap_atomic(buffer);
}
@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_send);
crc = (char *)&c_tx->pkt.send_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_SEND_REMOTE_INV:
@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
crc = (char *)&c_tx->pkt.send_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_WRITE:
@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
crc = (char *)&c_tx->pkt.write_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
case SIW_OP_READ_RESPONSE:
@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
crc = (char *)&c_tx->pkt.write_pkt.crc;
- data = siw_try_1seg(c_tx, (u64)crc);
+ data = siw_try_1seg(c_tx, crc);
break;
default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
#define MAX_TRAILER (MPA_CRC_SIZE + 4)
-static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps)
+static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
{
- if (hdr_len) {
- ++pages;
- --num_maps;
- }
- while (num_maps-- > 0) {
- kunmap(*pages);
- pages++;
+ while (kmap_mask) {
+ if (kmap_mask & BIT(0))
+ kunmap(*pp);
+ pp++;
+ kmap_mask >>= 1;
}
}
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
pbl_idx = c_tx->pbl_idx;
+ unsigned long kmap_mask = 0L;
if (c_tx->state == SIW_SEND_HDR) {
if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
mem = wqe->mem[sge_idx];
- if (!mem->mem_obj)
- is_kva = 1;
+ is_kva = mem->mem_obj == NULL ? 1 : 0;
} else {
is_kva = 1;
}
@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
* tx from kernel virtual address: either inline data
* or memory region with assigned kernel buffer
*/
- iov[seg].iov_base = (void *)(sge->laddr + sge_off);
+ iov[seg].iov_base =
+ (void *)(uintptr_t)(sge->laddr + sge_off);
iov[seg].iov_len = sge_len;
if (do_crc)
@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
p = siw_get_upage(mem->umem,
sge->laddr + sge_off);
if (unlikely(!p)) {
- if (hdr_len)
- seg--;
- if (!c_tx->use_sendpage && seg) {
- siw_unmap_pages(page_array,
- hdr_len, seg);
- }
+ siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
rv = -EFAULT;
goto done_crc;
@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (!c_tx->use_sendpage) {
iov[seg].iov_base = kmap(p) + fp_off;
iov[seg].iov_len = plen;
+
+ /* Remember for later kunmap() */
+ kmap_mask |= BIT(seg);
+
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
@@ -527,13 +525,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
kunmap(p);
}
} else {
- u64 pa = ((sge->laddr + sge_off) & PAGE_MASK);
+ u64 va = sge->laddr + sge_off;
- page_array[seg] = virt_to_page(pa);
+ page_array[seg] = virt_to_page(va & PAGE_MASK);
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(sge->laddr + sge_off),
+ (void *)(uintptr_t)va,
plen);
}
@@ -544,10 +542,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
if (++seg > (int)MAX_ARRAY) {
siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
- if (!is_kva && !c_tx->use_sendpage) {
- siw_unmap_pages(page_array, hdr_len,
- seg - 1);
- }
+ siw_unmap_pages(page_array, kmap_mask);
wqe->processed -= c_tx->bytes_unsent;
rv = -EMSGSIZE;
goto done_crc;
@@ -598,8 +593,7 @@ sge_done:
} else {
rv = kernel_sendmsg(s, &msg, iov, seg + 1,
hdr_len + data_len + trl_len);
- if (!is_kva)
- siw_unmap_pages(page_array, hdr_len, seg);
+ siw_unmap_pages(page_array, kmap_mask);
}
if (rv < (int)hdr_len) {
/* Not even complete hdr pushed or negative rv */
@@ -830,7 +824,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
rv = -EINVAL;
goto tx_error;
}
- wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+ wqe->sqe.sge[0].laddr =
+ (u64)(uintptr_t)&wqe->sqe.sge[1];
}
}
wqe->wr_status = SIW_WR_INPROGRESS;
@@ -925,7 +920,7 @@ tx_error:
static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{
- struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr;
+ struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
struct siw_device *sdev = to_siw_dev(pd->device);
struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
int rv = 0;
@@ -955,8 +950,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
mem->stag = sqe->rkey;
mem->perms = sqe->access;
- siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n",
- mem->va, base_mr->iova);
+ siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
mem->va = base_mr->iova;
mem->stag_valid = 1;
out:
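
The old siw_unmap_pages() tried to reconstruct which iov slots had been kmap()ped from hdr_len and a running count, which broke once some slots held kva or sendpage segments. The new code records a bit per mapped slot in kmap_mask at kmap() time and unmaps exactly those bits. A userspace model of the bookkeeping, where the mapped[] array stands in for kmap()/kunmap() on page_array entries:

    #include <assert.h>
    #include <stdio.h>

    #define MAX_SLOTS 8

    static int mapped[MAX_SLOTS];  /* 1 while "kmapped" */

    static void unmap_slots(unsigned long kmap_mask)
    {
        int slot = 0;

        while (kmap_mask) {
            if (kmap_mask & 1UL)
                mapped[slot] = 0;  /* stands in for kunmap(page_array[slot]) */
            slot++;
            kmap_mask >>= 1;
        }
    }

    int main(void)
    {
        unsigned long kmap_mask = 0;

        /* slots 1 and 3 get kmap()ped; 0 and 2 are kva/sendpage slots */
        mapped[1] = 1; kmap_mask |= 1UL << 1;
        mapped[3] = 1; kmap_mask |= 1UL << 3;

        unmap_slots(kmap_mask);

        for (int i = 0; i < MAX_SLOTS; i++)
            assert(mapped[i] == 0);
        puts("all mapped slots released");
        return 0;
    }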
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 03176a3d1e18..869e02b69a01 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -425,8 +425,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
*/
qp->srq = to_siw_srq(attrs->srq);
qp->attrs.rq_size = 0;
- siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n",
- qp->qp_num, qp->srq);
+ siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
} else if (num_rqe) {
if (qp->kernel_verbs)
qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -611,7 +610,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
base_ucontext);
struct siw_qp_attrs qp_attrs;
- siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep);
+ siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
/*
* Mark QP as in process of destruction to prevent from
@@ -663,7 +662,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
void *kbuf = &sqe->sge[1];
int num_sge = core_wr->num_sge, bytes = 0;
- sqe->sge[0].laddr = (u64)kbuf;
+ sqe->sge[0].laddr = (uintptr_t)kbuf;
sqe->sge[0].lkey = 0;
while (num_sge--) {
@@ -826,7 +825,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
break;
case IB_WR_REG_MR:
- sqe->base_mr = (uint64_t)reg_wr(wr)->mr;
+ sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
sqe->rkey = reg_wr(wr)->key;
sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
sqe->opcode = SIW_OP_REG_MR;
@@ -843,8 +842,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
rv = -EINVAL;
break;
}
- siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
- sqe->opcode, sqe->flags, (void *)sqe->id);
+ siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+ sqe->opcode, sqe->flags,
+ (void *)(uintptr_t)sqe->id);
if (unlikely(rv < 0))
break;
@@ -1206,8 +1206,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
int rv;
- siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n",
- (unsigned long long)start, (unsigned long long)rnic_va,
+ siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+ (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
(unsigned long long)len);
if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1364,7 +1364,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
struct siw_mem *mem = mr->mem;
struct siw_pbl *pbl = mem->pbl;
struct siw_pble *pble;
- u64 pbl_size;
+ unsigned long pbl_size;
int i, rv;
if (!pbl) {
@@ -1403,16 +1403,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
pbl_size += sg_dma_len(slp);
}
siw_dbg_mem(mem,
- "sge[%d], size %llu, addr 0x%016llx, total %llu\n",
- i, pble->size, pble->addr, pbl_size);
+ "sge[%d], size %u, addr 0x%p, total %lu\n",
+ i, pble->size, (void *)(uintptr_t)pble->addr,
+ pbl_size);
}
rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
if (rv > 0) {
mem->len = base_mr->length;
mem->va = base_mr->iova;
siw_dbg_mem(mem,
- "%llu bytes, start 0x%016llx, %u SLE to %u entries\n",
- mem->len, mem->va, num_sle, pbl->num_buf);
+ "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+ mem->len, (void *)(uintptr_t)mem->va, num_sle,
+ pbl->num_buf);
}
return rv;
}
@@ -1530,7 +1532,7 @@ int siw_create_srq(struct ib_srq *base_srq,
}
spin_lock_init(&srq->lock);
- siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq);
+ siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
return 0;
@@ -1651,8 +1653,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
if (unlikely(!srq->kernel_verbs)) {
siw_dbg_pd(base_srq->pd,
- "[SRQ 0x%p]: no kernel post_recv for mapped srq\n",
- srq);
+ "[SRQ]: no kernel post_recv for mapped srq\n");
rv = -EINVAL;
goto out;
}
@@ -1674,8 +1675,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
}
if (unlikely(wr->num_sge > srq->max_sge)) {
siw_dbg_pd(base_srq->pd,
- "[SRQ 0x%p]: too many sge's: %d\n", srq,
- wr->num_sge);
+ "[SRQ]: too many sge's: %d\n", wr->num_sge);
rv = -EINVAL;
break;
}
@@ -1694,7 +1694,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
spin_unlock_irqrestore(&srq->lock, flags);
out:
if (unlikely(rv < 0)) {
- siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv);
+ siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
*bad_wr = wr;
}
return rv;