From da343b6d90e11132f1e917d865d88ee35d6e6d00 Mon Sep 17 00:00:00 2001 From: Sergey Gorenko Date: Sun, 25 Feb 2018 13:39:48 +0200 Subject: IB/mlx5: Fix incorrect size of klms in the memory region If sg_nents is greater than mr->max_descs, the function mlx5_ib_sg_to_klms() sets mr->ndescs to a value greater than mr->max_descs. This is an invalid value, and it causes the following error when registering the MR: mlx5_0:dump_cqe:276:(pid 193): dump error cqe 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00000030: 00 00 00 00 0f 00 78 06 25 00 00 8b 08 1e 8f d3 Cc: # 4.5 Fixes: b005d3164713 ("mlx5: Add arbitrary sg list support") Signed-off-by: Sergey Gorenko Tested-by: Laurence Oberman Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 556e015678de..1961c6a45437 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, mr->ibmr.iova = sg_dma_address(sg) + sg_offset; mr->ibmr.length = 0; - mr->ndescs = sg_nents; for_each_sg(sgl, sg, sg_nents, i) { if (unlikely(i >= mr->max_descs)) @@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, sg_offset = 0; } + mr->ndescs = i; if (sg_offset_p) *sg_offset_p = sg_offset; -- cgit v1.2.3-59-g8ed1b From e7b169f34403becd3c9fd3b6e46614ab788f2187 Mon Sep 17 00:00:00 2001 From: Noa Osherovich Date: Sun, 25 Feb 2018 13:39:51 +0200 Subject: IB/mlx5: Avoid passing an invalid QP type to firmware During QP creation, the mlx5 driver translates the QP type to an internal value which is passed on to FW. There was no check to make sure that the translated value is valid, and -EINVAL was coerced into the mailbox command. Current firmware refuses this as an invalid QP type, but future/past firmware may do something else.
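The pattern the fix below applies is worth seeing in isolation: translate the generic QP type once, up front, and refuse to proceed on failure. A minimal standalone sketch of that pattern follows; the enum values and helper names are illustrative stand-ins, not the driver's actual to_mlx5_st() tables:

#include <errno.h>

/* Hypothetical translation: generic QP type -> firmware encoding.
 * Like to_mlx5_st(), it returns a negative errno for any type that
 * has no firmware encoding. */
enum qp_type { QPT_RC, QPT_UC };

static int to_hw_qp_type(int type)
{
    switch (type) {
    case QPT_RC: return 0x0;
    case QPT_UC: return 0x1;
    default:     return -EINVAL;
    }
}

static int create_qp(int type)
{
    int hw_st = to_hw_qp_type(type);

    /* Validate once, before the value can be coerced into the
     * mailbox command. */
    if (hw_st < 0)
        return -EINVAL;

    /* ... only the validated hw_st is written into the QPC ... */
    return 0;
}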
Fixes: 09a7d9eca1a6c ('{net,IB}/mlx5: QP/XRCD commands via mlx5 ifc') Reviewed-by: Ilya Lesokhin Signed-off-by: Noa Osherovich Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/qp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 39d24bf694a8..e8d7eaf0670c 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 uidx = MLX5_IB_DEFAULT_UIDX; struct mlx5_ib_create_qp ucmd; struct mlx5_ib_qp_base *base; + int mlx5_st; void *qpc; u32 *in; int err; @@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); + mlx5_st = to_mlx5_st(init_attr->qp_type); + if (mlx5_st < 0) + return -EINVAL; + if (init_attr->rwq_ind_tbl) { if (!udata) return -ENOSYS; @@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); - MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); + MLX5_SET(qpc, qpc, st, mlx5_st); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) -- cgit v1.2.3-59-g8ed1b From aba462134634b502d720e15b23154f21cfa277e5 Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Sun, 25 Feb 2018 13:39:53 +0200 Subject: {net, IB}/mlx5: Raise fatal IB event when sys error occurs All other mlx5_events report the port number as 1 based, which is how FW reports it in the port event EQE. Reporting 0 for this event causes mlx5_ib to not raise a fatal event notification to registered clients due to a seemingly invalid port. All switch cases in mlx5_ib_event that go through the port check are supposed to set the port now, so just do it once at variable declaration. Fixes: 89d44f0a6c73("net/mlx5_core: Add pci error handlers to mlx5_core driver") Reviewed-by: Majd Dibbiny Signed-off-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 11 ++--------- drivers/net/ethernet/mellanox/mlx5/core/health.c | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4236c8086820..bab38c6647d7 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3263,7 +3263,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) struct mlx5_ib_dev *ibdev; struct ib_event ibev; bool fatal = false; - u8 port = 0; + u8 port = (u8)work->param; if (mlx5_core_is_mp_slave(work->dev)) { ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); @@ -3283,8 +3283,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work) case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: case MLX5_DEV_EVENT_PORT_INITIALIZED: - port = (u8)work->param; - /* In RoCE, port up/down events are handled in * mlx5_netdev_event(). 
*/ @@ -3298,24 +3296,19 @@ case MLX5_DEV_EVENT_LID_CHANGE: ibev.event = IB_EVENT_LID_CHANGE; - port = (u8)work->param; break; case MLX5_DEV_EVENT_PKEY_CHANGE: ibev.event = IB_EVENT_PKEY_CHANGE; - port = (u8)work->param; - schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); break; case MLX5_DEV_EVENT_GUID_CHANGE: ibev.event = IB_EVENT_GID_CHANGE; - port = (u8)work->param; break; case MLX5_DEV_EVENT_CLIENT_REREG: ibev.event = IB_EVENT_CLIENT_REREGISTER; - port = (u8)work->param; break; case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: schedule_work(&ibdev->delay_drop.delay_drop_work); @@ -3327,7 +3320,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) ibev.device = &ibdev->ib_dev; ibev.element.port_num = port; - if (port < 1 || port > ibdev->num_ports) { + if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 21d29f7936f6..d39b0b7011b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) trigger_cmd_completions(dev); } - mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); + mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1); mlx5_core_err(dev, "end\n"); unlock: -- cgit v1.2.3-59-g8ed1b From 65389322b28f81cc137b60a41044c2d958a7b950 Mon Sep 17 00:00:00 2001 From: Moni Shoua Date: Sun, 25 Feb 2018 13:39:54 +0200 Subject: IB/mlx: Set slid to zero in Ethernet completion struct The IB spec says that a lid should be ignored when the link layer is Ethernet, for example when building or parsing a CM request message (CA17-34). However, since ib_lid_be16() and ib_lid_cpu16() validate the slid even when the link layer is not IB, we set the slid to zero to prevent false warnings in the kernel log. Fixes: 62ede7779904 ("Add OPA extended LID support") Reviewed-by: Majd Dibbiny Signed-off-by: Moni Shoua Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/cq.c | 4 +++- drivers/infiniband/hw/mlx5/cq.c | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 9a566ee3ceff..82adc0d1d30e 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct wc->dlid_path_bits = 0; if (is_eth) { + wc->slid = 0; wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); @@ -851,7 +852,6 @@ repoll: } } - wc->slid = be16_to_cpu(cqe->rlid); g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); wc->src_qp = g_mlpath_rqpn & 0xffffff; wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; @@ -860,6 +860,7 @@ repoll: wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum) ?
IB_WC_IP_CSUM_OK : 0; if (is_eth) { + wc->slid = 0; wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_CVLAN_PRESENT_MASK) { @@ -871,6 +872,7 @@ repoll: memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); } else { + wc->slid = be16_to_cpu(cqe->rlid); wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; wc->vlan_id = 0xffff; } diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 5b974fb97611..b5cfdaa9c7c8 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); break; } - wc->slid = be16_to_cpu(cqe->slid); wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; wc->dlid_path_bits = cqe->ml_path; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; @@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, } if (ll != IB_LINK_LAYER_ETHERNET) { + wc->slid = be16_to_cpu(cqe->slid); wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; return; } + wc->slid = 0; vlan_present = cqe->l4_l3_hdr_type & 0x1; roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; if (vlan_present) { -- cgit v1.2.3-59-g8ed1b From 2fb4f4eadd180a50112618dd9c5fef7fc50d4f08 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 25 Feb 2018 13:39:56 +0200 Subject: IB/core: Fix missing RDMA cgroups release in case of failure to register device During IB device registration process, if query_device() fails or if ib_core fails to register sysfs entries, rdma cgroup cleanup is skipped. Cc: # v4.2+ Fixes: 4be3a4fa51f4 ("IB/core: Fix kernel crash during fail to initialize device") Reviewed-by: Daniel Jurgens Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/device.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e8010e73a1cf..bb065c9449be 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device, ret = device->query_device(device, &device->attrs, &uhw); if (ret) { pr_warn("Couldn't query the device attributes\n"); - goto cache_cleanup; + goto cg_cleanup; } ret = ib_device_register_sysfs(device, port_callback); if (ret) { pr_warn("Couldn't register device %s with driver model\n", device->name); - goto cache_cleanup; + goto cg_cleanup; } device->reg_state = IB_DEV_REGISTERED; @@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device, mutex_unlock(&device_mutex); return 0; +cg_cleanup: + ib_device_unregister_rdmacg(device); cache_cleanup: ib_cache_cleanup_one(device); ib_cache_release_one(device); -- cgit v1.2.3-59-g8ed1b From a45bc17b360d75fac9ced85e99fda14bf38b4dc3 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Mon, 26 Feb 2018 01:51:37 -0800 Subject: RDMA/bnxt_re: Unconditionally fence non-wire memory operations HW requires an unconditional fence for all non-wire memory operations through SQ. This guarantees the completions of these memory operations.
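As a hedged illustration of what "unconditional" means here, the sketch below contrasts the old and new flag logic; the names echo the driver's flags, but these functions are simplified stand-ins, not bnxt_re code:

#define FLAGS_UC_FENCE       0x1  /* unconditional fence */
#define FLAGS_SIGNAL_COMP    0x2
#define SEND_FLAG_FENCE      0x4  /* application-requested IB_SEND_FENCE */
#define SEND_FLAG_SIGNALED   0x8

/* Before: fenced only when the application asked for it. */
static unsigned int build_flags_old(unsigned int send_flags)
{
    unsigned int flags = 0;

    if (send_flags & SEND_FLAG_FENCE)
        flags |= FLAGS_UC_FENCE;
    if (send_flags & SEND_FLAG_SIGNALED)
        flags |= FLAGS_SIGNAL_COMP;
    return flags;
}

/* After: local-invalidate and reg-MR WQEs are always fenced, since
 * the HW relies on the fence to complete non-wire memory operations. */
static unsigned int build_flags_new(unsigned int send_flags)
{
    unsigned int flags = FLAGS_UC_FENCE;

    if (send_flags & SEND_FLAG_SIGNALED)
        flags |= FLAGS_SIGNAL_COMP;
    return flags;
}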
Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 643174d949a8..755f1ccd82bb 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -2227,10 +2227,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; + /* Need unconditional fence for local invalidate + * opcode to work as expected. + */ + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; - if (wr->send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; if (wr->send_flags & IB_SEND_SOLICITED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; @@ -2251,8 +2254,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, wqe->frmr.levels = qplib_frpl->hwq.level + 1; wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; - if (wr->wr.send_flags & IB_SEND_FENCE) - wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + /* Need unconditional fence for reg_mr + * opcode to function as expected. + */ + + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + if (wr->wr.send_flags & IB_SEND_SIGNALED) wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; -- cgit v1.2.3-59-g8ed1b From c354dff00db8df80f271418d8392065e10ffffb6 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Mon, 26 Feb 2018 01:51:38 -0800 Subject: RDMA/bnxt_re: Fix incorrect DB offset calculation To support host systems with non 4K page size, l2_db_size shall be calculated with 4096 instead of PAGE_SIZE. Also, supply the host page size to FW during initialization. Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 6 +++++- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 1 + drivers/infiniband/hw/bnxt_re/qplib_sp.c | 3 ++- drivers/infiniband/hw/bnxt_re/roce_hsi.h | 25 ++++++++++++++++++++++++- 4 files changed, 32 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 8329ec6a7946..14d153d4013c 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -460,7 +460,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); - + /* Supply (log-base-2-of-host-page-size - base-page-shift) + * to bono to adjust the doorbell page sizes. + */ + req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - + RCFW_DBR_BASE_PAGE_SHIFT); /* * VFs need not setup the HW context area, PF * shall setup this area for VF. 
Skipping the diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 6bee6e3636ea..c7cce2e4185e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -49,6 +49,7 @@ #define RCFW_COMM_SIZE 0x104 #define RCFW_DBR_PCI_BAR_REGION 2 +#define RCFW_DBR_BASE_PAGE_SHIFT 12 #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ do { \ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 03057983341f..ee98e5efef84 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_pkey = le32_to_cpu(sb->max_pkeys); attr->max_inline_data = le32_to_cpu(sb->max_inline_data); - attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; + attr->l2_db_size = (sb->l2_db_space_size + 1) * + (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); attr->max_sgid = le32_to_cpu(sb->max_gid); bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 2d7ea096a247..3e5a4f760d0e 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw { #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) - __le16 reserved16; + /* This value is (log-base-2-of-DBR-page-size - 12). + * 0 for 4KB. HW supported values are enumerated below. + */ + __le16 log2_dbr_pg_size; + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL + #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ + CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M __le64 qpc_page_dir; __le64 mrw_page_dir; __le64 srq_page_dir; -- cgit v1.2.3-59-g8ed1b From 497158aa5f520db50452ef928c0f955cb42f2e77 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Mon, 26 Feb 2018 01:51:39 -0800 Subject: RDMA/bnxt_re: Fix the ib_reg failure cleanup Release the netdev references in the cleanup path. Invokes the cleanup routines if bnxt_re_ib_reg fails. 
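The fix below follows the usual bring-up/unwind discipline; as a self-contained illustration under assumed names (the dev_* functions are placeholders, not the bnxt_re API):

#include <errno.h>

struct dev;
static int  dev_setup(struct dev *d)    { (void)d; return 0; }    /* stub */
static int  dev_ib_reg(struct dev *d)   { (void)d; return -EIO; } /* stub */
static void dev_ib_unreg(struct dev *d) { (void)d; }              /* stub */
static void dev_teardown(struct dev *d) { (void)d; }              /* stub */

/* Registration is multi-step; if it fails partway through, the
 * partial state must be torn down in reverse order so no netdev
 * reference is leaked. */
static int bring_up(struct dev *d)
{
    int rc = dev_setup(d);

    if (rc)
        return rc;

    rc = dev_ib_reg(d);
    if (rc) {
        dev_ib_unreg(d);  /* plays the role of bnxt_re_remove_one() */
        dev_teardown(d);  /* plays the role of bnxt_re_dev_unreg() */
    }
    return rc;
}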
Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 33a448036c2e..604c805ceaa7 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1416,9 +1416,12 @@ static void bnxt_re_task(struct work_struct *work) switch (re_work->event) { case NETDEV_REGISTER: rc = bnxt_re_ib_reg(rdev); - if (rc) + if (rc) { dev_err(rdev_to_dev(rdev), "Failed to register with IB: %#x", rc); + bnxt_re_remove_one(rdev); + bnxt_re_dev_unreg(rdev); + } break; case NETDEV_UP: bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, -- cgit v1.2.3-59-g8ed1b From 4cd482c12be473ae507eba232a8374c798233e42 Mon Sep 17 00:00:00 2001 From: Muneendra Kumar M Date: Tue, 27 Feb 2018 21:51:49 -0800 Subject: IB/core: Add null pointer check in addr_resolve dev_get_by_index() is called in the addr_resolve() function; it can return NULL, and the resulting NULL pointer access leads to a kernel crash. The following call trace is observed while running the rdma_lat test application: [ 146.173149] BUG: unable to handle kernel NULL pointer dereference at 00000000000004a0 [ 146.173198] IP: addr_resolve+0x9e/0x3e0 [ib_core] [ 146.173221] PGD 0 P4D 0 [ 146.173869] Oops: 0000 [#1] SMP PTI [ 146.182859] CPU: 8 PID: 127 Comm: kworker/8:1 Tainted: G O 4.15.0-rc6+ #18 [ 146.183758] Hardware name: LENOVO System x3650 M5: -[8871AC1]-/01KN179, BIOS-[TCE132H-2.50]- 10/11/2017 [ 146.184691] Workqueue: ib_cm cm_work_handler [ib_cm] [ 146.185632] RIP: 0010:addr_resolve+0x9e/0x3e0 [ib_core] [ 146.186584] RSP: 0018:ffffc9000362faa0 EFLAGS: 00010246 [ 146.187521] RAX: 000000000000001b RBX: ffffc9000362fc08 RCX: 0000000000000006 [ 146.188472] RDX: 0000000000000000 RSI: 0000000000000096 RDI: ffff88087fc16990 [ 146.189427] RBP: ffffc9000362fb18 R08: 00000000ffffff9d R09: 00000000000004ac [ 146.190392] R10: 00000000000001e7 R11: 0000000000000001 R12: ffff88086af2e090 [ 146.191361] R13: 0000000000000000 R14: 0000000000000001 R15: 00000000ffffff9d [ 146.192327] FS: 0000000000000000(0000) GS:ffff88087fc00000(0000) knlGS:0000000000000000 [ 146.193301] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 146.194274] CR2: 00000000000004a0 CR3: 000000000220a002 CR4: 00000000003606e0 [ 146.195258] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 146.196256] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 146.197231] Call Trace: [ 146.198209] ? rdma_addr_register_client+0x30/0x30 [ib_core] [ 146.199199] rdma_resolve_ip+0x1af/0x280 [ib_core] [ 146.200196] rdma_addr_find_l2_eth_by_grh+0x154/0x2b0 [ib_core] The patch below adds the missing check for a NULL return from dev_get_by_index() before accessing the netdev, to avoid the kernel crash. We observed the crash when we tried the following test.
server                         client
---------                      ---------
|1.1.1.1|<----rxe-channel--->|1.1.1.2|
---------                      ---------
On server: rdma_lat -c -n 2 -s 1024
On client: rdma_lat 1.1.1.1 -c -n 2 -s 1024
Fixes: 200298326b27 ("IB/core: Validate route when we init ah") Signed-off-by: Muneendra Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/addr.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index a5b4cf030c11..9183d148d644 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in, dst_release(dst); } - if (ndev->flags & IFF_LOOPBACK) { - ret = rdma_translate_ip(dst_in, addr); - /* - * Put the loopback device and get the translated - * device instead. - */ + if (ndev) { + if (ndev->flags & IFF_LOOPBACK) + ret = rdma_translate_ip(dst_in, addr); + else + addr->bound_dev_if = ndev->ifindex; dev_put(ndev); - ndev = dev_get_by_index(addr->net, addr->bound_dev_if); - } else { - addr->bound_dev_if = ndev->ifindex; } - dev_put(ndev); return ret; } -- cgit v1.2.3-59-g8ed1b From 11052696fdbf673d422e92e6149eaad78ae0c252 Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Mon, 5 Mar 2018 10:50:08 +0200 Subject: RDMA/qedr: Fix ipv6 destination address resolution The wrong parameter was passed to dst_neigh_lookup(). Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/qedr_iw_cm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 478b7317b80a..1fc97157def8 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev, } return -EINVAL; } - neigh = dst_neigh_lookup(dst, &dst_in); - + neigh = dst_neigh_lookup(dst, &fl6.daddr); if (neigh) { rcu_read_lock(); if (neigh->nud_state & NUD_VALID) { -- cgit v1.2.3-59-g8ed1b From ea0ed47803df93d0904b838d6b5afceec3ad0ba4 Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Mon, 5 Mar 2018 10:50:09 +0200 Subject: RDMA/qedr: Fix iWARP connect with port mapper Fix iWARP connect and listen to use the mapped port for ipv4 and ipv6.
Without this fix, running on a server that has iwpmd enabled will not use the correct port. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/qedr_iw_cm.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 1fc97157def8..26dc374787f7 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -493,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) qp = idr_find(&dev->qpidr, conn_param->qpn); - laddr = (struct sockaddr_in *)&cm_id->local_addr; - raddr = (struct sockaddr_in *)&cm_id->remote_addr; - laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; - raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr; + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; + raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; + + DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n", + ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port), + ntohs(raddr->sin_port)); DP_DEBUG(dev, QEDR_MSG_IWARP, "Connect source address: %pISpc, remote address: %pISpc\n", @@ -598,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog) int rc; int i; - laddr = (struct sockaddr_in *)&cm_id->local_addr; - laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; DP_DEBUG(dev, QEDR_MSG_IWARP, "Create Listener address: %pISpc\n", &cm_id->local_addr); -- cgit v1.2.3-59-g8ed1b From e3fd112cbf21d049faf64ba1471d72b93c22109a Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Mon, 5 Mar 2018 10:50:10 +0200 Subject: RDMA/qedr: Fix kernel panic when running fio over NFSoRDMA There was a race in qedr_poll_cq: latest_cqe wasn't protected by a lock, leading to a case where two contexts accessing poll_cq at the same time left one of them with a pointer to an old latest_cqe, reading an invalid cqe element. Signed-off-by: Amit Radzi Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 53f00dbf313f..102b9e0efe9a 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3724,7 +3724,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct qedr_dev *dev = get_qedr_dev(ibcq->device); struct qedr_cq *cq = get_qedr_cq(ibcq); - union rdma_cqe *cqe = cq->latest_cqe; + union rdma_cqe *cqe; u32 old_cons, new_cons; unsigned long flags; int update = 0; @@ -3741,6 +3741,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) return qedr_gsi_poll_cq(ibcq, num_entries, wc); spin_lock_irqsave(&cq->cq_lock, flags); + cqe = cq->latest_cqe; old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); while (num_entries && is_valid_cqe(cq, cqe)) { struct qedr_qp *qp; -- cgit v1.2.3-59-g8ed1b From 551e1c67b4207455375a2e7a285dea1c7e8fc361 Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Mon, 5 Mar 2018 10:50:11 +0200 Subject: RDMA/qedr: Fix iWARP write and send with immediate iWARP does not support RDMA WRITE or SEND with
immediate data. Driver should check this before submitting to FW and return an immediate error Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 102b9e0efe9a..875b17272d65 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; swqe = (struct rdma_sq_send_wqe_1st *)wqe; swqe->wqe_size = 2; @@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_RDMA_WRITE_WITH_IMM: + if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { + rc = -EINVAL; + *bad_wr = wr; + break; + } wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; -- cgit v1.2.3-59-g8ed1b From 0077416a3d529baccbe07ab3242e8db541cfadf6 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 5 Mar 2018 20:09:45 +0200 Subject: IB/mlx4: Fix corruption of RoCEv2 IPv4 GIDs When using IPv4 addresses in RoCEv2, the GID format for the mapped IPv4 address should be: ::ffff:<4-byte IPv4 address>. In the cited commit, IPv4 mapped IPV6 addresses had the 3 upper dwords zeroed out by memset, which resulted in deleting the ffff field. However, since procedure ipv6_addr_v4mapped() already verifies that the gid has format ::ffff:, no change is needed for the gid, and the memset can simply be removed. Fixes: 7e57b85c444c ("IB/mlx4: Add support for setting RoCEv2 gids in hardware") Reviewed-by: Moni Shoua Signed-off-by: Jack Morgenstein Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/main.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8d2ee9322f2e..90790818d655 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, gid_tbl[i].version = 2; if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) gid_tbl[i].type = 1; - else - memset(&gid_tbl[i].gid, 0, 12); } } -- cgit v1.2.3-59-g8ed1b From a18177925c252da7801149abe217c05b80884798 Mon Sep 17 00:00:00 2001 From: Jack M Date: Mon, 5 Mar 2018 20:09:46 +0200 Subject: IB/mlx4: Include GID type when deleting GIDs from HW table under RoCE The commit cited below added a gid_type field (RoCEv1 or RoCEv2) to GID properties. When adding GIDs, this gid_type field was copied over to the hardware gid table. However, when deleting GIDs, the gid_type field was not copied over to the hardware gid table. As a result, when running RoCEv2, all RoCEv2 gids in the hardware gid table were set to type RoCEv1 when any gid was deleted. This problem would persist until the next gid was added (which would again restore the gid_type field for all the gids in the hardware gid table). Fix this by copying over the gid_type field to the hardware gid table when deleting gids, so that the gid_type of all remaining gids is preserved when a gid is deleted. 
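A reduced sketch of the add/delete asymmetry described above; the table layout is simplified and the names are illustrative, only the copy logic matters:

#include <string.h>

#define MAX_PORT_GIDS 16

struct gid_entry {
    unsigned char gid[16];
    int gid_type;  /* RoCEv1 or RoCEv2 */
};

/* Snapshot the port's shadow table for writing to HW. Before the fix,
 * the delete path copied only .gid, so every remaining entry reached
 * HW with the default type (RoCEv1) until the next add restored it. */
static void snapshot_for_hw(struct gid_entry *dst,
                            const struct gid_entry *shadow)
{
    int i;

    for (i = 0; i < MAX_PORT_GIDS; i++) {
        memcpy(dst[i].gid, shadow[i].gid, sizeof(dst[i].gid));
        dst[i].gid_type = shadow[i].gid_type; /* the missing copy */
    }
}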
Fixes: b699a859d17b ("IB/mlx4: Add gid_type to GID properties") Reviewed-by: Moni Shoua Signed-off-by: Jack Morgenstein Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 90790818d655..5a0e4fc4785a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -364,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device, if (!gids) { ret = -ENOMEM; } else { - for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) - memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); + for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { + memcpy(&gids[i].gid, + &port_gid_table->gids[i].gid, + sizeof(union ib_gid)); + gids[i].gid_type = + port_gid_table->gids[i].gid_type; + } } } spin_unlock_bh(&iboe->lock); -- cgit v1.2.3-59-g8ed1b From 210b1f78076f88cad25b333fffafbac6ae870fcc Mon Sep 17 00:00:00 2001 From: Mark Bloch Date: Mon, 5 Mar 2018 20:09:47 +0200 Subject: IB/mlx5: When not in dual port RoCE mode, use provided port as native The series that introduced dual port RoCE mode assumed that we don't have a dual port HCA that uses the mlx5 driver; this is not the case for Connect-IB HCAs. This reasoning led to assigning 1 as the native port index, which causes issues when the second port is used. For example, query_pkey() called on the second port will return the values of the first port. Make sure that we assign the right port index as the native port index. Fixes: 32f69e4be269 ("{net, IB}/mlx5: Manage port association for multiport RoCE") Reviewed-by: Daniel Jurgens Signed-off-by: Mark Bloch Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/main.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index bab38c6647d7..033b6af90de9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_port *port; + if (!mlx5_core_mp_enabled(ibdev->mdev) || + ll != IB_LINK_LAYER_ETHERNET) { + if (native_port_num) + *native_port_num = ib_port_num; + return ibdev->mdev; + } + if (native_port_num) *native_port_num = 1; - if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) - return ibdev->mdev; - port = &ibdev->port[ib_port_num - 1]; if (!port) return NULL; -- cgit v1.2.3-59-g8ed1b From 5d414b178e950ce9685c253994cc730893d5d887 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 6 Mar 2018 13:00:31 +0300 Subject: IB/mlx5: Fix an error code in __mlx5_ib_modify_qp() "err" is either zero or possibly uninitialized here. It should be -EINVAL.
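The bug class is easiest to see in isolation; a minimal sketch, with optab and the state count as placeholders for the driver's transition table:

#include <errno.h>
#include <stddef.h>

#define NUM_STATE 4

typedef int (*op_fn)(void);
static op_fn optab[NUM_STATE][NUM_STATE]; /* sparse: many NULL slots */

static int modify(int cur, int next)
{
    int err = 0; /* still 0 from earlier successful steps */

    if (cur >= NUM_STATE || next >= NUM_STATE || !optab[cur][next]) {
        err = -EINVAL; /* without this, the invalid transition is
                        * silently reported as success */
        goto out;
    }
    err = optab[cur][next]();
out:
    return err;
}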
Fixes: 427c1e7bcd7e ("{IB, net}/mlx5: Move the modify QP operation table to mlx5_ib") Signed-off-by: Dan Carpenter Acked-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/qp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e8d7eaf0670c..36197fbac63a 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3100,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, goto out; if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || - !optab[mlx5_cur][mlx5_new]) + !optab[mlx5_cur][mlx5_new]) { + err = -EINVAL; goto out; + } op = optab[mlx5_cur][mlx5_new]; optpar = ib_mask_to_mlx5_opt(attr_mask); -- cgit v1.2.3-59-g8ed1b From d3b9e8ad425cfd5b9116732e057f1b48e4d3bcb8 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Mon, 5 Mar 2018 20:09:48 +0200 Subject: RDMA/core: Reduce poll batch for direct cq polling Fix warning limit for kernel stack consumption: drivers/infiniband/core/cq.c: In function 'ib_process_cq_direct': drivers/infiniband/core/cq.c:78:1: error: the frame size of 1032 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] Using smaller ib_wc array on the stack brings us comfortably below that limit again. Fixes: 246d8b184c10 ("IB/cq: Don't force IB_POLL_DIRECT poll context for ib_process_cq_direct") Reported-by: Arnd Bergmann Reviewed-by: Sergey Gorenko Signed-off-by: Max Gurtovoy Signed-off-by: Leon Romanovsky Reviewed-by: Bart Van Assche Acked-by: Arnd Bergmann Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cq.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index bc79ca8215d7..af5ad6a56ae4 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -17,6 +17,7 @@ /* # of WCs to poll for with a single call to ib_poll_cq */ #define IB_POLL_BATCH 16 +#define IB_POLL_BATCH_DIRECT 8 /* # of WCs to iterate over before yielding */ #define IB_POLL_BUDGET_IRQ 256 @@ -25,18 +26,18 @@ #define IB_POLL_FLAGS \ (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) -static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) +static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, + int batch) { int i, n, completed = 0; - struct ib_wc *wcs = poll_wc ? : cq->wc; /* * budget might be (-1) if the caller does not * want to bound this call, thus we need unsigned * minimum here. 
*/ - while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, - budget - completed), wcs)) > 0) { + while ((n = ib_poll_cq(cq, min_t(u32, batch, + budget - completed), wcs)) > 0) { for (i = 0; i < n; i++) { struct ib_wc *wc = &wcs[i]; @@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) completed += n; - if (n != IB_POLL_BATCH || - (budget != -1 && completed >= budget)) + if (n != batch || (budget != -1 && completed >= budget)) break; } @@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) */ int ib_process_cq_direct(struct ib_cq *cq, int budget) { - struct ib_wc wcs[IB_POLL_BATCH]; + struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; - return __ib_process_cq(cq, budget, wcs); + return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); } EXPORT_SYMBOL(ib_process_cq_direct); @@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget) struct ib_cq *cq = container_of(iop, struct ib_cq, iop); int completed; - completed = __ib_process_cq(cq, budget, NULL); + completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); if (completed < budget) { irq_poll_complete(&cq->iop); if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) @@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work) struct ib_cq *cq = container_of(work, struct ib_cq, work); int completed; - completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); + completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, + IB_POLL_BATCH); if (completed >= IB_POLL_BUDGET_WORKQUEUE || ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) queue_work(ib_comp_wq, &cq->work); -- cgit v1.2.3-59-g8ed1b From 942c9b6ca8de5b7ad675e9b2e0e964449c10c18a Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Mon, 5 Mar 2018 21:49:28 -0800 Subject: RDMA/bnxt_re: Avoid Hard lockup during error CQE processing Hitting the following hardlockup due to a race condition in error CQE processing. [26146.879798] bnxt_en 0000:04:00.0: QPLIB: FP: CQ Processed Req [26146.886346] bnxt_en 0000:04:00.0: QPLIB: wr_id[1251] = 0x0 with status 0xa [26156.350935] NMI watchdog: Watchdog detected hard LOCKUP on cpu 4 [26156.357470] Modules linked in: nfsd auth_rpcgss nfs_acl lockd grace [26156.447957] CPU: 4 PID: 3413 Comm: kworker/4:1H Kdump: loaded [26156.457994] Hardware name: Dell Inc. PowerEdge R430/0CN7X8, [26156.466390] Workqueue: ib-comp-wq ib_cq_poll_work [ib_core] [26156.472639] Call Trace: [26156.475379] [] dump_stack+0x19/0x1b [26156.481833] [] watchdog_overflow_callback+0x135/0x140 [26156.489341] [] __perf_event_overflow+0x57/0x100 [26156.496256] [] perf_event_overflow+0x14/0x20 [26156.502887] [] intel_pmu_handle_irq+0x220/0x510 [26156.509813] [] perf_event_nmi_handler+0x31/0x50 [26156.516738] [] nmi_handle.isra.0+0x8c/0x150 [26156.523273] [] do_nmi+0x218/0x460 [26156.528834] [] end_repeat_nmi+0x1e/0x7e [26156.534980] [] ? native_queued_spin_lock_slowpath+0x1d0/0x200 [26156.543268] [] ? native_queued_spin_lock_slowpath+0x1d0/0x200 [26156.551556] [] ? native_queued_spin_lock_slowpath+0x1d0/0x200 [26156.559842] [] queued_spin_lock_slowpath+0xb/0xf [26156.567555] [] _raw_spin_lock+0x20/0x30 [26156.573696] [] bnxt_qplib_lock_buddy_cq+0x31/0x40 [bnxt_re] [26156.581789] [] bnxt_qplib_poll_cq+0x43a/0xf10 [bnxt_re] [26156.589493] [] bnxt_re_poll_cq+0x9b/0x760 [bnxt_re] The issue happens if RQ poll_cq or SQ poll_cq or Async error event tries to put the error QP in flush list. 
Since the SQ and RQ of each error qp are added to two different flush lists, we need to protect them using the locks of the corresponding CQs. A difference in the order of acquiring the lock in SQ poll_cq and RQ poll_cq can cause a hard lockup. This patch revisits the locking strategy and removes the usage of qplib_cq.hwq.lock. Instead of this lock, it introduces qplib_cq.flush_lock to handle addition/deletion of QPs in the flush list. Also, the flush_locks are always acquired in order (SQ CQ lock first and then RQ CQ lock) to avoid any potential deadlock. Other than the poll_cq context, the movement of a QP to/from the flush list can be done in modify_qp context or from an async error event from HW. Synchronize these operations using the bnxt_re verbs layer CQ locks. To achieve this, a callback from the HW abstraction layer (qplib) to the bnxt_re ib_verbs layer is added for the async error event case. Also, the buddy cq functions are removed as they are no longer required. Signed-off-by: Sriharsha Basavapatna Signed-off-by: Somnath Kotur Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 11 ++- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 3 + drivers/infiniband/hw/bnxt_re/main.c | 7 ++ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 109 +++++++---------------------- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 12 ++++ drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 3 +- 6 files changed, 55 insertions(+), 90 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 755f1ccd82bb..0dd75f449872 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) return 0; } -static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) { unsigned long flags; @@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) return flags; } -static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, - unsigned long flags) +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, + unsigned long flags) __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) { if (qp->rcq != qp->scq) @@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, int status; union ib_gid sgid; struct ib_gid_attr sgid_attr; + unsigned int flags; u8 nw_type; qp->qplib_qp.modify_flags = 0; @@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, dev_dbg(rdev_to_dev(rdev), "Move QP = %p to flush list\n", qp); + flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); } if (!qp->sumem && qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { dev_dbg(rdev_to_dev(rdev), "Move QP = %p out of flush list\n", qp); + flags = bnxt_re_lock_cqs(qp); bnxt_qplib_clean_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); } } if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index b88a48d43a9d..e62b7c2c7da6 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata); int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); int bnxt_re_mmap(struct
ib_ucontext *context, struct vm_area_struct *vma); + +unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp); +void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags); #endif /* __BNXT_RE_IB_VERBS_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 604c805ceaa7..f6e361750466 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct bnxt_re_qp *qp) { struct ib_event event; + unsigned int flags; + + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + flags = bnxt_re_lock_cqs(qp); + bnxt_qplib_add_flush_qp(&qp->qplib_qp); + bnxt_re_unlock_cqs(qp, flags); + } memset(&event, 0, sizeof(event)); if (qp->qplib_qp.srq) { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 3ea5b9624f6b..06b42c880fd4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) } } -void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, - unsigned long *flags) - __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock) +static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp, + unsigned long *flags) + __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock) { - spin_lock_irqsave(&qp->scq->hwq.lock, *flags); + spin_lock_irqsave(&qp->scq->flush_lock, *flags); if (qp->scq == qp->rcq) - __acquire(&qp->rcq->hwq.lock); + __acquire(&qp->rcq->flush_lock); else - spin_lock(&qp->rcq->hwq.lock); + spin_lock(&qp->rcq->flush_lock); } -void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, - unsigned long *flags) - __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock) +static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp, + unsigned long *flags) + __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock) { if (qp->scq == qp->rcq) - __release(&qp->rcq->hwq.lock); + __release(&qp->rcq->flush_lock); else - spin_unlock(&qp->rcq->hwq.lock); - spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags); -} - -static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp, - struct bnxt_qplib_cq *cq) -{ - struct bnxt_qplib_cq *buddy_cq = NULL; - - if (qp->scq == qp->rcq) - buddy_cq = NULL; - else if (qp->scq == cq) - buddy_cq = qp->rcq; - else - buddy_cq = qp->scq; - return buddy_cq; -} - -static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp, - struct bnxt_qplib_cq *cq) - __acquires(&buddy_cq->hwq.lock) -{ - struct bnxt_qplib_cq *buddy_cq = NULL; - - buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); - if (!buddy_cq) - __acquire(&cq->hwq.lock); - else - spin_lock(&buddy_cq->hwq.lock); -} - -static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp, - struct bnxt_qplib_cq *cq) - __releases(&buddy_cq->hwq.lock) -{ - struct bnxt_qplib_cq *buddy_cq = NULL; - - buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); - if (!buddy_cq) - __release(&cq->hwq.lock); - else - spin_unlock(&buddy_cq->hwq.lock); + spin_unlock(&qp->rcq->flush_lock); + spin_unlock_irqrestore(&qp->scq->flush_lock, *flags); } void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) { unsigned long flags; - bnxt_qplib_acquire_cq_locks(qp, &flags); + bnxt_qplib_acquire_cq_flush_locks(qp, &flags); __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_release_cq_locks(qp, &flags); + bnxt_qplib_release_cq_flush_locks(qp, &flags); } static void 
__bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) @@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) { unsigned long flags; - bnxt_qplib_acquire_cq_locks(qp, &flags); + bnxt_qplib_acquire_cq_flush_locks(qp, &flags); __clean_cq(qp->scq, (u64)(unsigned long)qp); qp->sq.hwq.prod = 0; qp->sq.hwq.cons = 0; @@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) qp->rq.hwq.cons = 0; __bnxt_qplib_del_flush_qp(qp); - bnxt_qplib_release_cq_locks(qp, &flags); + bnxt_qplib_release_cq_flush_locks(qp, &flags); } static void bnxt_qpn_cqn_sched_task(struct work_struct *work) @@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle) /* Must block new posting of SQ and RQ */ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; bnxt_qplib_cancel_phantom_processing(qp); - - /* Add qp to flush list of the CQ */ - __bnxt_qplib_add_flush_qp(qp); } /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) @@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, sw_sq_cons, cqe->wr_id, cqe->status); cqe++; (*budget)--; - bnxt_qplib_lock_buddy_cq(qp, cq); bnxt_qplib_mark_qp_error(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + /* Add qp to flush list of the CQ */ + bnxt_qplib_add_flush_qp(qp); } else { if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { /* Before we complete, do WA 9060 */ @@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq, if (hwcqe->status != CQ_RES_RC_STATUS_OK) { qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); } } @@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq, if (hwcqe->status != CQ_RES_RC_STATUS_OK) { qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); } } done: @@ -2501,11 +2454,9 @@ done: bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) { struct cq_base *hw_cqe, **hw_cqe_ptr; - unsigned long flags; u32 sw_cons, raw_cons; bool rc = true; - spin_lock_irqsave(&cq->hwq.lock, flags); raw_cons = cq->hwq.cons; sw_cons = HWQ_CMP(raw_cons, &cq->hwq); hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; @@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) /* Check for Valid bit. 
If the CQE is valid, return false */ rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); - spin_unlock_irqrestore(&cq->hwq.lock, flags); return rc; } @@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, if (hwcqe->status != CQ_RES_RC_STATUS_OK) { qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); } } @@ -2719,9 +2667,7 @@ do_rq: */ /* Add qp to flush list of the CQ */ - bnxt_qplib_lock_buddy_cq(qp, cq); - __bnxt_qplib_add_flush_qp(qp); - bnxt_qplib_unlock_buddy_cq(qp, cq); + bnxt_qplib_add_flush_qp(qp); done: return rc; } @@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, u32 budget = num_cqes; unsigned long flags; - spin_lock_irqsave(&cq->hwq.lock, flags); + spin_lock_irqsave(&cq->flush_lock, flags); list_for_each_entry(qp, &cq->sqf_head, sq_flush) { dev_dbg(&cq->hwq.pdev->dev, "QPLIB: FP: Flushing SQ QP= %p", @@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, qp); __flush_rq(&qp->rq, qp, &cqe, &budget); } - spin_unlock_irqrestore(&cq->hwq.lock, flags); + spin_unlock_irqrestore(&cq->flush_lock, flags); return num_cqes - budget; } @@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num_cqes, struct bnxt_qplib_qp **lib_qp) { struct cq_base *hw_cqe, **hw_cqe_ptr; - unsigned long flags; u32 sw_cons, raw_cons; int budget, rc = 0; - spin_lock_irqsave(&cq->hwq.lock, flags); raw_cons = cq->hwq.cons; budget = num_cqes; @@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); } exit: - spin_unlock_irqrestore(&cq->hwq.lock, flags); return num_cqes - budget; } void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) { - unsigned long flags; - - spin_lock_irqsave(&cq->hwq.lock, flags); if (arm_type) bnxt_qplib_arm_cq(cq, arm_type); /* Using cq->arm_state variable to track whether to issue cq handler */ atomic_set(&cq->arm_state, 1); - spin_unlock_irqrestore(&cq->hwq.lock, flags); } void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index ca0a2ffa3509..ade9f13c0fd1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -389,6 +389,18 @@ struct bnxt_qplib_cq { struct list_head sqf_head, rqf_head; atomic_t arm_state; spinlock_t compl_lock; /* synch CQ handlers */ +/* Locking Notes: + * QP can move to error state from modify_qp, async error event or error + * CQE as part of poll_cq. When QP is moved to error state, it gets added + * to two flush lists, one each for SQ and RQ. + * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq + * flush_locks should be acquired when QP is moved to error. The control path + * operations(modify_qp and async error events) are synchronized with poll_cq + * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ. + * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq + * of the same QP while manipulating the flush list. 
+ */ + spinlock_t flush_lock; /* QP flush management */ }; #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 14d153d4013c..80027a494730 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, err_event->res_err_state_reason); if (!qp) break; - bnxt_qplib_acquire_cq_locks(qp, &flags); bnxt_qplib_mark_qp_error(qp); - bnxt_qplib_release_cq_locks(qp, &flags); + rcfw->aeq_handler(rcfw, qp_event, qp); break; default: /* Command Response */ -- cgit v1.2.3-59-g8ed1b From bb7f8f199c354c4cf155b1d6d55f86eaaed7fa5a Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Wed, 7 Mar 2018 08:07:41 +0200 Subject: IB/core: Fix possible crash to access NULL netdev The returned resolved_dev might be NULL, as the ifindex is a transient number. Skipping the NULL check of resolved_dev might crash the kernel, so perform the NULL check before accessing resolved_dev. Additionally, rdma_resolve_ip_route() invokes addr_resolve(), which already performs the check and address translation for a loopback ifindex. Therefore, checking it again in rdma_resolve_ip_route() is not helpful, and the code is simplified to avoid the IFF_LOOPBACK check. Fixes: 200298326b27 ("IB/core: Validate route when we init ah") Reviewed-by: Daniel Jurgens Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/sa_query.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 8cf15d4a8ac4..9f029a1ca5ea 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, resolved_dev = dev_get_by_index(dev_addr.net, dev_addr.bound_dev_if); - if (resolved_dev->flags & IFF_LOOPBACK) { - dev_put(resolved_dev); - resolved_dev = idev; - dev_hold(resolved_dev); + if (!resolved_dev) { + dev_put(idev); + return -ENODEV; } ndev = ib_get_ndev_from_path(rec); rcu_read_lock(); -- cgit v1.2.3-59-g8ed1b From 6a21dfc0d0db7b7e0acedce67ca533a6eb19283c Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 7 Mar 2018 14:49:09 +0200 Subject: RDMA/ucma: Limit possible option size Users of ucma are supposed to provide the size of the option level; in most paths it is supposed to be equal to a u8 or u16, but that is not the case for the IB path record, where it can be a multiple of struct ib_path_rec_data. This patch takes the simplest possible approach and prevents providing values larger than it is possible to allocate.
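A hedged, standalone sketch of the bounded-copy pattern this fix applies; memdup_user() and KMALLOC_MAX_SIZE are stubbed here so the snippet compiles in userspace, and the bound value is illustrative only:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define KMALLOC_MAX_SIZE_STUB (1UL << 22) /* illustrative bound only */

/* Stand-in for the kernel's memdup_user(). */
static void *memdup_user_stub(const void *uptr, size_t len)
{
    void *p = malloc(len);

    if (!p)
        return NULL;
    memcpy(p, uptr, len); /* the kernel would copy_from_user() */
    return p;
}

/* Reject user-controlled lengths that could never be allocated,
 * before attempting the copy at all. */
static void *dup_option(const void *uptr, size_t optlen)
{
    if (optlen > KMALLOC_MAX_SIZE_STUB)
        return NULL; /* the kernel fix returns -EINVAL here */
    return memdup_user_stub(uptr, optlen);
}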
Reported-by: syzbot+a38b0e9f694c379ca7ce@syzkaller.appspotmail.com Fixes: 7ce86409adcd ("RDMA/ucma: Allow user space to set service type") Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/ucma.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index f015f1bf88c9..1817dfea8345 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1294,6 +1294,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) + return -EINVAL; + optval = memdup_user((void __user *) (unsigned long) cmd.optval, cmd.optlen); if (IS_ERR(optval)) { -- cgit v1.2.3-59-g8ed1b From aa0de36a40f446f5a21a7c1e677b98206e242edb Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 7 Mar 2018 15:29:09 +0200 Subject: RDMA/mlx5: Fix integer overflow while resizing CQ The user can provide very large cqe_size which will cause to integer overflow as it can be seen in the following UBSAN warning: Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/cq.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b5cfdaa9c7c8..15457c9569a7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -1178,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + /* check multiplication overflow */ + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, + (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); -- cgit v1.2.3-59-g8ed1b From a5880b84430316e3e1c1f5d23aa32ec6000cc717 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 7 Mar 2018 18:49:16 +0200 Subject: RDMA/ucma: Check that user doesn't overflow QP state The QP state is limited and declared in enum ib_qp_state, but the ucma user was able to supply any possible (u32) value. Reported-by: syzbot+0df1ab766f8924b1edba@syzkaller.appspotmail.com Fixes: 75216638572f ("RDMA/cma: Export rdma cm interface to userspace") Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/core/ucma.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 1817dfea8345..3a9d0f5b5881 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (cmd.qp_state > IB_QPS_ERR) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); -- cgit v1.2.3-59-g8ed1b From 212a0cbc5670c6dd7be74e17168fbf6d9bd8473a Mon Sep 17 00:00:00 2001 From: Doug Ledford Date: Fri, 9 Mar 2018 18:07:46 -0500 Subject: Revert "RDMA/mlx5: Fix integer overflow while resizing CQ" The original commit of this patch has a munged log message that is missing several of the tags the original author intended to be on the patch.
This was due to patchworks misinterpreting a cut-n-paste separator line as an end of message line and munging the mbox that was used to import the patch: https://patchwork.kernel.org/patch/10264089/ The original patch will be reapplied with a fixed commit message so the proper tags are applied. This reverts commit aa0de36a40f446f5a21a7c1e677b98206e242edb. Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/cq.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 15457c9569a7..b5cfdaa9c7c8 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -1178,12 +1178,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - /* check multiplication overflow */ - if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) - return -EINVAL; - - umem = ib_umem_get(context, ucmd.buf_addr, - (size_t)ucmd.cqe_size * entries, + umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); -- cgit v1.2.3-59-g8ed1b From 28e9091e3119933c38933cb8fc48d5618eb784c8 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Wed, 7 Mar 2018 15:29:09 +0200 Subject: RDMA/mlx5: Fix integer overflow while resizing CQ The user can provide very large cqe_size which will cause to integer overflow as it can be seen in the following UBSAN warning: ======================================================================= UBSAN: Undefined behaviour in drivers/infiniband/hw/mlx5/cq.c:1192:53 signed integer overflow: 64870 * 65536 cannot be represented in type 'int' CPU: 0 PID: 267 Comm: syzkaller605279 Not tainted 4.15.0+ #90 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 Call Trace: dump_stack+0xde/0x164 ? dma_virt_map_sg+0x22c/0x22c ubsan_epilogue+0xe/0x81 handle_overflow+0x1f3/0x251 ? __ubsan_handle_negate_overflow+0x19b/0x19b ? lock_acquire+0x440/0x440 mlx5_ib_resize_cq+0x17e7/0x1e40 ? cyc2ns_read_end+0x10/0x10 ? native_read_msr_safe+0x6c/0x9b ? cyc2ns_read_end+0x10/0x10 ? mlx5_ib_modify_cq+0x220/0x220 ? sched_clock_cpu+0x18/0x200 ? lookup_get_idr_uobject+0x200/0x200 ? rdma_lookup_get_uobject+0x145/0x2f0 ib_uverbs_resize_cq+0x207/0x3e0 ? ib_uverbs_ex_create_cq+0x250/0x250 ib_uverbs_write+0x7f9/0xef0 ? cyc2ns_read_end+0x10/0x10 ? print_irqtrace_events+0x280/0x280 ? ib_uverbs_ex_create_cq+0x250/0x250 ? uverbs_devnode+0x110/0x110 ? sched_clock_cpu+0x18/0x200 ? do_raw_spin_trylock+0x100/0x100 ? __lru_cache_add+0x16e/0x290 __vfs_write+0x10d/0x700 ? uverbs_devnode+0x110/0x110 ? kernel_read+0x170/0x170 ? sched_clock_cpu+0x18/0x200 ? security_file_permission+0x93/0x260 vfs_write+0x1b0/0x550 SyS_write+0xc7/0x1a0 ? SyS_read+0x1a0/0x1a0 ? 
trace_hardirqs_on_thunk+0x1a/0x1c entry_SYSCALL_64_fastpath+0x1e/0x8b RIP: 0033:0x433549 RSP: 002b:00007ffe63bd1ea8 EFLAGS: 00000217 ======================================================================= Cc: syzkaller Cc: # 3.13 Fixes: bde51583f49b ("IB/mlx5: Add support for resize CQ") Reported-by: Noa Osherovich Reviewed-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/cq.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index b5cfdaa9c7c8..15457c9569a7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -1178,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + /* check multiplication overflow */ + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, + (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); -- cgit v1.2.3-59-g8ed1b
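The overflow test in this final version reads a little indirectly: for entries >= 1, the condition SIZE_MAX / cqe_size <= entries - 1 is equivalent to entries > SIZE_MAX / cqe_size, i.e. cqe_size * entries cannot be represented in size_t. A standalone sketch of the same idiom:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Compute entries * elem_size, failing instead of wrapping around. */
static int checked_mul(size_t entries, size_t elem_size, size_t *out)
{
    if (elem_size && entries > SIZE_MAX / elem_size)
        return -EINVAL; /* product would overflow size_t */

    *out = entries * elem_size;
    return 0;
}

A caller such as a resize path can then do: size_t bytes; if (checked_mul(entries, cqe_size, &bytes)) return -EINVAL; and pass only the checked byte count onward.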