Diffstat (limited to 'drivers/infiniband/hw/irdma')
-rw-r--r--  drivers/infiniband/hw/irdma/defs.h  |  1
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c    | 51
-rw-r--r--  drivers/infiniband/hw/irdma/type.h  |  1
-rw-r--r--  drivers/infiniband/hw/irdma/user.h  |  1
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c |  3
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 69
6 files changed, 98 insertions, 28 deletions
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index e03e03082a5f..c1906cab5c8a 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -314,6 +314,7 @@ enum irdma_cqp_op_type {
 #define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
 #define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
 #define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_INVALID_REQUEST 0x0223
 #define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
 #define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
 #define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 4f132c6fb653..ab246447520b 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -138,59 +138,68 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
 	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 
 	switch (info->ae_id) {
-	case IRDMA_AE_AMP_UNALLOCATED_STAG:
 	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
 	case IRDMA_AE_AMP_INVALID_STAG:
-		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
-		fallthrough;
+	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+	case IRDMA_AE_AMP_UNALLOCATED_STAG:
 	case IRDMA_AE_AMP_BAD_PD:
-	case IRDMA_AE_UDA_XMIT_BAD_PD:
+	case IRDMA_AE_AMP_BAD_QP:
+	case IRDMA_AE_AMP_BAD_STAG_KEY:
+	case IRDMA_AE_AMP_BAD_STAG_INDEX:
+	case IRDMA_AE_AMP_TO_WRAP:
+	case IRDMA_AE_PRIV_OPERATION_DENIED:
 		qp->flush_code = FLUSH_PROT_ERR;
+		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 		break;
-	case IRDMA_AE_AMP_BAD_QP:
+	case IRDMA_AE_UDA_XMIT_BAD_PD:
 	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
 		qp->flush_code = FLUSH_LOC_QP_OP_ERR;
+		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+		break;
+	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+	case IRDMA_AE_UDA_L4LEN_INVALID:
+	case IRDMA_AE_DDP_UBE_INVALID_MO:
+	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+		qp->flush_code = FLUSH_LOC_LEN_ERR;
+		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 		break;
-	case IRDMA_AE_AMP_BAD_STAG_KEY:
-	case IRDMA_AE_AMP_BAD_STAG_INDEX:
-	case IRDMA_AE_AMP_TO_WRAP:
-	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
 	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
-	case IRDMA_AE_PRIV_OPERATION_DENIED:
-	case IRDMA_AE_IB_INVALID_REQUEST:
 	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
 		qp->flush_code = FLUSH_REM_ACCESS_ERR;
 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 		break;
 	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
-	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
-	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
-	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
-	case IRDMA_AE_UDA_L4LEN_INVALID:
+	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
 	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
-		qp->flush_code = FLUSH_LOC_LEN_ERR;
+	case IRDMA_AE_IB_REMOTE_OP_ERROR:
+		qp->flush_code = FLUSH_REM_OP_ERR;
+		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 		break;
 	case IRDMA_AE_LCE_QP_CATASTROPHIC:
 		qp->flush_code = FLUSH_FATAL_ERR;
+		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 		break;
-	case IRDMA_AE_DDP_UBE_INVALID_MO:
 	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
-	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
 		qp->flush_code = FLUSH_GENERAL_ERR;
 		break;
 	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
 		qp->flush_code = FLUSH_RETRY_EXC_ERR;
+		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 		break;
 	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
 	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
 	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
 		qp->flush_code = FLUSH_MW_BIND_ERR;
+		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 		break;
-	case IRDMA_AE_IB_REMOTE_OP_ERROR:
-		qp->flush_code = FLUSH_REM_OP_ERR;
+	case IRDMA_AE_IB_INVALID_REQUEST:
+		qp->flush_code = FLUSH_REM_INV_REQ_ERR;
+		qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
 		break;
 	default:
-		qp->flush_code = FLUSH_FATAL_ERR;
+		qp->flush_code = FLUSH_GENERAL_ERR;
+		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
 		break;
 	}
 }
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 9e7b8ecb137a..517d41a1c289 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -98,6 +98,7 @@ enum irdma_term_mpa_errors {
 enum irdma_qp_event_type {
 	IRDMA_QP_EVENT_CATASTROPHIC,
 	IRDMA_QP_EVENT_ACCESS_ERR,
+	IRDMA_QP_EVENT_REQ_ERR,
 };
 
 enum irdma_hw_stats_index_32b {
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ddd0ebbdd7d5..2ef61923c926 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -103,6 +103,7 @@ enum irdma_flush_opcode {
 	FLUSH_FATAL_ERR,
 	FLUSH_RETRY_EXC_ERR,
 	FLUSH_MW_BIND_ERR,
+	FLUSH_REM_INV_REQ_ERR,
 };
 
 enum irdma_cmpl_status {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 075defaabee5..8dfc9e154d73 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2479,6 +2479,9 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
 	case IRDMA_QP_EVENT_ACCESS_ERR:
 		ibevent.event = IB_EVENT_QP_ACCESS_ERR;
 		break;
+	case IRDMA_QP_EVENT_REQ_ERR:
+		ibevent.event = IB_EVENT_QP_REQ_ERR;
+		break;
 	}
 	ibevent.device = iwqp->ibqp.device;
 	ibevent.element.qp = &iwqp->ibqp;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b207f5084eb..a22afbb25bc5 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -299,13 +299,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 				struct ib_udata *udata)
 {
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
 	struct ib_device *ibdev = uctx->device;
 	struct irdma_device *iwdev = to_iwdev(ibdev);
-	struct irdma_alloc_ucontext_req req;
+	struct irdma_alloc_ucontext_req req = {};
 	struct irdma_alloc_ucontext_resp uresp = {};
 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
 	struct irdma_uk_attrs *uk_attrs;
 
+	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+		return -EINVAL;
+
 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
 		return -EINVAL;
 
@@ -317,7 +323,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 
 	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
 	/* GEN_1 legacy support with libi40iw */
-	if (udata->outlen < sizeof(uresp)) {
+	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
 			return -EOPNOTSUPP;
 
@@ -389,6 +395,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context)
  */
 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
 	struct irdma_pd *iwpd = to_iwpd(pd);
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -398,6 +405,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 	u32 pd_id = 0;
 	int err;
 
+	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+		return -EINVAL;
+
 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
 			       &rf->next_pd);
 	if (err)
@@ -814,12 +824,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 			   struct ib_qp_init_attr *init_attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
 	struct ib_pd *ibpd = ibqp->pd;
 	struct irdma_pd *iwpd = to_iwpd(ibpd);
 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
 	struct irdma_pci_f *rf = iwdev->rf;
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
-	struct irdma_create_qp_req req;
+	struct irdma_create_qp_req req = {};
 	struct irdma_create_qp_resp uresp = {};
 	u32 qp_num = 0;
 	int err_code;
@@ -836,6 +848,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 	if (err_code)
 		return err_code;
 
+	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+		return -EINVAL;
+
 	sq_size = init_attr->cap.max_send_wr;
 	rq_size = init_attr->cap.max_recv_wr;
 
@@ -1120,6 +1136,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 int attr_mask, struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
@@ -1138,6 +1156,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	roce_info = &iwqp->roce_info;
 	udp_info = &iwqp->udp_info;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
 
@@ -1374,7 +1399,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
 
-			if (udata) {
+			if (udata && udata->inlen) {
 				if (ib_copy_from_udata(&ureq, udata,
 						       min(sizeof(ureq), udata->inlen)))
 					return -EINVAL;
@@ -1426,7 +1451,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		} else {
 			iwqp->ibqp_state = attr->qp_state;
 		}
-		if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+		if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 			struct irdma_ucontext *ucontext;
 
 			ucontext = rdma_udata_to_drv_context(udata,
@@ -1466,6 +1491,8 @@ exit:
 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		    struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1480,6 +1507,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	int err;
 	unsigned long flags;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
 
@@ -1565,7 +1599,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	case IB_QPS_RESET:
 		if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
-			if (udata) {
+			if (udata && udata->inlen) {
 				if (ib_copy_from_udata(&ureq, udata,
 						       min(sizeof(ureq), udata->inlen)))
 					return -EINVAL;
@@ -1662,7 +1696,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 			}
 		}
 	}
-	if (attr_mask & IB_QP_STATE && udata &&
+	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 		struct irdma_ucontext *ucontext;
 
@@ -1797,6 +1831,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 			   struct ib_udata *udata)
 {
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
 	struct irdma_cq *iwcq = to_iwcq(ibcq);
 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
 	struct irdma_cqp_request *cqp_request;
@@ -1819,6 +1854,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 			    IRDMA_FEATURE_CQ_RESIZE))
 		return -EOPNOTSUPP;
 
+	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+		return -EINVAL;
+
 	if (entries > rf->max_cqe)
 		return -EINVAL;
 
@@ -1951,6 +1989,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 			   const struct ib_cq_init_attr *attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
 	struct ib_device *ibdev = ibcq->device;
 	struct irdma_device *iwdev = to_iwdev(ibdev);
 	struct irdma_pci_f *rf = iwdev->rf;
@@ -1969,6 +2009,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
 	if (err_code)
 		return err_code;
+
+	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+		return -EINVAL;
+
 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
 				    &rf->next_cq);
 	if (err_code)
@@ -2746,6 +2791,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 				       u64 virt, int access,
 				       struct ib_udata *udata)
 {
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_ucontext *ucontext;
 	struct irdma_pble_alloc *palloc;
@@ -2763,6 +2809,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
 
+	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+		return ERR_PTR(-EINVAL);
+
 	region = ib_umem_get(pd->device, start, len, access);
 
 	if (IS_ERR(region)) {
@@ -3315,6 +3364,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
 		return IB_WC_RETRY_EXC_ERR;
 	case FLUSH_MW_BIND_ERR:
 		return IB_WC_MW_BIND_ERR;
+	case FLUSH_REM_INV_REQ_ERR:
+		return IB_WC_REM_INV_REQ_ERR;
 	case FLUSH_FATAL_ERR:
 	default:
 		return IB_WC_FATAL_ERR;
@@ -4296,12 +4347,16 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
 				struct rdma_ah_init_attr *attr,
 				struct ib_udata *udata)
 {
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
 	struct irdma_create_ah_resp uresp;
 	struct irdma_ah *parent_ah;
 	int err;
 
+	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+		return -EINVAL;
+
 	err = irdma_setup_ah(ibah, attr);
 	if (err)
 		return err;
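
The udata checks added throughout verbs.c all follow one pattern: compute, with offsetofend(), the smallest request/response size that still contains every ABI field the handler touches, then reject udata buffers shorter than that before any ib_copy_from_udata()/ib_copy_to_udata(). The following is a minimal userspace sketch of that pattern, not part of the patch; the struct and field names are illustrative stand-ins, and offsetofend() is open-coded the way the kernel defines it.

    /* Sketch of the offsetofend()-based minimum-length check (illustrative only). */
    #include <stddef.h>
    #include <stdio.h>

    /* Open-coded equivalent of the kernel's offsetofend(). */
    #define offsetofend(TYPE, MEMBER) \
            (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

    /* Stand-in for a uverbs request ABI struct; only its layout matters here. */
    struct example_req {
            unsigned long long user_buf;
            unsigned int flags;
            unsigned char rsvd[4];
    };

    /* Smallest buffer that still holds every field up to and including rsvd. */
    #define EXAMPLE_MIN_REQ_LEN offsetofend(struct example_req, rsvd)

    /* Reject a user buffer that cannot hold every field the handler will read. */
    static int check_inlen(size_t inlen)
    {
            return inlen < EXAMPLE_MIN_REQ_LEN ? -1 : 0;
    }

    int main(void)
    {
            printf("min req len: %zu\n", (size_t)EXAMPLE_MIN_REQ_LEN);
            printf("inlen 8  -> %d\n", check_inlen(8));  /* too short, rejected */
            printf("inlen 16 -> %d\n", check_inlen(16)); /* large enough, accepted */
            return 0;
    }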