Diffstat (limited to 'drivers/infiniband/hw/irdma')
30 files changed, 1671 insertions, 1642 deletions
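The bulk of the churn below is one mechanical conversion: every return of the driver-private enum irdma_status_code becomes a plain int carrying a negative kernel errno (IRDMA_ERR_NO_MEMORY and IRDMA_ERR_RING_FULL become -ENOMEM, IRDMA_ERR_TIMEOUT becomes -ETIMEDOUT, and so on), so callers can propagate the value straight to the RDMA core without a translation layer. A minimal sketch of the resulting shape — example_post_cqp_wqe is a hypothetical helper, not a function from this patch:

static int example_post_cqp_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
{
	__le64 *wqe;

	/* reserve a CQP SQ WQE; a full ring now reports -ENOMEM
	 * (previously IRDMA_ERR_RING_FULL)
	 */
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	/* ... fill the WQE ... */
	return 0;
}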
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 6dea0a49d171..7b086fe63a24 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1477,12 +1477,13 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
 	list_for_each_entry (listen_node, &cm_core->listen_list, list) {
 		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
 		listen_port = listen_node->loc_port;
+		if (listen_port != dst_port ||
+		    !(listener_state & listen_node->listener_state))
+			continue;
 		/* compare node pair, return node handle if a match */
-		if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
-		     !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
-		    listen_port == dst_port &&
-		    vlan_id == listen_node->vlan_id &&
-		    (listener_state & listen_node->listener_state)) {
+		if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) ||
+		    (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) &&
+		     vlan_id == listen_node->vlan_id)) {
 			refcount_inc(&listen_node->refcnt);
 			spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
@@ -1501,15 +1502,14 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
  * @cm_info: CM info for parent listen node
  * @cm_parent_listen_node: The parent listen node
  */
-static enum irdma_status_code
-irdma_del_multiple_qhash(struct irdma_device *iwdev,
-			 struct irdma_cm_info *cm_info,
-			 struct irdma_cm_listener *cm_parent_listen_node)
+static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
+				    struct irdma_cm_info *cm_info,
+				    struct irdma_cm_listener *cm_parent_listen_node)
 {
 	struct irdma_cm_listener *child_listen_node;
-	enum irdma_status_code ret = IRDMA_ERR_CFG;
 	struct list_head *pos, *tpos;
 	unsigned long flags;
+	int ret = -EINVAL;
 
 	spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
 	list_for_each_safe (pos, tpos,
@@ -1618,16 +1618,16 @@ u16 irdma_get_vlan_ipv4(u32 *addr)
  * Adds a qhash and a child listen node for every IPv6 address
  * on the adapter and adds the associated qhash filter
  */
-static enum irdma_status_code
-irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
-		struct irdma_cm_listener *cm_parent_listen_node)
+static int irdma_add_mqh_6(struct irdma_device *iwdev,
+			   struct irdma_cm_info *cm_info,
+			   struct irdma_cm_listener *cm_parent_listen_node)
 {
 	struct net_device *ip_dev;
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifp, *tmp;
-	enum irdma_status_code ret = 0;
 	struct irdma_cm_listener *child_listen_node;
 	unsigned long flags;
+	int ret = 0;
 
 	rtnl_lock();
 	for_each_netdev(&init_net, ip_dev) {
@@ -1653,7 +1653,7 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
 					       child_listen_node);
 			if (!child_listen_node) {
 				ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
-				ret = IRDMA_ERR_NO_MEMORY;
+				ret = -ENOMEM;
 				goto exit;
 			}
 
@@ -1700,16 +1700,16 @@ exit:
  * Adds a qhash and a child listen node for every IPv4 address
  * on the adapter and adds the associated qhash filter
 */
-static enum irdma_status_code
-irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
-		struct irdma_cm_listener *cm_parent_listen_node)
+static int irdma_add_mqh_4(struct irdma_device *iwdev,
+			   struct irdma_cm_info *cm_info,
+			   struct irdma_cm_listener *cm_parent_listen_node)
 {
 	struct net_device *ip_dev;
 	struct in_device *idev;
 	struct irdma_cm_listener *child_listen_node;
-	enum irdma_status_code ret = 0;
 	unsigned long flags;
 	const struct in_ifaddr *ifa;
+	int ret = 0;
 
 	rtnl_lock();
 	for_each_netdev(&init_net, ip_dev) {
@@ -1734,7 +1734,7 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
 			if (!child_listen_node) {
 				ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
 				in_dev_put(idev);
-				ret = IRDMA_ERR_NO_MEMORY;
+				ret = -ENOMEM;
 				goto exit;
 			}
 
@@ -1781,9 +1781,9 @@ exit:
  * @cm_info: CM info for parent listen node
  * @cm_listen_node: The parent listen node
 */
-static enum irdma_status_code
-irdma_add_mqh(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
-	      struct irdma_cm_listener *cm_listen_node)
+static int irdma_add_mqh(struct irdma_device *iwdev,
+			 struct irdma_cm_info *cm_info,
+			 struct irdma_cm_listener *cm_listen_node)
 {
 	if (cm_info->ipv4)
 		return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
@@ -2200,7 +2200,7 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
 	/* set our node specific transport info */
 	cm_node->ipv4 = cm_info->ipv4;
 	cm_node->vlan_id = cm_info->vlan_id;
-	if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb)
+	if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
 		cm_node->vlan_id = 0;
 	cm_node->tos = cm_info->tos;
 	cm_node->user_pri = cm_info->user_pri;
@@ -2209,8 +2209,12 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
 			ibdev_warn(&iwdev->ibdev,
 				   "application TOS[%d] and remote client TOS[%d] mismatch\n",
 				   listener->tos, cm_info->tos);
-		cm_node->tos = max(listener->tos, cm_info->tos);
-		cm_node->user_pri = rt_tos2priority(cm_node->tos);
+		if (iwdev->vsi.dscp_mode) {
+			cm_node->user_pri = listener->user_pri;
+		} else {
+			cm_node->tos = max(listener->tos, cm_info->tos);
+			cm_node->user_pri = rt_tos2priority(cm_node->tos);
+		}
 		ibdev_dbg(&iwdev->ibdev,
 			  "DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
 			  cm_node->user_pri);
@@ -2305,10 +2309,8 @@ err:
 	return NULL;
 }
 
-static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
+static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
 {
-	struct irdma_cm_node *cm_node =
-			    container_of(rcu_head, struct irdma_cm_node, rcu_head);
 	struct irdma_cm_core *cm_core = cm_node->cm_core;
 	struct irdma_qp *iwqp;
 	struct irdma_cm_info nfo;
@@ -2356,7 +2358,6 @@ static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
 	}
 
 	cm_core->cm_free_ah(cm_node);
-	kfree(cm_node);
 }
 
 /**
@@ -2384,8 +2385,9 @@ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
 
 	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
 
-	/* wait for all list walkers to exit their grace period */
-	call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
+	irdma_destroy_connection(cm_node);
+
+	kfree_rcu(cm_node, rcu_head);
 }
 
 /**
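The RCU change just above is worth pausing on: irdma_cm_node_free_cb(), which ran the whole connection teardown inside an RCU callback, is split into a synchronous irdma_destroy_connection() followed by kfree_rcu(), so only the final free waits out the grace period. A generic sketch of the idiom, with a hypothetical example_node type standing in for the cm_node:

struct example_node {
	int data;
	struct rcu_head rcu_head;
};

static void example_remove(struct example_node *node)
{
	/* teardown that may sleep or take locks runs synchronously,
	 * as irdma_destroy_connection() now does
	 */
	/* ... */

	/* defer only the kfree() past the RCU grace period; no
	 * separate callback function is needed
	 */
	kfree_rcu(node, rcu_head);
}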
@@ -3201,8 +3203,7 @@ static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
  * @iwdev: iwarp device structure
  * @rdma_ver: HW version
 */
-enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
-					   u8 rdma_ver)
+int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
 {
 	struct irdma_cm_core *cm_core = &iwdev->cm_core;
 
@@ -3212,7 +3213,7 @@ enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
 	/* Handles CM event work items send to Iwarp core */
 	cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
 	if (!cm_core->event_wq)
-		return IRDMA_ERR_NO_MEMORY;
+		return -ENOMEM;
 
 	INIT_LIST_HEAD(&cm_core->listen_list);
 
@@ -3244,15 +3245,10 @@ enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
 */
 void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
 {
-	unsigned long flags;
-
 	if (!cm_core)
 		return;
 
-	spin_lock_irqsave(&cm_core->ht_lock, flags);
-	if (timer_pending(&cm_core->tcp_timer))
-		del_timer_sync(&cm_core->tcp_timer);
-	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+	del_timer_sync(&cm_core->tcp_timer);
 
 	destroy_workqueue(cm_core->event_wq);
 	cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
@@ -3465,12 +3461,6 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
 	}
 
 	cm_id = iwqp->cm_id;
-	/* make sure we havent already closed this connection */
-	if (!cm_id) {
-		spin_unlock_irqrestore(&iwqp->lock, flags);
-		return;
-	}
-
 	original_hw_tcp_state = iwqp->hw_tcp_state;
 	original_ibqp_state = iwqp->ibqp_state;
 	last_ae = iwqp->last_aeq;
@@ -3492,11 +3482,11 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
 			disconn_status = -ECONNRESET;
 	}
 
-	if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
-	     original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
-	     last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
-	     last_ae == IRDMA_AE_BAD_CLOSE ||
-	     last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
+	if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+	    original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+	    last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+	    last_ae == IRDMA_AE_BAD_CLOSE ||
+	    last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
 		issue_close = 1;
 		iwqp->cm_id = NULL;
 		qp->term_flags = 0;
@@ -3835,7 +3825,11 @@ int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	cm_info.cm_id = cm_id;
 	cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
 	cm_info.tos = cm_id->tos;
-	cm_info.user_pri = rt_tos2priority(cm_id->tos);
+	if (iwdev->vsi.dscp_mode)
+		cm_info.user_pri =
+			iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
+	else
+		cm_info.user_pri = rt_tos2priority(cm_id->tos);
 
 	if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
 		return -ENOMEM;
@@ -3915,10 +3909,10 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
 	struct irdma_device *iwdev;
 	struct irdma_cm_listener *cm_listen_node;
 	struct irdma_cm_info cm_info = {};
-	enum irdma_status_code err;
 	struct sockaddr_in *laddr;
 	struct sockaddr_in6 *laddr6;
 	bool wildcard = false;
+	int err;
 
 	iwdev = to_iwdev(cm_id->device);
 	if (!iwdev)
@@ -3959,7 +3953,7 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
 		}
 	}
 
-	if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb)
+	if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
 		cm_info.vlan_id = 0;
 	cm_info.backlog = backlog;
 	cm_info.cm_id = cm_id;
@@ -3977,7 +3971,11 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
 
 	cm_id->provider_data = cm_listen_node;
 	cm_listen_node->tos = cm_id->tos;
-	cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+	if (iwdev->vsi.dscp_mode)
+		cm_listen_node->user_pri =
+			iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
+	else
+		cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
 	cm_info.user_pri = cm_listen_node->user_pri;
 	if (!cm_listen_node->reused_node) {
 		if (wildcard) {
@@ -4234,10 +4232,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
 	struct irdma_cm_node *cm_node;
 	struct list_head teardown_list;
 	struct ib_qp_attr attr;
-	struct irdma_sc_vsi *vsi = &iwdev->vsi;
-	struct irdma_sc_qp *sc_qp;
-	struct irdma_qp *qp;
-	int i;
 
 	INIT_LIST_HEAD(&teardown_list);
 
@@ -4254,52 +4248,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
 		irdma_cm_disconn(cm_node->iwqp);
 		irdma_rem_ref_cm_node(cm_node);
 	}
-	if (!iwdev->roce_mode)
-		return;
-
-	INIT_LIST_HEAD(&teardown_list);
-	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
-		mutex_lock(&vsi->qos[i].qos_mutex);
-		list_for_each_safe (list_node, list_core_temp,
-				    &vsi->qos[i].qplist) {
-			u32 qp_ip[4];
-
-			sc_qp = container_of(list_node, struct irdma_sc_qp,
-					     list);
-			if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
-				continue;
-
-			qp = sc_qp->qp_uk.back_qp;
-			if (!disconnect_all) {
-				if (nfo->ipv4)
-					qp_ip[0] = qp->udp_info.local_ipaddr[3];
-				else
-					memcpy(qp_ip,
-					       &qp->udp_info.local_ipaddr[0],
-					       sizeof(qp_ip));
-			}
-
-			if (disconnect_all ||
-			    (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
-			     !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
-				spin_lock(&iwdev->rf->qptable_lock);
-				if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
-					irdma_qp_add_ref(&qp->ibqp);
-					list_add(&qp->teardown_entry,
-						 &teardown_list);
-				}
-				spin_unlock(&iwdev->rf->qptable_lock);
-			}
-		}
-		mutex_unlock(&vsi->qos[i].qos_mutex);
-	}
-
-	list_for_each_safe (list_node, list_core_temp, &teardown_list) {
-		qp = container_of(list_node, struct irdma_qp, teardown_entry);
-		attr.qp_state = IB_QPS_ERR;
-		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
-		irdma_qp_rem_ref(&qp->ibqp);
-	}
 }
 
 /**
@@ -4325,11 +4273,11 @@ static void irdma_qhash_ctrl(struct irdma_device *iwdev,
 	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
 	struct irdma_cm_listener *child_listen_node;
 	struct list_head *pos, *tpos;
-	enum irdma_status_code err;
 	bool node_allocated = false;
 	enum irdma_quad_hash_manage_type op = ifup ?
 					      IRDMA_QHASH_MANAGE_TYPE_ADD :
 					      IRDMA_QHASH_MANAGE_TYPE_DELETE;
+	int err;
 
 	list_for_each_safe (pos, tpos, child_listen_list) {
 		child_listen_node = list_entry(pos, struct irdma_cm_listener,
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
index 3bf42728e9b7..19c284975fc7 100644
--- a/drivers/infiniband/hw/irdma/cm.h
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -384,6 +384,13 @@ int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
 			    struct irdma_puda_buf *sqbuf,
 			    enum irdma_timer_type type, int send_retrans,
 			    int close_when_complete);
+
+static inline u8 irdma_tos2dscp(u8 tos)
+{
+#define IRDMA_DSCP_VAL GENMASK(7, 2)
+	return (u8)FIELD_GET(IRDMA_DSCP_VAL, tos);
+}
+
 int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
 int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
 int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
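The new irdma_tos2dscp() helper above extracts the six-bit DSCP code point from the top of the ToS byte (GENMASK(7, 2)); when the VSI is in DSCP mode, that value indexes dscp_map[] to pick the user priority instead of rt_tos2priority(). The same arithmetic in standalone C — a plain shift in place of FIELD_GET(), values purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* mirrors irdma_tos2dscp(): DSCP is the upper six bits of ToS */
static uint8_t tos2dscp(uint8_t tos)
{
	return tos >> 2; /* FIELD_GET(GENMASK(7, 2), tos) */
}

int main(void)
{
	/* ToS 0x68 carries DSCP 26 (AF31); in DSCP mode the driver
	 * would then read vsi->dscp_map[26] as the user priority */
	printf("dscp = %u\n", tos2dscp(0x68));
	return 0;
}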
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 7264f8c2f7d5..a41e0d21143a 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 /* Copyright (c) 2015 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
 #include "osdep.h"
-#include "status.h"
 #include "hmc.h"
 #include "defs.h"
 #include "type.h"
@@ -68,6 +69,31 @@ void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
 	}
 }
 
+static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
+			       struct irdma_l2params *l2p)
+{
+	u8 i;
+
+	vsi->qos_rel_bw = l2p->vsi_rel_bw;
+	vsi->qos_prio_type = l2p->vsi_prio_type;
+	vsi->dscp_mode = l2p->dscp_mode;
+	if (l2p->dscp_mode) {
+		memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
+		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+			l2p->up2tc[i] = i;
+	}
+	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
+		vsi->qos[i].traffic_class = l2p->up2tc[i];
+		vsi->qos[i].rel_bw =
+			l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
+		vsi->qos[i].prio_type =
+			l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
+		vsi->qos[i].valid = false;
+	}
+}
+
 /**
  * irdma_change_l2params - given the new l2 parameters, change all qp
  * @vsi: RDMA VSI pointer
@@ -86,6 +112,7 @@ void irdma_change_l2params(struct irdma_sc_vsi *vsi,
 		return;
 
 	vsi->tc_change_pending = false;
+	irdma_set_qos_info(vsi, l2params);
 	irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
 }
 
@@ -152,17 +179,16 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
-			     struct irdma_add_arp_cache_entry_info *info,
-			     u64 scratch, bool post_sq)
+static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+					struct irdma_add_arp_cache_entry_info *info,
+					u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	u64 hdr;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 8, info->reach_max);
 	set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
@@ -190,16 +216,15 @@ irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
  * @arp_index: arp index to delete arp entry
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
-			     u16 arp_index, bool post_sq)
+static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+					u16 arp_index, bool post_sq)
 {
 	__le64 *wqe;
 	u64 hdr;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	hdr = arp_index |
 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
@@ -224,17 +249,16 @@ irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
-			    struct irdma_apbvt_info *info, u64 scratch,
-			    bool post_sq)
+static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+				       struct irdma_apbvt_info *info,
+				       u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	u64 hdr;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, info->port);
 
@@ -272,7 +296,7 @@ irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
  * quad hash entry in the hardware will point to iwarp's qp
  * number and requires no calls from the driver.
 */
-static enum irdma_status_code
+static int
 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
 				  struct irdma_qhash_table_info *info,
 				  u64 scratch, bool post_sq)
@@ -285,7 +309,7 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
 
@@ -348,10 +372,9 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
  * @qp: sc qp
  * @info: initialization qp info
 */
-enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
-					struct irdma_qp_init_info *info)
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
 {
-	enum irdma_status_code ret_code;
+	int ret_code;
 	u32 pble_obj_cnt;
 	u16 wqe_size;
 
@@ -359,7 +382,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
 	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
 	    info->qp_uk_init_info.max_rq_frag_cnt >
 	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
-		return IRDMA_ERR_INVALID_FRAG_COUNT;
+		return -EINVAL;
 
 	qp->dev = info->pd->dev;
 	qp->vsi = info->vsi;
@@ -382,7 +405,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
 
 	if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
 	    (info->virtual_map && info->rq_pa >= pble_obj_cnt))
-		return IRDMA_ERR_INVALID_PBLE_INDEX;
+		return -EINVAL;
 
 	qp->llp_stream_handle = (void *)(-1);
 	qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
@@ -422,8 +445,8 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
-					  u64 scratch, bool post_sq)
+int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+		       u64 scratch, bool post_sq)
 {
 	struct irdma_sc_cqp *cqp;
 	__le64 *wqe;
@@ -431,12 +454,12 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
 
 	cqp = qp->dev->cqp;
 	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
-	    qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
-		return IRDMA_ERR_INVALID_QP_ID;
+	    qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
+		return -EINVAL;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
 	set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -473,9 +496,8 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
-					  struct irdma_modify_qp_info *info,
-					  u64 scratch, bool post_sq)
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
+		       u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
@@ -486,7 +508,7 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
 	cqp = qp->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
 		if (info->dont_send_fin)
@@ -544,9 +566,8 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
  * @ignore_mw_bnd: memory window bind flag
  * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
-					   bool remove_hash_idx, bool ignore_mw_bnd,
-					   bool post_sq)
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
 {
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
@@ -555,7 +576,7 @@ enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
 	cqp = qp->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
 	set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -737,16 +758,15 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
-			       bool post_sq)
+static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+					  bool post_sq)
 {
 	__le64 *wqe;
 	u64 hdr;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
 			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
@@ -772,17 +792,16 @@ irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
-			     struct irdma_local_mac_entry_info *info,
-			     u64 scratch, bool post_sq)
+static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+					struct irdma_local_mac_entry_info *info,
+					u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	u64 header;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
 
@@ -811,16 +830,16 @@ irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
  * @ignore_ref_count: to force mac adde delete
 * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
-			     u16 entry_idx, u8 ignore_ref_count, bool post_sq)
+static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+					u16 entry_idx, u8 ignore_ref_count,
+					bool post_sq)
 {
 	__le64 *wqe;
 	u64 header;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
 		 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
@@ -1033,10 +1052,9 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
-		    struct irdma_allocate_stag_info *info, u64 scratch,
-		    bool post_sq)
+static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+			       struct irdma_allocate_stag_info *info,
+			       u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
@@ -1053,7 +1071,7 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
 	cqp = dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 8,
 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
@@ -1095,10 +1113,9 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
-			   struct irdma_reg_ns_stag_info *info, u64 scratch,
-			   bool post_sq)
+static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+				      struct irdma_reg_ns_stag_info *info,
+				      u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	u64 fbo;
@@ -1116,7 +1133,7 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
 	else if (info->page_size == 0x1000)
 		page_size = IRDMA_PAGE_SIZE_4K;
 	else
-		return IRDMA_ERR_PARAM;
+		return -EINVAL;
 
 	if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
 				   IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
@@ -1126,12 +1143,12 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
 
 	pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 	if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
-		return IRDMA_ERR_INVALID_PBLE_INDEX;
+		return -EINVAL;
 
 	cqp = dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	fbo = info->va & (info->page_size - 1);
 	set_64bit_val(wqe, 0,
@@ -1184,10 +1201,9 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
-		      struct irdma_dealloc_stag_info *info, u64 scratch,
-		      bool post_sq)
+static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+				 struct irdma_dealloc_stag_info *info,
+				 u64 scratch, bool post_sq)
 {
 	u64 hdr;
 	__le64 *wqe;
@@ -1196,7 +1212,7 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
 	cqp = dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 8,
 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1225,9 +1241,9 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
-		  u64 scratch, bool post_sq)
+static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
+			     struct irdma_mw_alloc_info *info, u64 scratch,
+			     bool post_sq)
 {
 	u64 hdr;
 	struct irdma_sc_cqp *cqp;
@@ -1236,7 +1252,7 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
 	cqp = dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 8,
 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1266,9 +1282,9 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
  * @info: fast mr info
 * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code
-irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
-			  struct irdma_fast_reg_stag_info *info, bool post_sq)
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+			      struct irdma_fast_reg_stag_info *info,
+			      bool post_sq)
 {
 	u64 temp, hdr;
 	__le64 *wqe;
@@ -1290,7 +1306,7 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
 	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
 					 IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
 	if (!wqe)
-		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+		return -ENOMEM;
 
 	irdma_clr_wqes(&qp->qp_uk, wqe_idx);
 
@@ -1819,8 +1835,7 @@ void irdma_terminate_received(struct irdma_sc_qp *qp,
 	}
 }
 
-static enum irdma_status_code irdma_null_ws_add(struct irdma_sc_vsi *vsi,
-						u8 user_pri)
+static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 {
 	return 0;
 }
@@ -1843,7 +1858,6 @@ static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
 void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
 		       struct irdma_vsi_init_info *info)
 {
-	struct irdma_l2params *l2p;
 	int i;
 
 	vsi->dev = info->dev;
@@ -1856,18 +1870,8 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
 	if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
 		vsi->fcn_id = info->dev->hmc_fn_id;
 
-	l2p = info->params;
-	vsi->qos_rel_bw = l2p->vsi_rel_bw;
-	vsi->qos_prio_type = l2p->vsi_prio_type;
+	irdma_set_qos_info(vsi, info->params);
 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
-		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
-			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
-		vsi->qos[i].traffic_class = info->params->up2tc[i];
-		vsi->qos[i].rel_bw =
-			l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
-		vsi->qos[i].prio_type =
-			l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
-		vsi->qos[i].valid = false;
 		mutex_init(&vsi->qos[i].qos_mutex);
 		INIT_LIST_HEAD(&vsi->qos[i].qplist);
 	}
@@ -1916,8 +1920,8 @@ static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
  * @vsi: pointer to the vsi structure
  * @info: The info structure used for initialization
 */
-enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
-					    struct irdma_vsi_stats_info *info)
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+			 struct irdma_vsi_stats_info *info)
 {
 	u8 fcn_id = info->fcn_id;
 	struct irdma_dma_mem *stats_buff_mem;
@@ -1932,7 +1936,7 @@ enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
 					      &stats_buff_mem->pa, GFP_KERNEL);
 	if (!stats_buff_mem->va)
-		return IRDMA_ERR_NO_MEMORY;
+		return -ENOMEM;
 
 	vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
 	vsi->pestat->gather_info.last_gather_stats_va =
@@ -1959,7 +1963,7 @@ stats_error:
 			  stats_buff_mem->va, stats_buff_mem->pa);
 	stats_buff_mem->va = NULL;
 
-	return IRDMA_ERR_CQP_COMPL_ERROR;
+	return -EIO;
 }
 
 /**
@@ -2021,19 +2025,19 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
  * @info: gather stats info structure
 * @scratch: u64 saved to be used during cqp completion
 */
-static enum irdma_status_code
-irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
-		      struct irdma_stats_gather_info *info, u64 scratch)
+static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+				 struct irdma_stats_gather_info *info,
+				 u64 scratch)
 {
 	__le64 *wqe;
 	u64 temp;
 
 	if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
-		return IRDMA_ERR_BUF_TOO_SHORT;
+		return -ENOMEM;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 40,
 		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
@@ -2068,17 +2072,16 @@ irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
  * @alloc: alloc vs. delete flag
 * @scratch: u64 saved to be used during cqp completion
 */
-static enum irdma_status_code
-irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
-			   struct irdma_stats_inst_info *info, bool alloc,
-			   u64 scratch)
+static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+				      struct irdma_stats_inst_info *info,
+				      bool alloc, u64 scratch)
 {
 	__le64 *wqe;
 	u64 temp;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 40,
 		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
@@ -2106,9 +2109,8 @@ irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
  * @info: User priority map info
 * @scratch: u64 saved to be used during cqp completion
 */
-static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
-						  struct irdma_up_info *info,
-						  u64 scratch)
+static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+			       struct irdma_up_info *info, u64 scratch)
 {
 	__le64 *wqe;
 	u64 temp = 0;
@@ -2116,7 +2118,7 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
 		temp |= (u64)info->map[i] << (i * 8);
@@ -2149,17 +2151,16 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
  * @node_op: 0 for add 1 for modify, 2 for delete
 * @scratch: u64 saved to be used during cqp completion
 */
-static enum irdma_status_code
-irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
-			struct irdma_ws_node_info *info,
-			enum irdma_ws_node_op node_op, u64 scratch)
+static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+				   struct irdma_ws_node_info *info,
+				   enum irdma_ws_node_op node_op, u64 scratch)
 {
 	__le64 *wqe;
 	u64 temp = 0;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 32,
 		      FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
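Nearly every ctrl.c helper in this file follows the same CQP shape, which is why the errno conversion is so repetitive: reserve a send WQE (-ENOMEM when the ring is full), fill the payload with set_64bit_val(), compose the header with FIELD_PREP() and write it after a barrier, then optionally ring the doorbell. A condensed sketch of that skeleton — IRDMA_CQP_OP_EXAMPLE and the offsets are placeholders, not values from the patch:

static int example_cqp_op(struct irdma_sc_cqp *cqp, u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 16, 0); /* op-specific payload */

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_EXAMPLE) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* publish the WQE body before setting the valid bit */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}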
@@ -2192,9 +2193,9 @@ irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
-					      struct irdma_qp_flush_info *info,
-					      u64 scratch, bool post_sq)
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+			   struct irdma_qp_flush_info *info, u64 scratch,
+			   bool post_sq)
 {
 	u64 temp = 0;
 	__le64 *wqe;
@@ -2213,13 +2214,13 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
 		ibdev_dbg(to_ibdev(qp->dev),
 			  "CQP: Additional flush request ignored for qp %x\n",
 			  qp->qp_uk.qp_id);
-		return IRDMA_ERR_FLUSHED_Q;
+		return -EALREADY;
 	}
 
 	cqp = qp->pd->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	if (info->userflushcode) {
 		if (flush_rq)
@@ -2266,9 +2267,9 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
-					      struct irdma_gen_ae_info *info,
-					      u64 scratch, bool post_sq)
+static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+			   struct irdma_gen_ae_info *info, u64 scratch,
+			   bool post_sq)
 {
 	u64 temp;
 	__le64 *wqe;
@@ -2278,7 +2279,7 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
 	cqp = qp->pd->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
 					  info->ae_src);
@@ -2306,10 +2307,9 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
-			   struct irdma_upload_context_info *info, u64 scratch,
-			   bool post_sq)
+static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+				      struct irdma_upload_context_info *info,
+				      u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
@@ -2318,7 +2318,7 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
 	cqp = dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, info->buf_pa);
 
@@ -2347,21 +2347,20 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
  * @scratch: u64 saved to be used during cqp completion
  * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
-			  struct irdma_cqp_manage_push_page_info *info,
-			  u64 scratch, bool post_sq)
+static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+				     struct irdma_cqp_manage_push_page_info *info,
+				     u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	u64 hdr;
 
 	if (info->free_page &&
 	    info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
-		return IRDMA_ERR_INVALID_PUSH_PAGE_INDEX;
+		return -EINVAL;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, info->qs_handle);
 	hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
@@ -2387,16 +2386,15 @@ irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
  * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
-static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
-						  struct irdma_sc_qp *qp,
-						  u64 scratch)
+static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+			       u64 scratch)
 {
 	u64 hdr;
 	__le64 *wqe;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
 	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
@@ -2418,16 +2416,15 @@ static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
  * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
-static enum irdma_status_code irdma_sc_resume_qp(struct irdma_sc_cqp *cqp,
-						 struct irdma_sc_qp *qp,
-						 u64 scratch)
+static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+			      u64 scratch)
 {
 	u64 hdr;
 	__le64 *wqe;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16,
 		      FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
@@ -2460,14 +2457,13 @@ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
  * @cq: cq struct
 * @info: cq initialization info
 */
-enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
-					struct irdma_cq_init_info *info)
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
 {
 	u32 pble_obj_cnt;
 
 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
-		return IRDMA_ERR_INVALID_PBLE_INDEX;
+		return -EINVAL;
 
 	cq->cq_pa = info->cq_base_pa;
 	cq->dev = info->dev;
@@ -2498,23 +2494,21 @@ enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
  * @check_overflow: flag for overflow check
 * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
-						 u64 scratch,
-						 bool check_overflow,
-						 bool post_sq)
+static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
+			      bool check_overflow, bool post_sq)
 {
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
 	u64 hdr;
 	struct irdma_sc_ceq *ceq;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 
 	cqp = cq->dev->cqp;
-	if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
-		return IRDMA_ERR_INVALID_CQ_ID;
+	if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
+		return -EINVAL;
 
-	if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
-		return IRDMA_ERR_INVALID_CEQ_ID;
+	if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
+		return -EINVAL;
 
 	ceq = cq->dev->ceq[cq->ceq_id];
 	if (ceq && ceq->reg_cq)
@@ -2527,7 +2521,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
 	if (!wqe) {
 		if (ceq && ceq->reg_cq)
 			irdma_sc_remove_cq_ctx(ceq, cq);
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 	}
 
 	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
@@ -2573,8 +2567,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
  * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
-					   bool post_sq)
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
 {
 	struct irdma_sc_cqp *cqp;
 	__le64 *wqe;
@@ -2584,7 +2577,7 @@ enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
 	cqp = cq->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	ceq = cq->dev->ceq[cq->ceq_id];
 	if (ceq && ceq->reg_cq)
@@ -2640,9 +2633,9 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
  * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag to post to sq
 */
-static enum irdma_status_code
-irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
-		   u64 scratch, bool post_sq)
+static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+			      struct irdma_modify_cq_info *info, u64 scratch,
+			      bool post_sq)
 {
 	struct irdma_sc_cqp *cqp;
 	__le64 *wqe;
@@ -2652,12 +2645,12 @@ irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
 	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 	if (info->cq_resize && info->virtual_map &&
 	    info->first_pm_pbl_idx >= pble_obj_cnt)
-		return IRDMA_ERR_INVALID_PBLE_INDEX;
+		return -EINVAL;
 
 	cqp = cq->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 0, info->cq_size);
 	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -2731,8 +2724,8 @@ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
  * @tail: wqtail register value
 * @count: how many times to try for completion
 */
-static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
-						       u32 tail, u32 count)
+static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+				    u32 count)
 {
 	u32 i = 0;
 	u32 newtail, error, val;
@@ -2744,7 +2737,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
 			ibdev_dbg(to_ibdev(cqp->dev),
 				  "CQP: CQPERRCODES error_code[x%08X]\n",
 				  error);
-			return IRDMA_ERR_CQP_COMPL_ERROR;
+			return -EIO;
 		}
 		if (newtail != tail) {
 			/* SUCCESS */
@@ -2755,7 +2748,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
 		udelay(cqp->dev->hw_attrs.max_sleep_count);
 	}
 
-	return IRDMA_ERR_TIMEOUT;
+	return -ETIMEDOUT;
 }
 
 /**
@@ -2910,10 +2903,9 @@ static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
  * parses fpm query buffer and copy max_cnt and
 * size value of hmc objects in hmc_info
 */
-static enum irdma_status_code
-irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
-			     struct irdma_hmc_info *hmc_info,
-			     struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
+					struct irdma_hmc_info *hmc_info,
+					struct irdma_hmc_fpm_misc *hmc_fpm_misc)
 {
 	struct irdma_hmc_obj_info *obj_info;
 	u64 temp;
@@ -2952,7 +2944,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
 	obj_info[IRDMA_HMC_IW_XFFL].size = 4;
 	hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
 	if (!hmc_fpm_misc->xf_block_size)
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
 	irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
 	get_64bit_val(buf, 80, &temp);
@@ -2961,7 +2953,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
 
 	hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
 	if (!hmc_fpm_misc->q1_block_size)
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
 	irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
 
@@ -2985,7 +2977,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
 	hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
 	if (!hmc_fpm_misc->rrf_block_size &&
 	    obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
 	irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
 	irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
@@ -2997,7 +2989,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
 	hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
 	if (!hmc_fpm_misc->ooiscf_block_size &&
 	    obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
 	return 0;
 }
@@ -3025,8 +3017,7 @@ static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
  * @ceq: ceq sc structure
 * @cq: cq sc structure
 */
-enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
-					   struct irdma_sc_cq *cq)
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
 {
 	unsigned long flags;
 
@@ -3034,7 +3025,7 @@ enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
 
 	if (ceq->reg_cq_size == ceq->elem_cnt) {
 		spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
-		return IRDMA_ERR_REG_CQ_FULL;
+		return -ENOMEM;
 	}
 
 	ceq->reg_cq[ceq->reg_cq_size++] = cq;
@@ -3075,15 +3066,15 @@ exit:
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
-enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
-					 struct irdma_cqp_init_info *info)
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+		      struct irdma_cqp_init_info *info)
 {
 	u8 hw_sq_size;
 
 	if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
 	    info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
 	    ((info->sq_size & (info->sq_size - 1))))
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
 	hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
 						IRDMA_QUEUE_TYPE_CQP);
@@ -3133,13 +3124,12 @@ enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
  * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
-enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
-					   u16 *min_err)
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
 {
 	u64 temp;
 	u8 hw_rev;
 	u32 cnt = 0, p1, p2, val = 0, err_code;
-	enum irdma_status_code ret_code;
+	int ret_code;
 
 	hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
 	cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
@@ -3148,7 +3138,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
 					cqp->sdbuf.size, &cqp->sdbuf.pa,
 					GFP_KERNEL);
 	if (!cqp->sdbuf.va)
-		return IRDMA_ERR_NO_MEMORY;
+		return -ENOMEM;
 
 	spin_lock_init(&cqp->dev->cqp_lock);
 
@@ -3203,7 +3193,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
 
 	do {
 		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
-			ret_code = IRDMA_ERR_TIMEOUT;
+			ret_code = -ETIMEDOUT;
 			goto err;
 		}
 		udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3211,7 +3201,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
 	} while (!val);
 
 	if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
-		ret_code = IRDMA_ERR_DEVICE_NOT_SUPPORTED;
+		ret_code = -EOPNOTSUPP;
 		goto err;
 	}
 
@@ -3252,7 +3242,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
 					   u32 *wqe_idx)
 {
 	__le64 *wqe = NULL;
-	enum irdma_status_code ret_code;
+	int ret_code;
 
 	if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
 		ibdev_dbg(to_ibdev(cqp->dev),
@@ -3279,16 +3269,16 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
 * irdma_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 */
-enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
 {
 	u32 cnt = 0, val;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 
 	writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
 	writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
 	do {
 		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
-			ret_code = IRDMA_ERR_TIMEOUT;
+			ret_code = -ETIMEDOUT;
 			break;
 		}
 		udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3333,8 +3323,8 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
  * @ccq: ccq sc struct
 * @info: completion q entry to return
 */
-enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
-						 struct irdma_ccq_cqe_info *info)
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+			      struct irdma_ccq_cqe_info *info)
 {
 	u64 qp_ctx, temp, temp1;
 	__le64 *cqe;
@@ -3342,7 +3332,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
 	u32 wqe_idx;
 	u32 error;
 	u8 polarity;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 
 	if (ccq->cq_uk.avoid_mem_cflct)
 		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3352,7 +3342,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
 	get_64bit_val(cqe, 24, &temp);
 	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
 	if (polarity != ccq->cq_uk.polarity)
-		return IRDMA_ERR_Q_EMPTY;
+		return -ENOENT;
 
 	get_64bit_val(cqe, 8, &qp_ctx);
 	cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
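Completion waits come in two flavors in this file: polling the CCQ via irdma_sc_ccq_get_cqe_info() (as irdma_sc_poll_for_cqp_op_done() below does) or polling the CQP registers directly. Both are bounded by max_done_count and the timeout now surfaces as -ETIMEDOUT rather than IRDMA_ERR_TIMEOUT. A schematic of the register loop, condensed from irdma_cqp_poll_registers() above rather than copied verbatim:

static int example_poll_registers(struct irdma_sc_cqp *cqp, u32 tail, u32 count)
{
	u32 i = 0;

	while (i++ < count) {
		u32 newtail, error, val;

		irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
		if (error)
			return -EIO;	/* CQPERRCODES reported an error */
		if (newtail != tail)
			return 0;	/* CQP consumed the WQE */

		udelay(cqp->dev->hw_attrs.max_sleep_count);
	}

	return -ETIMEDOUT;
}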
@@ -3399,25 +3389,25 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
  * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
-enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
-						     struct irdma_ccq_cqe_info *compl_info)
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
+				  struct irdma_ccq_cqe_info *compl_info)
 {
 	struct irdma_ccq_cqe_info info = {};
 	struct irdma_sc_cq *ccq;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 	u32 cnt = 0;
 
 	ccq = cqp->dev->ccq;
 	while (1) {
 		if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
-			return IRDMA_ERR_TIMEOUT;
+			return -ETIMEDOUT;
 
 		if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
 			udelay(cqp->dev->hw_attrs.max_sleep_count);
 			continue;
 		}
 		if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
-			ret_code = IRDMA_ERR_CQP_COMPL_ERROR;
+			ret_code = -EIO;
 			break;
 		}
 		/* make sure op code matches*/
@@ -3441,17 +3431,16 @@ enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u
  * @info: info for the manage function table operation
 * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code
-irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
-				  struct irdma_hmc_fcn_info *info,
-				  u64 scratch, bool post_sq)
+static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
+					     struct irdma_hmc_fcn_info *info,
+					     u64 scratch, bool post_sq)
 {
 	__le64 *wqe;
 	u64 hdr;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 0, 0);
 	set_64bit_val(wqe, 8, 0);
@@ -3484,8 +3473,7 @@ irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
  * for fpm commit
 * @cqp: struct for cqp hw
 */
-static enum irdma_status_code
-irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
 {
 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
 					     NULL);
@@ -3500,19 +3488,19 @@ irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
  * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
-static enum irdma_status_code
-irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
-			struct irdma_dma_mem *commit_fpm_mem, bool post_sq,
-			u8 wait_type)
+static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+				   u8 hmc_fn_id,
+				   struct irdma_dma_mem *commit_fpm_mem,
+				   bool post_sq, u8 wait_type)
 {
 	__le64 *wqe;
 	u64 hdr;
 	u32 tail, val, error;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, hmc_fn_id);
 	set_64bit_val(wqe, 32, commit_fpm_mem->pa);
@@ -3546,8 +3534,7 @@ irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
  * query fpm
 * @cqp: struct for cqp hw
 */
-static enum irdma_status_code
-irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
 {
 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
 					     NULL);
@@ -3562,19 +3549,19 @@ irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
  * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
-static enum irdma_status_code
-irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
-		       struct irdma_dma_mem *query_fpm_mem, bool post_sq,
-		       u8 wait_type)
+static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+				  u8 hmc_fn_id,
+				  struct irdma_dma_mem *query_fpm_mem,
+				  bool post_sq, u8 wait_type)
 {
 	__le64 *wqe;
 	u64 hdr;
 	u32 tail, val, error;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, hmc_fn_id);
 	set_64bit_val(wqe, 32, query_fpm_mem->pa);
@@ -3606,21 +3593,21 @@ irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
  * @ceq: ceq sc structure
 * @info: ceq initialization info
 */
-enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
-					 struct irdma_ceq_init_info *info)
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+		      struct irdma_ceq_init_info *info)
 {
 	u32 pble_obj_cnt;
 
 	if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
 	    info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
-	if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
-		return IRDMA_ERR_INVALID_CEQ_ID;
+	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+		return -EINVAL;
 
 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 
 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
-		return IRDMA_ERR_INVALID_PBLE_INDEX;
+		return -EINVAL;
 
 	ceq->size = sizeof(*ceq);
 	ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
@@ -3653,8 +3640,8 @@ enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
 * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
-						  bool post_sq)
+static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
+			       bool post_sq)
 {
 	struct irdma_sc_cqp *cqp;
 	__le64 *wqe;
@@ -3663,7 +3650,7 @@ static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64
 	cqp = ceq->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, ceq->elem_cnt);
 	set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
@@ -3695,8 +3682,7 @@ static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64
 * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
 * @ceq: ceq sc structure
 */
-static enum irdma_status_code
-irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
+static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
 {
 	struct irdma_sc_cqp *cqp;
 
@@ -3709,7 +3695,7 @@ irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
 * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
 * @ceq: ceq sc structure
 */
-enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
 {
 	struct irdma_sc_cqp *cqp;
 
@@ -3728,9 +3714,9 @@ enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
  * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 */
-enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
 {
-	enum irdma_status_code ret_code;
+	int ret_code;
 	struct irdma_sc_dev *dev = ceq->dev;
 
 	dev->ccq->vsi = ceq->vsi;
@@ -3753,8 +3739,7 @@ enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratc
  * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
-					    bool post_sq)
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
 {
 	struct irdma_sc_cqp *cqp;
 	__le64 *wqe;
@@ -3763,7 +3748,7 @@ enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratc
 	cqp = ceq->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, ceq->elem_cnt);
 	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
@@ -3882,19 +3867,19 @@ void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
  * @aeq: aeq structure ptr
 * @info: aeq initialization info
 */
-enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
-					 struct irdma_aeq_init_info *info)
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+		      struct irdma_aeq_init_info *info)
 {
 	u32 pble_obj_cnt;
 
 	if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
 	    info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 
 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
-		return IRDMA_ERR_INVALID_PBLE_INDEX;
+		return -EINVAL;
 
 	aeq->size = sizeof(*aeq);
 	aeq->polarity = 1;
@@ -3919,8 +3904,8 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
  * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
-						  u64 scratch, bool post_sq)
+static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
+			       bool post_sq)
 {
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
@@ -3929,7 +3914,7 @@ static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
 	cqp = aeq->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 16, aeq->elem_cnt);
 	set_64bit_val(wqe, 32, (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
@@ -3958,8 +3943,8 @@ static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
  * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
-static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
-						   u64 scratch, bool post_sq)
+static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
+				bool post_sq)
 {
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
@@ -3972,7 +3957,7 @@ static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
 	cqp = dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 	set_64bit_val(wqe, 16, aeq->elem_cnt);
 	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
@@ -3995,8 +3980,8 @@ static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
  * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 */
-enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
-					      struct irdma_aeqe_info *info)
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+			   struct irdma_aeqe_info *info)
 {
 	u64 temp, compl_ctx;
 	__le64 *aeqe;
@@ -4010,7 +3995,7 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
 	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
 
 	if (aeq->polarity != polarity)
-		return IRDMA_ERR_Q_EMPTY;
+		return -ENOENT;
 
 	print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
 			     aeqe, 16, false);
@@ -4155,22 +4140,21 @@ void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
  * @cq: sc's cq ctruct
 * @info: info for control cq initialization
 */
-enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
-					 struct irdma_ccq_init_info *info)
+int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
 {
 	u32 pble_obj_cnt;
 
 	if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
 	    info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
-		return IRDMA_ERR_INVALID_SIZE;
+		return -EINVAL;
 
-	if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
-		return IRDMA_ERR_INVALID_CEQ_ID;
+	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+		return -EINVAL;
 
 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
 
 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
-		return IRDMA_ERR_INVALID_PBLE_INDEX;
+		return -EINVAL;
 
 	cq->cq_pa = info->cq_pa;
 	cq->cq_uk.cq_base = info->cq_base;
@@ -4207,7 +4191,7 @@ enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
 * irdma_sc_ccq_create_done - poll cqp for ccq create
 * @ccq: ccq sc struct
 */
-static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
+static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
 {
 	struct irdma_sc_cqp *cqp;
 
@@ -4223,10 +4207,10 @@ static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq
  * @check_overflow: overlow flag for ccq
 * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
-					   bool check_overflow, bool post_sq)
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+			bool check_overflow, bool post_sq)
 {
-	enum irdma_status_code ret_code;
+	int ret_code;
 
 	ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
 	if (ret_code)
@@ -4248,19 +4232,18 @@ enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
  * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
-enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
-					    bool post_sq)
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
 {
 	struct irdma_sc_cqp *cqp;
 	__le64 *wqe;
 	u64 hdr;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 	u32 tail, val, error;
 
 	cqp = ccq->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
 	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
 	set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
@@ -4299,13 +4282,12 @@ enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch
  * @dev : ptr to irdma_dev struct
 * @hmc_fn_id: hmc function id
 */
-enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
-					    u8 hmc_fn_id)
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
 {
 	struct irdma_hmc_info *hmc_info;
 	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
 	struct irdma_dma_mem query_fpm_mem;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 	u8 wait_type;
 
 	hmc_info = dev->hmc_info;
@@ -4336,14 +4318,13 @@ enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
  * @dev : ptr to irdma_dev struct
 * @hmc_fn_id: hmc function id
 */
-static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
-						  u8 hmc_fn_id)
+static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
 {
 	struct irdma_hmc_info *hmc_info;
 	struct irdma_hmc_obj_info *obj_info;
 	__le64 *buf;
 	struct irdma_dma_mem commit_fpm_mem;
-	enum irdma_status_code ret_code = 0;
+	int ret_code = 0;
 	u8 wait_type;
 
 	hmc_info = dev->hmc_info;
@@ -4406,9 +4387,8 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
  * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 */
-static enum irdma_status_code
-cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
-		 u64 scratch)
+static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
+			    struct irdma_update_sds_info *info, u64 scratch)
 {
 	u64 data;
 	u64 hdr;
@@ -4420,7 +4400,7 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
 
 	wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
 	if (!wqe)
-		return IRDMA_ERR_RING_FULL;
+		return -ENOMEM;
 
3 : info->cnt; mem_entries = info->cnt - wqe_entries; @@ -4486,12 +4466,11 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info, * @info: sd info for sd's * @scratch: u64 saved to be used during cqp completion */ -static enum irdma_status_code -irdma_update_pe_sds(struct irdma_sc_dev *dev, - struct irdma_update_sds_info *info, u64 scratch) +static int irdma_update_pe_sds(struct irdma_sc_dev *dev, + struct irdma_update_sds_info *info, u64 scratch) { struct irdma_sc_cqp *cqp = dev->cqp; - enum irdma_status_code ret_code; + int ret_code; ret_code = cqp_sds_wqe_fill(cqp, info, scratch); if (!ret_code) @@ -4505,13 +4484,12 @@ irdma_update_pe_sds(struct irdma_sc_dev *dev, * @dev: sc device struct * @info: sd info for sd's */ -enum irdma_status_code -irdma_update_sds_noccq(struct irdma_sc_dev *dev, - struct irdma_update_sds_info *info) +int irdma_update_sds_noccq(struct irdma_sc_dev *dev, + struct irdma_update_sds_info *info) { u32 error, val, tail; struct irdma_sc_cqp *cqp = dev->cqp; - enum irdma_status_code ret_code; + int ret_code; ret_code = cqp_sds_wqe_fill(cqp, info, 0); if (ret_code) @@ -4532,10 +4510,9 @@ irdma_update_sds_noccq(struct irdma_sc_dev *dev, * @post_sq: flag for cqp db to ring * @poll_registers: flag to poll register for cqp completion */ -enum irdma_status_code -irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, - u8 hmc_fn_id, bool post_sq, - bool poll_registers) +int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, + u8 hmc_fn_id, bool post_sq, + bool poll_registers) { u64 hdr; __le64 *wqe; @@ -4543,7 +4520,7 @@ irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) - return IRDMA_ERR_RING_FULL; + return -ENOMEM; set_64bit_val(wqe, 16, FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id)); @@ -4618,8 +4595,7 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev, * irdma_sc_query_rdma_features_done - poll cqp for query features done * @cqp: struct for cqp hw */ -static enum irdma_status_code -irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp) +static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp) { return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_RDMA_FEATURES, @@ -4632,16 +4608,15 @@ irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp) * @buf: buffer to hold query info * @scratch: u64 saved to be used during cqp completion */ -static enum irdma_status_code -irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp, - struct irdma_dma_mem *buf, u64 scratch) +static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp, + struct irdma_dma_mem *buf, u64 scratch) { __le64 *wqe; u64 temp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) - return IRDMA_ERR_RING_FULL; + return -ENOMEM; temp = buf->pa; set_64bit_val(wqe, 32, temp); @@ -4665,9 +4640,9 @@ irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp, * irdma_get_rdma_features - get RDMA features * @dev: sc device struct */ -enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev) +int irdma_get_rdma_features(struct irdma_sc_dev *dev) { - enum irdma_status_code ret_code; + int ret_code; struct irdma_dma_mem feat_buf; u64 temp; u16 byte_idx, feat_type, feat_cnt, feat_idx; @@ -4677,7 +4652,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev) feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size, &feat_buf.pa, GFP_KERNEL); if (!feat_buf.va) - return 
IRDMA_ERR_NO_MEMORY; + return -ENOMEM; ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); if (!ret_code) @@ -4688,7 +4663,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev) get_64bit_val(feat_buf.va, 0, &temp); feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp); if (feat_cnt < 2) { - ret_code = IRDMA_ERR_INVALID_FEAT_CNT; + ret_code = -EINVAL; goto exit; } else if (feat_cnt > IRDMA_MAX_FEATURES) { ibdev_dbg(to_ibdev(dev), @@ -4702,7 +4677,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev) feat_buf.size, &feat_buf.pa, GFP_KERNEL); if (!feat_buf.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); if (!ret_code) @@ -4713,7 +4688,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev) get_64bit_val(feat_buf.va, 0, &temp); feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp); if (feat_cnt < 2) { - ret_code = IRDMA_ERR_INVALID_FEAT_CNT; + ret_code = -EINVAL; goto exit; } } @@ -4792,7 +4767,7 @@ static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev, * @dev: sc device struct * @qp_count: desired qp count */ -enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) +int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) { struct irdma_virt_mem virt_mem; u32 i, mem_size; @@ -4803,7 +4778,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) u32 loop_count = 0; struct irdma_hmc_info *hmc_info; struct irdma_hmc_fpm_misc *hmc_fpm_misc; - enum irdma_status_code ret_code = 0; + int ret_code = 0; hmc_info = dev->hmc_info; hmc_fpm_misc = &dev->hmc_fpm_misc; @@ -4897,10 +4872,12 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) sd_diff = sd_needed - hmc_fpm_misc->max_sds; if (sd_diff > 128) { - if (qpwanted > 128 && sd_diff > 144) + if (!(loop_count % 2) && qpwanted > 128) { qpwanted /= 2; - mrwanted /= 2; - pblewanted /= 2; + } else { + mrwanted /= 2; + pblewanted /= 2; + } continue; } if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF && @@ -4930,7 +4907,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) ibdev_dbg(to_ibdev(dev), "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n", loop_count, sd_needed, hmc_info->sd_table.sd_cnt); - return IRDMA_ERR_CFG; + return -EINVAL; } if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) { @@ -4966,7 +4943,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) if (!virt_mem.va) { ibdev_dbg(to_ibdev(dev), "HMC: failed to allocate memory for sd_entry buffer\n"); - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } hmc_info->sd_table.sd_entry = virt_mem.va; @@ -4978,10 +4955,10 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) * @dev: rdma device * @pcmdinfo: cqp command info */ -static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev, - struct cqp_cmds_info *pcmdinfo) +static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo) { - enum irdma_status_code status; + int status; struct irdma_dma_mem val_mem; bool alloc = false; @@ -5243,7 +5220,7 @@ static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev, pcmdinfo->in.u.mc_modify.scratch); break; default: - status = IRDMA_NOT_SUPPORTED; + status = -EOPNOTSUPP; break; } @@ -5255,10 +5232,10 @@ static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev, * @dev: sc device struct 
* @pcmdinfo: cqp command info */ -enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev, - struct cqp_cmds_info *pcmdinfo) +int irdma_process_cqp_cmd(struct irdma_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo) { - enum irdma_status_code status = 0; + int status = 0; unsigned long flags; spin_lock_irqsave(&dev->cqp_lock, flags); @@ -5274,9 +5251,9 @@ enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev, * irdma_process_bh - called from tasklet for cqp list * @dev: sc device struct */ -enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev) +int irdma_process_bh(struct irdma_sc_dev *dev) { - enum irdma_status_code status = 0; + int status = 0; struct cqp_cmds_info *pcmdinfo; unsigned long flags; @@ -5364,12 +5341,11 @@ static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev) * @dev: Device pointer * @info: Device init info */ -enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver, - struct irdma_sc_dev *dev, - struct irdma_device_init_info *info) +int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev, + struct irdma_device_init_info *info) { u32 val; - enum irdma_status_code ret_code = 0; + int ret_code = 0; u8 db_size; INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */ @@ -5413,7 +5389,7 @@ enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver, irdma_sc_init_hw(dev); if (irdma_wait_pe_ready(dev)) - return IRDMA_ERR_TIMEOUT; + return -ETIMEDOUT; val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]); db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val); @@ -5421,7 +5397,7 @@ enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver, ibdev_dbg(to_ibdev(dev), "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n", val, db_size); - return IRDMA_ERR_PE_DOORBELL_NOT_ENA; + return -ENODEV; } dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET]; diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h index cc3d9a365b35..c1906cab5c8a 100644 --- a/drivers/infiniband/hw/irdma/defs.h +++ b/drivers/infiniband/hw/irdma/defs.h @@ -314,6 +314,7 @@ enum irdma_cqp_op_type { #define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d #define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e #define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220 +#define IRDMA_AE_INVALID_REQUEST 0x0223 #define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301 #define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303 #define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304 @@ -964,7 +965,7 @@ enum irdma_cqp_op_type { (_ring).head = ((_ring).head + 1) % size; \ (_retcode) = 0; \ } else { \ - (_retcode) = IRDMA_ERR_RING_FULL; \ + (_retcode) = -ENOMEM; \ } \ } #define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ @@ -975,7 +976,7 @@ enum irdma_cqp_op_type { (_ring).head = ((_ring).head + (_count)) % size; \ (_retcode) = 0; \ } else { \ - (_retcode) = IRDMA_ERR_RING_FULL; \ + (_retcode) = -ENOMEM; \ } \ } #define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \ @@ -986,7 +987,7 @@ enum irdma_cqp_op_type { (_ring).head = ((_ring).head + 1) % size; \ (_retcode) = 0; \ } else { \ - (_retcode) = IRDMA_ERR_RING_FULL; \ + (_retcode) = -ENOMEM; \ } \ } #define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ @@ -997,7 +998,7 @@ enum irdma_cqp_op_type { (_ring).head = ((_ring).head + (_count)) % size; \ (_retcode) = 0; \ } else { \ - (_retcode) = IRDMA_ERR_RING_FULL; \ + (_retcode) = -ENOMEM; \ } \ } #define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \ diff --git 
a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c index ecffcb93c05a..49307ce8c4da 100644 --- a/drivers/infiniband/hw/irdma/hmc.c +++ b/drivers/infiniband/hw/irdma/hmc.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" -#include "status.h" #include "hmc.h" #include "defs.h" #include "type.h" @@ -121,10 +120,8 @@ static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_i * @type: paged or direct sd * @setsd: flag to set or clear sd */ -enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, - u64 pa, u32 sd_idx, - enum irdma_sd_entry_type type, - bool setsd) +int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx, + enum irdma_sd_entry_type type, bool setsd) { struct irdma_update_sds_info sdinfo; @@ -145,16 +142,15 @@ enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, * @sd_cnt: number of sd entries * @setsd: flag to set or clear sd */ -static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev, - struct irdma_hmc_info *hmc_info, - u32 sd_index, u32 sd_cnt, - bool setsd) +static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev, + struct irdma_hmc_info *hmc_info, u32 sd_index, + u32 sd_cnt, bool setsd) { struct irdma_hmc_sd_entry *sd_entry; struct irdma_update_sds_info sdinfo = {}; u64 pa; u32 i; - enum irdma_status_code ret_code = 0; + int ret_code = 0; sdinfo.hmc_fn_id = hmc_info->hmc_fn_id; for (i = sd_index; i < sd_index + sd_cnt; i++) { @@ -196,16 +192,15 @@ static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev, * @dev: pointer to the device structure * @info: create obj info */ -static enum irdma_status_code -irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev, - struct irdma_hmc_create_obj_info *info) +static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev, + struct irdma_hmc_create_obj_info *info) { if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) - return IRDMA_ERR_INVALID_HMC_OBJ_INDEX; + return -EINVAL; if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) - return IRDMA_ERR_INVALID_HMC_OBJ_COUNT; + return -EINVAL; if (!info->add_sd_cnt) return 0; @@ -222,9 +217,8 @@ irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev, * This will allocate memory for PDs and backing pages and populate * the sd and pd entries. 
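The create path below folds the object-specific codes IRDMA_ERR_INVALID_HMC_OBJ_INDEX, IRDMA_ERR_INVALID_HMC_OBJ_COUNT, IRDMA_ERR_INVALID_SD_INDEX and IRDMA_ERR_INVALID_SD_TYPE into plain -EINVAL. A minimal sketch of the converted range check, using a hypothetical stand-in struct rather than the driver's real irdma_hmc_create_obj_info:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical stand-in; only the fields the range check needs. */
struct obj_range_sketch {
	u32 start_idx;
	u32 count;
	u32 cnt;	/* total objects of this resource type */
};

static int check_obj_range_sketch(const struct obj_range_sketch *r)
{
	if (r->start_idx >= r->cnt)
		return -EINVAL;	/* was IRDMA_ERR_INVALID_HMC_OBJ_INDEX */
	if (r->start_idx + r->count > r->cnt)
		return -EINVAL;	/* was IRDMA_ERR_INVALID_HMC_OBJ_COUNT */
	return 0;
}

Collapsing the codes loses per-cause granularity in the return value, but the ibdev_dbg() messages kept in the hunks below preserve the diagnostic detail.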
*/ -enum irdma_status_code -irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, - struct irdma_hmc_create_obj_info *info) +int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, + struct irdma_hmc_create_obj_info *info) { struct irdma_hmc_sd_entry *sd_entry; u32 sd_idx, sd_lmt; @@ -232,10 +226,10 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, u32 pd_idx1 = 0, pd_lmt1 = 0; u32 i, j; bool pd_error = false; - enum irdma_status_code ret_code = 0; + int ret_code = 0; if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) - return IRDMA_ERR_INVALID_HMC_OBJ_INDEX; + return -EINVAL; if ((info->start_idx + info->count) > info->hmc_info->hmc_obj[info->rsrc_type].cnt) { @@ -243,7 +237,7 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n", info->rsrc_type, info->start_idx, info->count, info->hmc_info->hmc_obj[info->rsrc_type].cnt); - return IRDMA_ERR_INVALID_HMC_OBJ_COUNT; + return -EINVAL; } irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type, @@ -251,7 +245,7 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, &sd_lmt); if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { - return IRDMA_ERR_INVALID_SD_INDEX; + return -EINVAL; } irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type, @@ -312,7 +306,7 @@ exit_sd_error: irdma_prep_remove_pd_page(info->hmc_info, (j - 1)); break; default: - ret_code = IRDMA_ERR_INVALID_SD_TYPE; + ret_code = -EINVAL; break; } j--; @@ -327,12 +321,12 @@ exit_sd_error: * @info: delete obj info * @reset: true if called before reset */ -static enum irdma_status_code -irdma_finish_del_sd_reg(struct irdma_sc_dev *dev, - struct irdma_hmc_del_obj_info *info, bool reset) +static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev, + struct irdma_hmc_del_obj_info *info, + bool reset) { struct irdma_hmc_sd_entry *sd_entry; - enum irdma_status_code ret_code = 0; + int ret_code = 0; u32 i, sd_idx; struct irdma_dma_mem *mem; @@ -373,22 +367,21 @@ irdma_finish_del_sd_reg(struct irdma_sc_dev *dev, * caller should deallocate memory allocated previously for * book-keeping information about PDs and backing storage.
*/ -enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, - struct irdma_hmc_del_obj_info *info, - bool reset) +int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, + struct irdma_hmc_del_obj_info *info, bool reset) { struct irdma_hmc_pd_table *pd_table; u32 sd_idx, sd_lmt; u32 pd_idx, pd_lmt, rel_pd_idx; u32 i, j; - enum irdma_status_code ret_code = 0; + int ret_code = 0; if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { ibdev_dbg(to_ibdev(dev), "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n", info->start_idx, info->rsrc_type, info->hmc_info->hmc_obj[info->rsrc_type].cnt); - return IRDMA_ERR_INVALID_HMC_OBJ_INDEX; + return -EINVAL; } if ((info->start_idx + info->count) > @@ -397,7 +390,7 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, "HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n", info->start_idx, info->count, info->rsrc_type, info->hmc_info->hmc_obj[info->rsrc_type].cnt); - return IRDMA_ERR_INVALID_HMC_OBJ_COUNT; + return -EINVAL; } irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type, @@ -433,7 +426,7 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, if (sd_idx >= info->hmc_info->sd_table.sd_cnt || sd_lmt > info->hmc_info->sd_table.sd_cnt) { ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n"); - return IRDMA_ERR_INVALID_SD_INDEX; + return -EINVAL; } for (i = sd_idx; i < sd_lmt; i++) { @@ -477,11 +470,9 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, * @type: what type of segment descriptor we're manipulating * @direct_mode_sz: size to alloc in direct mode */ -enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw, - struct irdma_hmc_info *hmc_info, - u32 sd_index, - enum irdma_sd_entry_type type, - u64 direct_mode_sz) +int irdma_add_sd_table_entry(struct irdma_hw *hw, + struct irdma_hmc_info *hmc_info, u32 sd_index, + enum irdma_sd_entry_type type, u64 direct_mode_sz) { struct irdma_hmc_sd_entry *sd_entry; struct irdma_dma_mem dma_mem; @@ -499,7 +490,7 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw, dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, &dma_mem.pa, GFP_KERNEL); if (!dma_mem.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; if (type == IRDMA_SD_TYPE_PAGED) { struct irdma_virt_mem *vmem = &sd_entry->u.pd_table.pd_entry_virt_mem; @@ -510,7 +501,7 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw, dma_free_coherent(hw->device, dma_mem.size, dma_mem.va, dma_mem.pa); dma_mem.va = NULL; - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } sd_entry->u.pd_table.pd_entry = vmem->va; @@ -549,10 +540,9 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw, * aligned on 4K boundary and zeroed memory. * 2. It should be 4K in size. 
*/ -enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev, - struct irdma_hmc_info *hmc_info, - u32 pd_index, - struct irdma_dma_mem *rsrc_pg) +int irdma_add_pd_table_entry(struct irdma_sc_dev *dev, + struct irdma_hmc_info *hmc_info, u32 pd_index, + struct irdma_dma_mem *rsrc_pg) { struct irdma_hmc_pd_table *pd_table; struct irdma_hmc_pd_entry *pd_entry; @@ -563,7 +553,7 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev, u64 page_desc; if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) - return IRDMA_ERR_INVALID_PAGE_DESC_INDEX; + return -EINVAL; sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD); if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != @@ -584,7 +574,7 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev, page->size, &page->pa, GFP_KERNEL); if (!page->va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; pd_entry->rsrc_pg = false; } @@ -621,9 +611,8 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev, * 1. Caller can deallocate the memory used by backing storage after this * function returns. */ -enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev, - struct irdma_hmc_info *hmc_info, - u32 idx) +int irdma_remove_pd_bp(struct irdma_sc_dev *dev, + struct irdma_hmc_info *hmc_info, u32 idx) { struct irdma_hmc_pd_entry *pd_entry; struct irdma_hmc_pd_table *pd_table; @@ -635,11 +624,11 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev, sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD; rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD; if (sd_idx >= hmc_info->sd_table.sd_cnt) - return IRDMA_ERR_INVALID_PAGE_DESC_INDEX; + return -EINVAL; sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED) - return IRDMA_ERR_INVALID_SD_TYPE; + return -EINVAL; pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; @@ -656,7 +645,7 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev, if (!pd_entry->rsrc_pg) { mem = &pd_entry->bp.addr; if (!mem || !mem->va) - return IRDMA_ERR_PARAM; + return -EINVAL; dma_free_coherent(dev->hw->device, mem->size, mem->va, mem->pa); @@ -673,14 +662,13 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev, * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index */ -enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, - u32 idx) +int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx) { struct irdma_hmc_sd_entry *sd_entry; sd_entry = &hmc_info->sd_table.sd_entry[idx]; if (--sd_entry->u.bp.use_cnt) - return IRDMA_ERR_NOT_READY; + return -EBUSY; hmc_info->sd_table.use_cnt--; sd_entry->valid = false; @@ -693,15 +681,14 @@ enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, * @hmc_info: pointer to the HMC configuration information structure * @idx: segment descriptor index to find the relevant page descriptor */ -enum irdma_status_code -irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx) +int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx) { struct irdma_hmc_sd_entry *sd_entry; sd_entry = &hmc_info->sd_table.sd_entry[idx]; if (sd_entry->u.pd_table.use_cnt) - return IRDMA_ERR_NOT_READY; + return -EBUSY; sd_entry->valid = false; hmc_info->sd_table.use_cnt--; diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h index e2139c788b1b..f5c5dacc7021 100644 --- 
a/drivers/infiniband/hw/irdma/hmc.h +++ b/drivers/infiniband/hw/irdma/hmc.h @@ -141,40 +141,29 @@ struct irdma_hmc_del_obj_info { bool privileged; }; -enum irdma_status_code irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf, - struct irdma_dma_mem *src_mem, - u64 src_offset, u64 size); -enum irdma_status_code -irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, - struct irdma_hmc_create_obj_info *info); -enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, - struct irdma_hmc_del_obj_info *info, - bool reset); -enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, - u64 pa, u32 sd_idx, - enum irdma_sd_entry_type type, - bool setsd); -enum irdma_status_code -irdma_update_sds_noccq(struct irdma_sc_dev *dev, - struct irdma_update_sds_info *info); +int irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf, + struct irdma_dma_mem *src_mem, u64 src_offset, u64 size); +int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, + struct irdma_hmc_create_obj_info *info); +int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, + struct irdma_hmc_del_obj_info *info, bool reset); +int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx, + enum irdma_sd_entry_type type, + bool setsd); +int irdma_update_sds_noccq(struct irdma_sc_dev *dev, + struct irdma_update_sds_info *info); struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id); struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id); -enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw, - struct irdma_hmc_info *hmc_info, - u32 sd_index, - enum irdma_sd_entry_type type, - u64 direct_mode_sz); -enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev, - struct irdma_hmc_info *hmc_info, - u32 pd_index, - struct irdma_dma_mem *rsrc_pg); -enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev, - struct irdma_hmc_info *hmc_info, - u32 idx); -enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, - u32 idx); -enum irdma_status_code -irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx); +int irdma_add_sd_table_entry(struct irdma_hw *hw, + struct irdma_hmc_info *hmc_info, u32 sd_index, + enum irdma_sd_entry_type type, u64 direct_mode_sz); +int irdma_add_pd_table_entry(struct irdma_sc_dev *dev, + struct irdma_hmc_info *hmc_info, u32 pd_index, + struct irdma_dma_mem *rsrc_pg); +int irdma_remove_pd_bp(struct irdma_sc_dev *dev, + struct irdma_hmc_info *hmc_info, u32 idx); +int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx); +int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx); #endif /* IRDMA_HMC_H */ diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index b4c657f5f2f9..ab246447520b 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -61,7 +61,7 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq) struct irdma_cq *cq = iwcq->back_cq; if (!cq->user_mode) - cq->armed = false; + atomic_set(&cq->armed, 0); if (cq->ibcq.comp_handler) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } @@ -75,12 +75,12 @@ static void irdma_puda_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) { struct irdma_sc_dev *dev = &rf->sc_dev; - enum irdma_status_code status; u32 compl_error; + int status; do { status = irdma_puda_poll_cmpl(dev, cq, &compl_error); - if (status == IRDMA_ERR_Q_EMPTY) + if (status == -ENOENT) break; if (status) { 
ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status); @@ -138,59 +138,68 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp, qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; switch (info->ae_id) { - case IRDMA_AE_AMP_UNALLOCATED_STAG: case IRDMA_AE_AMP_BOUNDS_VIOLATION: case IRDMA_AE_AMP_INVALID_STAG: - qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; - fallthrough; + case IRDMA_AE_AMP_RIGHTS_VIOLATION: + case IRDMA_AE_AMP_UNALLOCATED_STAG: case IRDMA_AE_AMP_BAD_PD: - case IRDMA_AE_UDA_XMIT_BAD_PD: + case IRDMA_AE_AMP_BAD_QP: + case IRDMA_AE_AMP_BAD_STAG_KEY: + case IRDMA_AE_AMP_BAD_STAG_INDEX: + case IRDMA_AE_AMP_TO_WRAP: + case IRDMA_AE_PRIV_OPERATION_DENIED: qp->flush_code = FLUSH_PROT_ERR; + qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; - case IRDMA_AE_AMP_BAD_QP: + case IRDMA_AE_UDA_XMIT_BAD_PD: case IRDMA_AE_WQE_UNEXPECTED_OPCODE: qp->flush_code = FLUSH_LOC_QP_OP_ERR; + qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; + break; + case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: + case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: + case IRDMA_AE_UDA_L4LEN_INVALID: + case IRDMA_AE_DDP_UBE_INVALID_MO: + case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: + qp->flush_code = FLUSH_LOC_LEN_ERR; + qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; - case IRDMA_AE_AMP_BAD_STAG_KEY: - case IRDMA_AE_AMP_BAD_STAG_INDEX: - case IRDMA_AE_AMP_TO_WRAP: - case IRDMA_AE_AMP_RIGHTS_VIOLATION: case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: - case IRDMA_AE_PRIV_OPERATION_DENIED: - case IRDMA_AE_IB_INVALID_REQUEST: case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: qp->flush_code = FLUSH_REM_ACCESS_ERR; qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: - case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: - case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: - case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: - case IRDMA_AE_UDA_L4LEN_INVALID: + case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: - qp->flush_code = FLUSH_LOC_LEN_ERR; + case IRDMA_AE_IB_REMOTE_OP_ERROR: + qp->flush_code = FLUSH_REM_OP_ERR; + qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_LCE_QP_CATASTROPHIC: qp->flush_code = FLUSH_FATAL_ERR; + qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; - case IRDMA_AE_DDP_UBE_INVALID_MO: case IRDMA_AE_IB_RREQ_AND_Q1_FULL: - case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: qp->flush_code = FLUSH_GENERAL_ERR; break; case IRDMA_AE_LLP_TOO_MANY_RETRIES: qp->flush_code = FLUSH_RETRY_EXC_ERR; + qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: qp->flush_code = FLUSH_MW_BIND_ERR; + qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; - case IRDMA_AE_IB_REMOTE_OP_ERROR: - qp->flush_code = FLUSH_REM_OP_ERR; + case IRDMA_AE_IB_INVALID_REQUEST: + qp->flush_code = FLUSH_REM_INV_REQ_ERR; + qp->event_type = IRDMA_QP_EVENT_REQ_ERR; break; default: - qp->flush_code = FLUSH_FATAL_ERR; + qp->flush_code = FLUSH_GENERAL_ERR; + qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; } } @@ -257,10 +266,6 @@ static void irdma_process_aeq(struct irdma_pci_f *rf) iwqp->last_aeq = info->ae_id; spin_unlock_irqrestore(&iwqp->lock, flags); ctx_info = &iwqp->ctx_info; - if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) - ctx_info->roce_info->err_rq_idx_valid = true; - else - ctx_info->iwarp_info->err_rq_idx_valid = true; } else { if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR) continue; @@ -370,16 +375,12 @@ static void 
irdma_process_aeq(struct irdma_pci_f *rf) case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: case IRDMA_AE_LCE_CQ_CATASTROPHIC: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: - if (rdma_protocol_roce(&iwdev->ibdev, 1)) - ctx_info->roce_info->err_rq_idx_valid = false; - else - ctx_info->iwarp_info->err_rq_idx_valid = false; - fallthrough; default: - ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n", - info->ae_id, info->qp, info->qp_cq_id); + ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n", + info->ae_id, info->qp, info->qp_cq_id, info->ae_src); if (rdma_protocol_roce(&iwdev->ibdev, 1)) { - if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) { + ctx_info->roce_info->err_rq_idx_valid = info->rq; + if (info->rq) { ctx_info->roce_info->err_rq_idx = info->wqe_idx; irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); @@ -388,7 +389,8 @@ static void irdma_process_aeq(struct irdma_pci_f *rf) irdma_cm_disconn(iwqp); break; } - if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) { + ctx_info->iwarp_info->err_rq_idx_valid = info->rq; + if (info->rq) { ctx_info->iwarp_info->err_rq_idx = info->wqe_idx; ctx_info->tcp_info_valid = false; ctx_info->iwarp_info_valid = true; @@ -456,7 +458,7 @@ static void irdma_ceq_dpc(struct tasklet_struct *t) * Allocate iwdev msix table and copy the msix info to the table * Return 0 if successful, otherwise return error */ -static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf) +static int irdma_save_msix_info(struct irdma_pci_f *rf) { struct irdma_qvlist_info *iw_qvlist; struct irdma_qv_info *iw_qvinfo; @@ -466,13 +468,13 @@ static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf) size_t size; if (!rf->msix_count) - return IRDMA_ERR_NO_INTR; + return -EINVAL; size = sizeof(struct irdma_msix_vector) * rf->msix_count; size += struct_size(iw_qvlist, qv_info, rf->msix_count); rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); if (!rf->iw_msixtbl) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; rf->iw_qvlist = (struct irdma_qvlist_info *) (&rf->iw_msixtbl[rf->msix_count]); @@ -550,7 +552,7 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf, struct irdma_sc_dev *dev = &rf->sc_dev; dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); - irq_set_affinity_hint(msix_vec->irq, NULL); + irq_update_affinity_hint(msix_vec->irq, NULL); free_irq(msix_vec->irq, dev_id); } @@ -564,9 +566,9 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf, */ static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp) { - enum irdma_status_code status = 0; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_cqp *cqp = &rf->cqp; + int status = 0; if (rf->cqp_cmpl_wq) destroy_workqueue(rf->cqp_cmpl_wq); @@ -606,9 +608,9 @@ static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf) */ static void irdma_destroy_aeq(struct irdma_pci_f *rf) { - enum irdma_status_code status = IRDMA_ERR_NOT_READY; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_aeq *aeq = &rf->aeq; + int status = -EBUSY; if (!rf->msix_shared) { rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false); @@ -642,8 +644,8 @@ exit: */ static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) { - enum irdma_status_code status; struct irdma_sc_dev *dev = &rf->sc_dev; + int status; if (rf->reset) goto exit; @@ -733,7 +735,7 @@ static void irdma_destroy_ccq(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_ccq *ccq = &rf->ccq; - enum irdma_status_code status = 0; + int 
status = 0; if (!rf->reset) status = irdma_sc_ccq_destroy(dev->ccq, 0, true); @@ -796,9 +798,8 @@ static void irdma_del_hmc_objects(struct irdma_sc_dev *dev, * @dev: hardware control device structure * @info: information for the hmc object to create */ -static enum irdma_status_code -irdma_create_hmc_obj_type(struct irdma_sc_dev *dev, - struct irdma_hmc_create_obj_info *info) +static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev, + struct irdma_hmc_create_obj_info *info) { return irdma_sc_create_hmc_obj(dev, info); } @@ -812,13 +813,12 @@ irdma_create_hmc_obj_type(struct irdma_sc_dev *dev, * Create the device hmc objects and allocate hmc pages * Return 0 if successful, otherwise clean up and return error */ -static enum irdma_status_code -irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers vers) +static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, + enum irdma_vers vers) { struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_hmc_create_obj_info info = {}; - enum irdma_status_code status = 0; - int i; + int i, status = 0; info.hmc_info = dev->hmc_info; info.privileged = privileged; @@ -868,9 +868,9 @@ irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers v * update the memptr to point to the new aligned memory * Return 0 if successful, otherwise return no memory error */ -static enum irdma_status_code -irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr, - u32 size, u32 mask) +static int irdma_obj_aligned_mem(struct irdma_pci_f *rf, + struct irdma_dma_mem *memptr, u32 size, + u32 mask) { unsigned long va, newva; unsigned long extra; @@ -884,7 +884,7 @@ irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr, memptr->pa = rf->obj_next.pa + extra; memptr->size = size; if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size)) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; rf->obj_next.va = (u8 *)memptr->va + size; rf->obj_next.pa = memptr->pa + size; @@ -899,25 +899,24 @@ irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr, * Return 0, if the cqp and all the resources associated with it * are successfully created, otherwise return error */ -static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf) +static int irdma_create_cqp(struct irdma_pci_f *rf) { - enum irdma_status_code status; u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048; struct irdma_dma_mem mem; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_cqp_init_info cqp_init_info = {}; struct irdma_cqp *cqp = &rf->cqp; u16 maj_err, min_err; - int i; + int i, status; cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); if (!cqp->cqp_requests) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); if (!cqp->scratch_array) { kfree(cqp->cqp_requests); - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } dev->cqp = &cqp->sc_cqp; @@ -929,7 +928,7 @@ static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf) if (!cqp->sq.va) { kfree(cqp->scratch_array); kfree(cqp->cqp_requests); - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx), @@ -999,12 +998,12 @@ exit: * Return 0, if the ccq and the resources associated with it * are successfully created, otherwise return error */ -static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf) +static int irdma_create_ccq(struct irdma_pci_f *rf) { struct 
irdma_sc_dev *dev = &rf->sc_dev; - enum irdma_status_code status; struct irdma_ccq_init_info info = {}; struct irdma_ccq *ccq = &rf->ccq; + int status; dev->ccq = &ccq->sc_cq; dev->ccq->dev = dev; @@ -1015,7 +1014,7 @@ static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf) ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size, &ccq->mem_cq.pa, GFP_KERNEL); if (!ccq->mem_cq.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, ccq->shadow_area.size, @@ -1054,9 +1053,9 @@ exit: * Allocate a mac ip entry and add it to the hw table Return 0 * if successful, otherwise return error */ -static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev) +static int irdma_alloc_set_mac(struct irdma_device *iwdev) { - enum irdma_status_code status; + int status; status = irdma_alloc_local_mac_entry(iwdev->rf, &iwdev->mac_ip_table_idx); @@ -1082,9 +1081,8 @@ static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev) * Allocate interrupt resources and enable irq handling * Return 0 if successful, otherwise return error */ -static enum irdma_status_code -irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, - u32 ceq_id, struct irdma_msix_vector *msix_vec) +static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, + u32 ceq_id, struct irdma_msix_vector *msix_vec) { int status; @@ -1100,10 +1098,10 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, } cpumask_clear(&msix_vec->mask); cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); - irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask); + irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask); if (status) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n"); - return IRDMA_ERR_CFG; + return status; } msix_vec->ceq_id = ceq_id; @@ -1119,7 +1117,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, * Allocate interrupt resources and enable irq handling * Return 0 if successful, otherwise return error */ -static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf) +static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf) { struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; u32 ret = 0; @@ -1131,7 +1129,7 @@ static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf) } if (ret) { ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n"); - return IRDMA_ERR_CFG; + return -EINVAL; } rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true); @@ -1149,12 +1147,10 @@ static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf) * Return 0, if the ceq and the resources associated with it * are successfully created, otherwise return error */ -static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf, - struct irdma_ceq *iwceq, - u32 ceq_id, - struct irdma_sc_vsi *vsi) +static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, + u32 ceq_id, struct irdma_sc_vsi *vsi) { - enum irdma_status_code status; + int status; struct irdma_ceq_init_info info = {}; struct irdma_sc_dev *dev = &rf->sc_dev; u64 scratch; @@ -1169,7 +1165,7 @@ static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf, iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size, &iwceq->mem.pa, GFP_KERNEL); if (!iwceq->mem.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; info.ceq_id = ceq_id; info.ceqe_base = iwceq->mem.va; @@ -1205,18 +1201,18 @@ static enum irdma_status_code 
irdma_create_ceq(struct irdma_pci_f *rf, * Create the ceq 0 and configure its msix interrupt vector * Return 0, if successfully set up, otherwise return error */ -static enum irdma_status_code irdma_setup_ceq_0(struct irdma_pci_f *rf) +static int irdma_setup_ceq_0(struct irdma_pci_f *rf) { struct irdma_ceq *iwceq; struct irdma_msix_vector *msix_vec; u32 i; - enum irdma_status_code status = 0; + int status = 0; u32 num_ceqs; num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); if (!rf->ceqlist) { - status = IRDMA_ERR_NO_MEMORY; + status = -ENOMEM; goto exit; } @@ -1262,14 +1258,13 @@ exit: * Create the ceqs and configure their msix interrupt vectors * Return 0, if ceqs are successfully set up, otherwise return error */ -static enum irdma_status_code irdma_setup_ceqs(struct irdma_pci_f *rf, - struct irdma_sc_vsi *vsi) +static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi) { u32 i; u32 ceq_id; struct irdma_ceq *iwceq; struct irdma_msix_vector *msix_vec; - enum irdma_status_code status; + int status; u32 num_ceqs; num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); @@ -1303,22 +1298,21 @@ del_ceqs: return status; } -static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf, - u32 size) +static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size) { - enum irdma_status_code status = IRDMA_ERR_NO_MEMORY; struct irdma_aeq *aeq = &rf->aeq; dma_addr_t *pg_arr; u32 pg_cnt; + int status; if (rf->rdma_ver < IRDMA_GEN_2) - return IRDMA_NOT_SUPPORTED; + return -EOPNOTSUPP; aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size; aeq->mem.va = vzalloc(aeq->mem.size); if (!aeq->mem.va) - return status; + return -ENOMEM; pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); @@ -1345,15 +1339,15 @@ static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf, * Return 0, if the aeq and the resources associated with it * are successfully created, otherwise return error */ -static enum irdma_status_code irdma_create_aeq(struct irdma_pci_f *rf) +static int irdma_create_aeq(struct irdma_pci_f *rf) { - enum irdma_status_code status; struct irdma_aeq_init_info info = {}; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_aeq *aeq = &rf->aeq; struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; u32 aeq_size; u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ?
2 : 1; + int status; aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt + hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; @@ -1412,10 +1406,10 @@ err: * Create the aeq and configure its msix interrupt vector * Return 0 if successful, otherwise return error */ -static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf) +static int irdma_setup_aeq(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; - enum irdma_status_code status; + int status; status = irdma_create_aeq(rf); if (status) @@ -1439,10 +1433,10 @@ static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf) * * Return 0 if successful, otherwise return error */ -static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev) +static int irdma_initialize_ilq(struct irdma_device *iwdev) { struct irdma_puda_rsrc_info info = {}; - enum irdma_status_code status; + int status; info.type = IRDMA_PUDA_RSRC_TYPE_ILQ; info.cq_id = 1; @@ -1469,10 +1463,10 @@ static enum irdma_status_code irdma_initialize_ilq(struct irdma_device *iwdev) * * Return 0 if successful, otherwise return error */ -static enum irdma_status_code irdma_initialize_ieq(struct irdma_device *iwdev) +static int irdma_initialize_ieq(struct irdma_device *iwdev) { struct irdma_puda_rsrc_info info = {}; - enum irdma_status_code status; + int status; info.type = IRDMA_PUDA_RSRC_TYPE_IEQ; info.cq_id = 2; @@ -1515,15 +1509,12 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi) * the hmc objects and create the objects * Return 0 if successful, otherwise return error */ -static enum irdma_status_code irdma_hmc_setup(struct irdma_pci_f *rf) +static int irdma_hmc_setup(struct irdma_pci_f *rf) { - enum irdma_status_code status; + int status; u32 qpcnt; - if (rf->rdma_ver == IRDMA_GEN_1) - qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2; - else - qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; + qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; rf->sd_type = IRDMA_SD_TYPE_DIRECT; status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt); @@ -1551,7 +1542,7 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf) rf->obj_mem.pa); rf->obj_mem.va = NULL; if (rf->rdma_ver != IRDMA_GEN_1) { - kfree(rf->allocated_ws_nodes); + bitmap_free(rf->allocated_ws_nodes); rf->allocated_ws_nodes = NULL; } kfree(rf->ceqlist); @@ -1570,9 +1561,9 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf) * Return 0 if successful, otherwise clean up the resources * and return error */ -static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf) +static int irdma_initialize_dev(struct irdma_pci_f *rf) { - enum irdma_status_code status; + int status; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_device_init_info info = {}; struct irdma_dma_mem mem; @@ -1584,7 +1575,7 @@ static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf) rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); if (!rf->hmc_info_mem) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; dev->hmc_info = &rf->hw.hmc; @@ -1608,7 +1599,7 @@ static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf) info.fpm_commit_buf = mem.va; info.bar0 = rf->hw.hw_addr; - info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn); + info.hmc_fn_id = rf->pf_id; info.hw = &rf->hw; status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); if (status) @@ -1667,9 +1658,9 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev) destroy_workqueue(iwdev->cleanup_wq); } -static enum irdma_status_code 
irdma_setup_init_state(struct irdma_pci_f *rf) +static int irdma_setup_init_state(struct irdma_pci_f *rf) { - enum irdma_status_code status; + int status; status = irdma_save_msix_info(rf); if (status) @@ -1680,7 +1671,7 @@ static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf) rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size, &rf->obj_mem.pa, GFP_KERNEL); if (!rf->obj_mem.va) { - status = IRDMA_ERR_NO_MEMORY; + status = -ENOMEM; goto clean_msixtbl; } @@ -1709,14 +1700,14 @@ clean_msixtbl: */ static void irdma_get_used_rsrc(struct irdma_device *iwdev) { - iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, - iwdev->rf->max_pd, 0); - iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, - iwdev->rf->max_qp, 0); - iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, - iwdev->rf->max_cq, 0); - iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, - iwdev->rf->max_mr, 0); + iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds, + iwdev->rf->max_pd); + iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps, + iwdev->rf->max_qp); + iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs, + iwdev->rf->max_cq); + iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs, + iwdev->rf->max_mr); } void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) @@ -1763,14 +1754,14 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma * device resource objects. */ -enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, - struct irdma_l2params *l2params) +int irdma_rt_init_hw(struct irdma_device *iwdev, + struct irdma_l2params *l2params) { struct irdma_pci_f *rf = iwdev->rf; struct irdma_sc_dev *dev = &rf->sc_dev; - enum irdma_status_code status; struct irdma_vsi_init_info vsi_info = {}; struct irdma_vsi_stats_info stats_info = {}; + int status; vsi_info.dev = dev; vsi_info.back_vsi = iwdev; @@ -1788,7 +1779,7 @@ enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); if (!stats_info.pestat) { irdma_cleanup_cm_core(&iwdev->cm_core); - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } stats_info.fcn_id = dev->hmc_fn_id; status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info); @@ -1835,10 +1826,6 @@ enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, rf->rsrc_created = true; } - iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | - IB_DEVICE_MEM_WINDOW | - IB_DEVICE_MEM_MGT_EXTENSIONS; - if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) irdma_alloc_set_mac(iwdev); irdma_add_ip(iwdev); @@ -1850,7 +1837,7 @@ enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); if (!iwdev->cleanup_wq) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; irdma_get_used_rsrc(iwdev); init_waitqueue_head(&iwdev->suspend_wq); @@ -1870,10 +1857,10 @@ enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, * * Create admin queues, HMC objects and RF resource objects */ -enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf) +int irdma_ctrl_init_hw(struct irdma_pci_f *rf) { struct irdma_sc_dev *dev = &rf->sc_dev; - enum irdma_status_code status; + int status; do { status = irdma_setup_init_state(rf); if (status) @@ -1915,7 +1902,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI | WQ_UNBOUND); if (!rf->cqp_cmpl_wq) { - status = IRDMA_ERR_NO_MEMORY; + status = -ENOMEM; break; } INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); @@ -1984,9 +1971,8 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) u32 ret; if (rf->rdma_ver != IRDMA_GEN_1) { - rf->allocated_ws_nodes = - kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES), - sizeof(unsigned long), GFP_KERNEL); + rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES, + GFP_KERNEL); if (!rf->allocated_ws_nodes) return -ENOMEM; @@ -2035,7 +2021,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) return 0; mem_rsrc_kzalloc_fail: - kfree(rf->allocated_ws_nodes); + bitmap_free(rf->allocated_ws_nodes); rf->allocated_ws_nodes = NULL; return ret; @@ -2202,11 +2188,11 @@ int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 id struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->post_sq = 1; @@ -2238,11 +2224,11 @@ int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status = 0; + int status = 0; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY; @@ -2264,18 +2250,17 @@ int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) * @accel_local_port: port for apbvt * @add_port: add or delete port */ -static enum irdma_status_code -irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, u16 accel_local_port, - bool add_port) +static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev, + u16 accel_local_port, bool add_port) { struct irdma_apbvt_info *info; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.manage_apbvt_entry.info; @@ -2429,22 +2414,21 @@ static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request) * @cmnode: cmnode associated with connection * @wait: wait for completion */ -enum irdma_status_code -irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo, - enum irdma_quad_entry_type etype, - enum irdma_quad_hash_manage_type mtype, void *cmnode, - bool wait) +int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo, + enum irdma_quad_entry_type etype, + enum irdma_quad_hash_manage_type mtype, void *cmnode, + bool wait) { struct irdma_qhash_table_info *info; - enum irdma_status_code status; struct irdma_cqp *iwcqp = &iwdev->rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_cm_node *cm_node = cmnode; + int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; info = &cqp_info->in.u.manage_qhash_table_entry.info; @@ -2558,12 +2542,10 @@ static void
irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request) * @info: info for flush * @wait: flag wait for completion */ -enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf, - struct irdma_sc_qp *qp, - struct irdma_qp_flush_info *info, - bool wait) +int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, + struct irdma_qp_flush_info *info, bool wait) { - enum irdma_status_code status; + int status; struct irdma_qp_flush_info *hw_info; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; @@ -2571,7 +2553,7 @@ enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf, cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; if (!wait) @@ -2619,7 +2601,7 @@ enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf, info->sq = true; new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!new_req) { - status = IRDMA_ERR_NO_MEMORY; + status = -ENOMEM; goto put_cqp; } cqp_info = &new_req->info; @@ -2705,24 +2687,29 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask) info.sq = flush_mask & IRDMA_FLUSH_SQ; info.rq = flush_mask & IRDMA_FLUSH_RQ; - if (flush_mask & IRDMA_REFLUSH) { - if (info.sq) - iwqp->sc_qp.flush_sq = false; - if (info.rq) - iwqp->sc_qp.flush_rq = false; - } - /* Generate userflush errors in CQE */ info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR; info.sq_minor_code = FLUSH_GENERAL_ERR; info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR; info.rq_minor_code = FLUSH_GENERAL_ERR; info.userflushcode = true; - if (flush_code) { - if (info.sq && iwqp->sc_qp.sq_flush_code) - info.sq_minor_code = flush_code; - if (info.rq && iwqp->sc_qp.rq_flush_code) - info.rq_minor_code = flush_code; + + if (flush_mask & IRDMA_REFLUSH) { + if (info.sq) + iwqp->sc_qp.flush_sq = false; + if (info.rq) + iwqp->sc_qp.flush_rq = false; + } else { + if (flush_code) { + if (info.sq && iwqp->sc_qp.sq_flush_code) + info.sq_minor_code = flush_code; + if (info.rq && iwqp->sc_qp.rq_flush_code) + info.rq_minor_code = flush_code; + } + if (!iwqp->user_mode) + queue_delayed_work(iwqp->iwdev->cleanup_wq, + &iwqp->dwork_flush, + msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); } /* Issue flush */ diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c index 64148ad8a604..50299f58b6b3 100644 --- a/drivers/infiniband/hw/irdma/i40iw_hw.c +++ b/drivers/infiniband/hw/irdma/i40iw_hw.c @@ -3,7 +3,6 @@ #include "osdep.h" #include "type.h" #include "i40iw_hw.h" -#include "status.h" #include "protos.h" static u32 i40iw_regs[IRDMA_MAX_REGS] = { @@ -202,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev) dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD; dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT; dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE; + dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M; dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE; dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES; diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c index d219f64b2c3d..4053ead32416 100644 --- a/drivers/infiniband/hw/irdma/i40iw_if.c +++ b/drivers/infiniband/hw/irdma/i40iw_if.c @@ -77,6 +77,7 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info rf->rdma_ver = IRDMA_GEN_1; rf->gen_ops.request_reset = i40iw_request_reset; rf->pcidev = cdev_info->pcidev; + rf->pf_id = 
cdev_info->fid; rf->hw.hw_addr = cdev_info->hw_addr; rf->cdev = cdev_info; rf->msix_count = cdev_info->msix_count; @@ -138,7 +139,7 @@ static int i40iw_open(struct i40e_info *cdev_info, struct i40e_client *client) if (last_qset == IRDMA_NO_QSET) last_qset = qset; else if ((qset != last_qset) && (qset != IRDMA_NO_QSET)) - iwdev->dcb = true; + iwdev->dcb_vlan_mode = true; } if (irdma_rt_init_hw(iwdev, &l2params)) { @@ -198,7 +199,7 @@ static void i40iw_remove(struct auxiliary_device *aux_dev) aux_dev); struct i40e_info *cdev_info = i40e_adev->ldev; - return i40e_client_device_unregister(cdev_info); + i40e_client_device_unregister(cdev_info); } static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = { diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c index cf53b17510cd..5986fd906308 100644 --- a/drivers/infiniband/hw/irdma/icrdma_hw.c +++ b/drivers/infiniband/hw/irdma/icrdma_hw.c @@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev) dev->cqp_db = dev->hw_regs[IRDMA_CQPDB]; dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK]; dev->irq_ops = &icrdma_irq_ops; + dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G; dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h index 46c12334c735..4789e85d717b 100644 --- a/drivers/infiniband/hw/irdma/irdma.h +++ b/drivers/infiniband/hw/irdma/irdma.h @@ -127,6 +127,7 @@ struct irdma_hw_attrs { u64 max_hw_outbound_msg_size; u64 max_hw_inbound_msg_size; u64 max_mr_size; + u64 page_size_cap; u32 min_hw_qp_id; u32 min_hw_aeq_size; u32 max_hw_aeq_size; diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c index 51a41359e0b4..514453777e07 100644 --- a/drivers/infiniband/hw/irdma/main.c +++ b/drivers/infiniband/hw/irdma/main.c @@ -79,6 +79,10 @@ static void irdma_fill_qos_info(struct irdma_l2params *l2params, } for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) l2params->up2tc[i] = qos_info->up2tc[i]; + if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) { + l2params->dscp_mode = true; + memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map)); + } } static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event) @@ -108,8 +112,9 @@ static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event l2params.tc_changed = true; ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n"); ice_get_qos_params(pf, &qos_info); - iwdev->dcb = qos_info.num_tc > 1; irdma_fill_qos_info(&l2params, &qos_info); + if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) + iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode; irdma_change_l2params(&iwdev->vsi, &l2params); } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) { ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n", @@ -157,8 +162,8 @@ static void irdma_request_reset(struct irdma_pci_f *rf) * @vsi: vsi structure * @tc_node: Traffic class node */ -static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi, - struct irdma_ws_node *tc_node) +static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi, + struct irdma_ws_node *tc_node) { struct irdma_device *iwdev = vsi->back_vsi; struct ice_pf *pf = iwdev->rf->cdev; @@ -171,7 +176,7 @@ static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi, ret = ice_add_rdma_qset(pf, &qset); if (ret) { 
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n"); - return IRDMA_ERR_REG_QSET; + return ret; } tc_node->l2_sched_node_id = qset.teid; @@ -207,7 +212,7 @@ static void irdma_remove(struct auxiliary_device *aux_dev) struct iidc_auxiliary_dev, adev); struct ice_pf *pf = iidc_adev->pf; - struct irdma_device *iwdev = dev_get_drvdata(&aux_dev->dev); + struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev); irdma_ib_unregister_device(iwdev); ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false); @@ -226,16 +231,18 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf rf->hw.hw_addr = pf->hw.hw_addr; rf->pcidev = pf->pdev; rf->msix_count = pf->num_rdma_msix; + rf->pf_id = pf->hw.pf_id; rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector]; rf->default_vsi.vsi_idx = vsi->vsi_num; - rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY; + rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? + IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY; rf->rdma_ver = IRDMA_GEN_2; rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT; rf->rst_to = IRDMA_RST_TIMEOUT_HZ; rf->gen_ops.request_reset = irdma_request_reset; rf->limits_sel = 7; rf->iwdev = iwdev; - + mutex_init(&iwdev->ah_tbl_lock); iwdev->netdev = vsi->netdev; iwdev->vsi_num = vsi->vsi_num; iwdev->init_state = INITIAL_STATE; @@ -274,18 +281,19 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_ irdma_fill_device_info(iwdev, pf, vsi); rf = iwdev->rf; - if (irdma_ctrl_init_hw(rf)) { - err = -EIO; + err = irdma_ctrl_init_hw(rf); + if (err) goto err_ctrl_init; - } l2params.mtu = iwdev->netdev->mtu; ice_get_qos_params(pf, &qos_info); irdma_fill_qos_info(&l2params, &qos_info); - if (irdma_rt_init_hw(iwdev, &l2params)) { - err = -EIO; + if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) + iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode; + + err = irdma_rt_init_hw(iwdev, &l2params); + if (err) goto err_rt_init; - } err = irdma_ib_register_device(iwdev); if (err) @@ -294,7 +302,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_ ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true); ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn)); - dev_set_drvdata(&aux_dev->dev, iwdev); + auxiliary_set_drvdata(aux_dev, iwdev); return 0; diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index cb218cab79ac..65e966ad3453 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -40,7 +40,6 @@ #include <rdma/ib_umem.h> #include <rdma/ib_cache.h> #include <rdma/uverbs_ioctl.h> -#include "status.h" #include "osdep.h" #include "defs.h" #include "hmc.h" @@ -86,7 +85,7 @@ extern struct auxiliary_driver i40iw_auxiliary_drv; #define IRDMA_NO_QSET 0xffff #define IW_CFG_FPM_QP_COUNT 32768 -#define IRDMA_MAX_PAGES_PER_FMR 512 +#define IRDMA_MAX_PAGES_PER_FMR 262144 #define IRDMA_MIN_PAGES_PER_FMR 1 #define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2 #define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3 @@ -242,8 +241,8 @@ struct irdma_qvlist_info { struct irdma_gen_ops { void (*request_reset)(struct irdma_pci_f *rf); - enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi, - struct irdma_ws_node *tc_node); + int (*register_qset)(struct irdma_sc_vsi *vsi, + struct irdma_ws_node *tc_node); void (*unregister_qset)(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node); }; @@ -257,6 +256,7 @@ struct irdma_pci_f { u8 *mem_rsrc; u8 rdma_ver; u8 
rst_to; + u8 pf_id; enum irdma_protocol_used protocol_used; u32 sd_type; u32 msix_count; @@ -332,11 +332,12 @@ struct irdma_device { struct workqueue_struct *cleanup_wq; struct irdma_sc_vsi vsi; struct irdma_cm_core cm_core; + DECLARE_HASHTABLE(ah_hash_tbl, 8); + struct mutex ah_tbl_lock; /* protect AH hash table access */ u32 roce_cwnd; u32 roce_ackcreds; u32 vendor_id; u32 vendor_part_id; - u32 device_cap_flags; u32 push_mode; u32 rcv_wnd; u16 mac_ip_table_idx; @@ -345,7 +346,7 @@ struct irdma_device { u8 iw_status; bool roce_mode:1; bool roce_dcqcn_en:1; - bool dcb:1; + bool dcb_vlan_mode:1; bool iw_ooo:1; enum init_completion_state init_state; @@ -457,10 +458,10 @@ static inline void irdma_free_rsrc(struct irdma_pci_f *rf, spin_unlock_irqrestore(&rf->rsrc_lock, flags); } -enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf); +int irdma_ctrl_init_hw(struct irdma_pci_f *rf); void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf); -enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, - struct irdma_l2params *l2params); +int irdma_rt_init_hw(struct irdma_device *iwdev, + struct irdma_l2params *l2params); void irdma_rt_deinit_hw(struct irdma_device *iwdev); void irdma_qp_add_ref(struct ib_qp *ibqp); void irdma_qp_rem_ref(struct ib_qp *ibqp); @@ -489,9 +490,8 @@ void irdma_cm_disconn(struct irdma_qp *qp); bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd, u16 maj_err_code, u16 min_err_code); -enum irdma_status_code -irdma_handle_cqp_op(struct irdma_pci_f *rf, - struct irdma_cqp_request *cqp_request); +int irdma_handle_cqp_op(struct irdma_pci_f *rf, + struct irdma_cqp_request *cqp_request); int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); @@ -500,21 +500,17 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq); void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf); -enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev, - struct irdma_qp *iwqp, - struct irdma_modify_qp_info *info, - bool wait); -enum irdma_status_code irdma_qp_suspend_resume(struct irdma_sc_qp *qp, - bool suspend); -enum irdma_status_code -irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo, - enum irdma_quad_entry_type etype, - enum irdma_quad_hash_manage_type mtype, void *cmnode, - bool wait); +int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, + struct irdma_modify_qp_info *info, bool wait); +int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend); +int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo, + enum irdma_quad_entry_type etype, + enum irdma_quad_hash_manage_type mtype, void *cmnode, + bool wait); void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf); void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp); void irdma_free_qp_rsrc(struct irdma_qp *iwqp); -enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver); +int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver); void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core); void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term, u8 term_len); @@ -523,10 +519,8 @@ int irdma_send_reset(struct irdma_cm_node *cm_node); struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core, u16 rem_port, u32 *rem_addr, u16 loc_port, u32 *loc_addr, u16 vlan_id); -enum irdma_status_code irdma_hw_flush_wqes(struct 
irdma_pci_f *rf, - struct irdma_sc_qp *qp, - struct irdma_qp_flush_info *info, - bool wait); +int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, + struct irdma_qp_flush_info *info, bool wait); void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, struct irdma_gen_ae_info *info, bool wait); void irdma_copy_ip_ntohl(u32 *dst, __be32 *src); diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h index 63d8bb3a6903..fc1ba2a3e6fb 100644 --- a/drivers/infiniband/hw/irdma/osdep.h +++ b/drivers/infiniband/hw/irdma/osdep.h @@ -5,6 +5,7 @@ #include <linux/pci.h> #include <linux/bitfield.h> +#include <linux/net/intel/iidc.h> #include <crypto/hash.h> #include <rdma/ib_verbs.h> @@ -42,32 +43,28 @@ enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev); bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev); void irdma_add_dev_ref(struct irdma_sc_dev *dev); void irdma_put_dev_ref(struct irdma_sc_dev *dev); -enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc, - void *addr, u32 len, u32 val); +int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, + u32 val); struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev, struct irdma_puda_buf *buf); void irdma_send_ieq_ack(struct irdma_sc_qp *qp); void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum); void irdma_free_hash_desc(struct shash_desc *hash_desc); -enum irdma_status_code irdma_init_hash_desc(struct shash_desc **hash_desc); -enum irdma_status_code -irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, - struct irdma_puda_buf *buf); -enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, - struct irdma_update_sds_info *info); -enum irdma_status_code -irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev, - struct irdma_hmc_fcn_info *hmcfcninfo, - u16 *pmf_idx); -enum irdma_status_code -irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, - struct irdma_dma_mem *val_mem, u8 hmc_fn_id); -enum irdma_status_code -irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, - struct irdma_dma_mem *val_mem, u8 hmc_fn_id); -enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev, - struct irdma_dma_mem *mem); +int irdma_init_hash_desc(struct shash_desc **hash_desc); +int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, + struct irdma_puda_buf *buf); +int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, + struct irdma_update_sds_info *info); +int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev, + struct irdma_hmc_fcn_info *hmcfcninfo, + u16 *pmf_idx); +int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, + struct irdma_dma_mem *val_mem, u8 hmc_fn_id); +int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, + struct irdma_dma_mem *val_mem, u8 hmc_fn_id); +int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev, + struct irdma_dma_mem *mem); void *irdma_remove_cqp_head(struct irdma_sc_dev *dev); void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term, u8 term_len); @@ -79,7 +76,7 @@ void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi); void wr32(struct irdma_hw *hw, u32 reg, u32 val); u32 rd32(struct irdma_hw *hw, u32 reg); u64 rd64(struct irdma_hw *hw, u32 reg); -enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va, - dma_addr_t *pg_dma, u32 pg_cnt); +int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma, + u32 pg_cnt); void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 
pg_cnt); #endif /* IRDMA_OSDEP_H */ diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c index fed49da770f3..cdc0b8a6ed48 100644 --- a/drivers/infiniband/hw/irdma/pble.c +++ b/drivers/infiniband/hw/irdma/pble.c @@ -1,15 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" -#include "status.h" #include "hmc.h" #include "defs.h" #include "type.h" #include "protos.h" #include "pble.h" -static enum irdma_status_code -add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc); +static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc); /** * irdma_destroy_pble_prm - destroy prm during module unload @@ -35,13 +33,12 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) * @dev: irdma_sc_dev struct * @pble_rsrc: pble resources */ -enum irdma_status_code -irdma_hmc_init_pble(struct irdma_sc_dev *dev, - struct irdma_hmc_pble_rsrc *pble_rsrc) +int irdma_hmc_init_pble(struct irdma_sc_dev *dev, + struct irdma_hmc_pble_rsrc *pble_rsrc) { struct irdma_hmc_info *hmc_info; u32 fpm_idx = 0; - enum irdma_status_code status = 0; + int status = 0; hmc_info = dev->hmc_info; pble_rsrc->dev = dev; @@ -60,7 +57,7 @@ irdma_hmc_init_pble(struct irdma_sc_dev *dev, INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); if (add_pble_prm(pble_rsrc)) { irdma_destroy_pble_prm(pble_rsrc); - status = IRDMA_ERR_NO_MEMORY; + status = -ENOMEM; } return status; @@ -84,12 +81,11 @@ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, * @pble_rsrc: pble resource ptr * @info: page info for sd */ -static enum irdma_status_code -add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_add_page_info *info) +static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct irdma_add_page_info *info) { struct irdma_sc_dev *dev = pble_rsrc->dev; - enum irdma_status_code ret_code = 0; + int ret_code = 0; struct sd_pd_idx *idx = &info->idx; struct irdma_chunk *chunk = info->chunk; struct irdma_hmc_info *hmc_info = info->hmc_info; @@ -137,9 +133,8 @@ static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr) * @pble_rsrc: pble resource management * @info: page info for sd */ -static enum irdma_status_code -add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_add_page_info *info) +static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct irdma_add_page_info *info) { struct irdma_sc_dev *dev = pble_rsrc->dev; u8 *addr; @@ -148,13 +143,13 @@ add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_hmc_sd_entry *sd_entry = info->sd_entry; struct irdma_hmc_info *hmc_info = info->hmc_info; struct irdma_chunk *chunk = info->chunk; - enum irdma_status_code status = 0; + int status = 0; u32 rel_pd_idx = info->idx.rel_pd_idx; u32 pd_idx = info->idx.pd_idx; u32 i; if (irdma_pble_get_paged_mem(chunk, info->pages)) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx, IRDMA_SD_TYPE_PAGED, @@ -207,8 +202,7 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev, * add_pble_prm - add a sd entry for pble resoure * @pble_rsrc: pble resource management */ -static enum irdma_status_code -add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) +static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) { struct irdma_sc_dev *dev = pble_rsrc->dev; struct irdma_hmc_sd_entry *sd_entry; @@ -216,22 +210,22 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) struct irdma_chunk *chunk; struct 
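
The pble chunk bookkeeping below, like the allocated_ws_nodes hunk earlier, moves toward the kernel bitmap API: bitmap_zalloc()/bitmap_free() replace open-coded kcalloc(BITS_TO_LONGS(...), ...) pairs, and pble.h later retypes bitmapbuf to unsigned long * to match. A minimal sketch of the pairing (example_ws_map() is hypothetical):

static int example_ws_map(void)
{
	unsigned long *map;

	map = bitmap_zalloc(IRDMA_MAX_WS_NODES, GFP_KERNEL);	/* sized in bits */
	if (!map)
		return -ENOMEM;

	set_bit(0, map);	/* ordinary bitmap ops apply directly */
	bitmap_free(map);	/* always paired with bitmap_zalloc() */
	return 0;
}
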
irdma_add_page_info info; struct sd_pd_idx *idx = &info.idx; - enum irdma_status_code ret_code = 0; + int ret_code = 0; enum irdma_sd_entry_type sd_entry_type; u64 sd_reg_val = 0; struct irdma_virt_mem chunkmem; u32 pages; if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; if (pble_rsrc->next_fpm_addr & 0xfff) - return IRDMA_ERR_INVALID_PAGE_DESC_INDEX; + return -EINVAL; chunkmem.size = sizeof(*chunk); chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL); if (!chunkmem.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; chunk = chunkmem.va; chunk->chunkmem = chunkmem; @@ -337,9 +331,8 @@ static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc, * @pble_rsrc: pble resource management * @palloc: level 2 pble allocation */ -static enum irdma_status_code -get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_pble_alloc *palloc) +static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct irdma_pble_alloc *palloc) { u32 lf4k, lflast, total, i; u32 pblcnt = PBLE_PER_PAGE; @@ -347,7 +340,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_level2 *lvl2 = &palloc->level2; struct irdma_pble_info *root = &lvl2->root; struct irdma_pble_info *leaf; - enum irdma_status_code ret_code; + int ret_code; u64 fpm_addr; /* number of full 512 (4K) leafs) */ @@ -359,7 +352,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, lvl2->leafmem.size = (sizeof(*leaf) * total); lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL); if (!lvl2->leafmem.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; lvl2->leaf = lvl2->leafmem.va; leaf = lvl2->leaf; @@ -368,7 +361,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, if (ret_code) { kfree(lvl2->leafmem.va); lvl2->leaf = NULL; - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } root->idx = fpm_to_idx(pble_rsrc, fpm_addr); @@ -397,7 +390,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, error: free_lvl2(pble_rsrc, palloc); - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -405,11 +398,10 @@ error: * @pble_rsrc: pble resource management * @palloc: level 1 pble allocation */ -static enum irdma_status_code -get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_pble_alloc *palloc) +static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct irdma_pble_alloc *palloc) { - enum irdma_status_code ret_code; + int ret_code; u64 fpm_addr; struct irdma_pble_info *lvl1 = &palloc->level1; @@ -417,7 +409,7 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, palloc->total_cnt << 3, &lvl1->addr, &fpm_addr); if (ret_code) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; palloc->level = PBLE_LEVEL_1; lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr); @@ -433,11 +425,10 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, * @palloc: contains all inforamtion regarding pble (idx + pble addr) * @level1_only: flag for a level 1 PBLE */ -static enum irdma_status_code -get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_pble_alloc *palloc, bool level1_only) +static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct irdma_pble_alloc *palloc, bool level1_only) { - enum irdma_status_code status = 0; + int status = 0; status = get_lvl1_pble(pble_rsrc, palloc); if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE) @@ -455,11 +446,11 @@ get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, * @pble_cnt: #of pbles requested * @level1_only: true if only pble level 1 to acquire */ -enum 
irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_pble_alloc *palloc, - u32 pble_cnt, bool level1_only) +int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct irdma_pble_alloc *palloc, u32 pble_cnt, + bool level1_only) { - enum irdma_status_code status = 0; + int status = 0; int max_sds = 0; int i; diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h index aa20827dcc9d..29d295463559 100644 --- a/drivers/infiniband/hw/irdma/pble.h +++ b/drivers/infiniband/hw/irdma/pble.h @@ -69,7 +69,7 @@ struct irdma_add_page_info { struct irdma_chunk { struct list_head list; struct irdma_dma_info dmainfo; - void *bitmapbuf; + unsigned long *bitmapbuf; u32 sizeofbitmap; u64 size; @@ -108,20 +108,18 @@ struct irdma_hmc_pble_rsrc { }; void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc); -enum irdma_status_code -irdma_hmc_init_pble(struct irdma_sc_dev *dev, - struct irdma_hmc_pble_rsrc *pble_rsrc); +int irdma_hmc_init_pble(struct irdma_sc_dev *dev, + struct irdma_hmc_pble_rsrc *pble_rsrc); void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_alloc *palloc); -enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, - struct irdma_pble_alloc *palloc, - u32 pble_cnt, bool level1_only); -enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, - struct irdma_chunk *pchunk); -enum irdma_status_code -irdma_prm_get_pbles(struct irdma_pble_prm *pprm, - struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size, - u64 **vaddr, u64 *fpm_addr); +int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, + struct irdma_pble_alloc *palloc, u32 pble_cnt, + bool level1_only); +int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, + struct irdma_chunk *pchunk); +int irdma_prm_get_pbles(struct irdma_pble_prm *pprm, + struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size, + u64 **vaddr, u64 *fpm_addr); void irdma_prm_return_pbles(struct irdma_pble_prm *pprm, struct irdma_pble_chunkinfo *chunkinfo); void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc, @@ -129,7 +127,6 @@ void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc, void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc, unsigned long *flags); void irdma_pble_free_paged_mem(struct irdma_chunk *chunk); -enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk, - u32 pg_cnt); +int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt); void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk); #endif /* IRDMA_PBLE_H */ diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h index a17c0ffb0cc8..9b6e919ae2a9 100644 --- a/drivers/infiniband/hw/irdma/protos.h +++ b/drivers/infiniband/hw/irdma/protos.h @@ -12,58 +12,51 @@ #define CQP_TIMEOUT_THRESHOLD 500 /* init operations */ -enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver, - struct irdma_sc_dev *dev, - struct irdma_device_init_info *info); +int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev, + struct irdma_device_init_info *info); void irdma_sc_rt_init(struct irdma_sc_dev *dev); void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp); __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch); -enum irdma_status_code -irdma_sc_mr_fast_register(struct irdma_sc_qp *qp, - struct irdma_fast_reg_stag_info *info, bool post_sq); +int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp, + struct irdma_fast_reg_stag_info 
*info, + bool post_sq); /* HMC/FPM functions */ -enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, - u8 hmc_fn_id); +int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id); /* stats misc */ -enum irdma_status_code -irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, - struct irdma_vsi_pestat *pestat, bool wait); +int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, + struct irdma_vsi_pestat *pestat, bool wait); void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev, struct irdma_vsi_pestat *pestat); void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats, struct irdma_dev_hw_stats *stats_values, u64 *hw_stats_regs_32, u64 *hw_stats_regs_64, u8 hw_rev); -enum irdma_status_code -irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, - struct irdma_ws_node_info *node_info); -enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_ceq *sc_ceq, u8 op); -enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_aeq *sc_aeq, u8 op); -enum irdma_status_code -irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, - struct irdma_stats_inst_info *stats_info); +int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, + struct irdma_ws_node_info *node_info); +int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq, + u8 op); +int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq, + u8 op); +int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, + struct irdma_stats_inst_info *stats_info); u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev); void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id); void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats, struct irdma_gather_stats *gather_stats, struct irdma_gather_stats *last_gather_stats); /* vsi functions */ -enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, - struct irdma_vsi_stats_info *info); +int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, + struct irdma_vsi_stats_info *info); void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi); void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, struct irdma_vsi_init_info *info); -enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, - struct irdma_sc_cq *cq); +int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq); void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq); /* misc L2 param change functions */ void irdma_change_l2params(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2params); void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend); -enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, - u8 cmd); +int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 cmd); void irdma_qp_add_qos(struct irdma_sc_qp *qp); void irdma_qp_rem_qos(struct irdma_sc_qp *qp); struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head, @@ -81,31 +74,26 @@ void irdma_terminate_received(struct irdma_sc_qp *qp, /* misc */ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type); void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp); -enum irdma_status_code -irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, - u8 hmc_fn_id, bool post_sq, - bool poll_registers); -enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, - u32 qp_count); -enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev); +int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, 
+ u8 hmc_fn_id, bool post_sq, + bool poll_registers); +int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count); +int irdma_get_rdma_features(struct irdma_sc_dev *dev); void free_sd_mem(struct irdma_sc_dev *dev); -enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev, - struct cqp_cmds_info *pcmdinfo); -enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev); -enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, - struct irdma_update_sds_info *info); -enum irdma_status_code -irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, - struct irdma_dma_mem *val_mem, u8 hmc_fn_id); -enum irdma_status_code -irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, - struct irdma_dma_mem *val_mem, u8 hmc_fn_id); -enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev, - struct irdma_dma_mem *mem); -enum irdma_status_code -irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev, - struct irdma_hmc_fcn_info *hmcfcninfo, - u16 *pmf_idx); +int irdma_process_cqp_cmd(struct irdma_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo); +int irdma_process_bh(struct irdma_sc_dev *dev); +int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, + struct irdma_update_sds_info *info); +int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, + struct irdma_dma_mem *val_mem, u8 hmc_fn_id); +int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, + struct irdma_dma_mem *val_mem, u8 hmc_fn_id); +int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev, + struct irdma_dma_mem *mem); +int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev, + struct irdma_hmc_fcn_info *hmcfcninfo, + u16 *pmf_idx); void irdma_add_dev_ref(struct irdma_sc_dev *dev); void irdma_put_dev_ref(struct irdma_sc_dev *dev); void *irdma_remove_cqp_head(struct irdma_sc_dev *dev); diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c index 58e7d875643b..4ec9639f1bdb 100644 --- a/drivers/infiniband/hw/irdma/puda.c +++ b/drivers/infiniband/hw/irdma/puda.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" -#include "status.h" #include "hmc.h" #include "defs.h" #include "type.h" @@ -114,8 +113,7 @@ static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx, * @rsrc: resource to use for buffer * @initial: flag if during init time */ -static enum irdma_status_code -irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial) +static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial) { u32 i; u32 invalid_cnt = rsrc->rxq_invalid_cnt; @@ -124,7 +122,7 @@ irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial) for (i = 0; i < invalid_cnt; i++) { buf = irdma_puda_get_bufpool(rsrc); if (!buf) - return IRDMA_ERR_list_empty; + return -ENOBUFS; irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial); rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size); rsrc->rxq_invalid_cnt--; @@ -193,19 +191,16 @@ static void irdma_puda_dele_buf(struct irdma_sc_dev *dev, static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) { - __le64 *wqe = NULL; - enum irdma_status_code ret_code = 0; + int ret_code = 0; *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); if (!*wqe_idx) qp->swqe_polarity = !qp->swqe_polarity; IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code); if (ret_code) - return wqe; - - wqe = qp->sq_base[*wqe_idx].elem; + return NULL; - return wqe; + return qp->sq_base[*wqe_idx].elem; } /** @@ -213,8 +208,8 @@ static 
__le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp, * @cq: cq for poll * @info: info return for successful completion */ -static enum irdma_status_code -irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) +static int irdma_puda_poll_info(struct irdma_sc_cq *cq, + struct irdma_puda_cmpl_info *info) { struct irdma_cq_uk *cq_uk = &cq->cq_uk; u64 qword0, qword2, qword3, qword6; @@ -233,7 +228,7 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) get_64bit_val(cqe, 24, &qword3); valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3); if (valid_bit != cq_uk->polarity) - return IRDMA_ERR_Q_EMPTY; + return -ENOENT; if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3); @@ -246,7 +241,7 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) if (!peek_head) polarity ^= 1; if (polarity != cq_uk->polarity) - return IRDMA_ERR_Q_EMPTY; + return -ENOENT; IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring)) @@ -267,7 +262,7 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3)); minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3)); info->compl_error = major_err << 16 | minor_err; - return IRDMA_ERR_CQ_COMPL_ERROR; + return -EIO; } get_64bit_val(cqe, 0, &qword0); @@ -319,14 +314,13 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) * @cq: cq getting interrupt * @compl_err: return any completion err */ -enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, - struct irdma_sc_cq *cq, - u32 *compl_err) +int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq, + u32 *compl_err) { struct irdma_qp_uk *qp; struct irdma_cq_uk *cq_uk = &cq->cq_uk; struct irdma_puda_cmpl_info info = {}; - enum irdma_status_code ret = 0; + int ret = 0; struct irdma_puda_buf *buf; struct irdma_puda_rsrc *rsrc; u8 cq_type = cq->cq_type; @@ -337,24 +331,24 @@ enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, cq->vsi->ieq; } else { ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n"); - return IRDMA_ERR_BAD_PTR; + return -EINVAL; } ret = irdma_puda_poll_info(cq, &info); *compl_err = info.compl_error; - if (ret == IRDMA_ERR_Q_EMPTY) + if (ret == -ENOENT) return ret; if (ret) goto done; qp = info.qp; if (!qp || !rsrc) { - ret = IRDMA_ERR_BAD_PTR; + ret = -EFAULT; goto done; } if (qp->qp_id != rsrc->qp_id) { - ret = IRDMA_ERR_BAD_PTR; + ret = -EFAULT; goto done; } @@ -422,8 +416,7 @@ done: * @qp: puda qp for send * @info: buffer information for transmit */ -enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp, - struct irdma_puda_send_info *info) +int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info) { __le64 *wqe; u32 iplen, l4len; @@ -443,7 +436,7 @@ enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp, wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch; /* Third line of WQE descriptor */ @@ -503,7 +496,7 @@ void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc, struct irdma_puda_buf *buf) { struct irdma_puda_send_info info; - enum irdma_status_code ret = 0; + int ret = 0; unsigned long flags; spin_lock_irqsave(&rsrc->bufpool_lock, flags); @@ -603,19 +596,18 @@ static void irdma_puda_qp_setctx(struct 
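
Note the convention set in irdma_puda_poll_info() above: IRDMA_ERR_Q_EMPTY becomes -ENOENT, which irdma_puda_poll_cmpl() passes through so callers can tell "ring drained" from a hard failure. A hedged sketch of that caller contract (example_drain() is hypothetical):

static void example_drain(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
{
	u32 compl_err;
	int ret;

	do {
		ret = irdma_puda_poll_cmpl(dev, cq, &compl_err);
	} while (!ret);

	if (ret != -ENOENT)	/* -ENOENT means empty, not an error */
		ibdev_dbg(to_ibdev(dev), "PUDA: poll failed %d\n", ret);
}
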
irdma_puda_rsrc *rsrc) * @dev: Device * @qp: Resource qp */ -static enum irdma_status_code irdma_puda_qp_wqe(struct irdma_sc_dev *dev, - struct irdma_sc_qp *qp) +static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) { struct irdma_sc_cqp *cqp; __le64 *wqe; u64 hdr; struct irdma_ccq_cqe_info compl_info; - enum irdma_status_code status = 0; + int status = 0; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0); if (!wqe) - return IRDMA_ERR_RING_FULL; + return -ENOMEM; set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); set_64bit_val(wqe, 40, qp->shadow_area_pa); @@ -643,11 +635,11 @@ static enum irdma_status_code irdma_puda_qp_wqe(struct irdma_sc_dev *dev, * irdma_puda_qp_create - create qp for resource * @rsrc: resource to use for buffer */ -static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc) +static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc) { struct irdma_sc_qp *qp = &rsrc->qp; struct irdma_qp_uk *ukqp = &qp->qp_uk; - enum irdma_status_code ret = 0; + int ret = 0; u32 sq_size, rq_size; struct irdma_dma_mem *mem; @@ -659,7 +651,7 @@ static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc) rsrc->qpmem.size, &rsrc->qpmem.pa, GFP_KERNEL); if (!rsrc->qpmem.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; mem = &rsrc->qpmem; memset(mem->va, 0, rsrc->qpmem.size); @@ -722,19 +714,18 @@ static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc) * @dev: Device * @cq: resource for cq */ -static enum irdma_status_code irdma_puda_cq_wqe(struct irdma_sc_dev *dev, - struct irdma_sc_cq *cq) +static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) { __le64 *wqe; struct irdma_sc_cqp *cqp; u64 hdr; struct irdma_ccq_cqe_info compl_info; - enum irdma_status_code status = 0; + int status = 0; cqp = dev->cqp; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0); if (!wqe) - return IRDMA_ERR_RING_FULL; + return -ENOMEM; set_64bit_val(wqe, 0, cq->cq_uk.cq_size); set_64bit_val(wqe, 8, (uintptr_t)cq >> 1); @@ -775,11 +766,11 @@ static enum irdma_status_code irdma_puda_cq_wqe(struct irdma_sc_dev *dev, * irdma_puda_cq_create - create cq for resource * @rsrc: resource for which cq to create */ -static enum irdma_status_code irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc) +static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc) { struct irdma_sc_dev *dev = rsrc->dev; struct irdma_sc_cq *cq = &rsrc->cq; - enum irdma_status_code ret = 0; + int ret = 0; u32 cqsize; struct irdma_dma_mem *mem; struct irdma_cq_init_info info = {}; @@ -792,7 +783,7 @@ static enum irdma_status_code irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc) rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size, &rsrc->cqmem.pa, GFP_KERNEL); if (!rsrc->cqmem.va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; mem = &rsrc->cqmem; info.dev = dev; @@ -833,7 +824,7 @@ error: */ static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc) { - enum irdma_status_code ret; + int ret; struct irdma_ccq_cqe_info compl_info; struct irdma_sc_dev *dev = rsrc->dev; @@ -865,7 +856,7 @@ static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc) */ static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc) { - enum irdma_status_code ret; + int ret; struct irdma_ccq_cqe_info compl_info; struct irdma_sc_dev *dev = rsrc->dev; @@ -967,8 +958,7 @@ void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type, * @rsrc: resource for buffer allocation * @count: number of buffers to create */ -static enum 
irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, - u32 count) +static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count) { u32 i; struct irdma_puda_buf *buf; @@ -978,7 +968,7 @@ static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size); if (!buf) { rsrc->stats_buf_alloc_fail++; - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } irdma_puda_ret_bufpool(rsrc, buf); rsrc->alloc_buf_count++; @@ -1001,11 +991,11 @@ static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, * @vsi: sc VSI struct * @info: resource information */ -enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi, - struct irdma_puda_rsrc_info *info) +int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi, + struct irdma_puda_rsrc_info *info) { struct irdma_sc_dev *dev = vsi->dev; - enum irdma_status_code ret = 0; + int ret = 0; struct irdma_puda_rsrc *rsrc; u32 pudasize; u32 sqwridsize, rqwridsize; @@ -1023,12 +1013,12 @@ enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi, vmem = &vsi->ieq_mem; break; default: - return IRDMA_NOT_SUPPORTED; + return -EOPNOTSUPP; } vmem->size = pudasize + sqwridsize + rqwridsize; vmem->va = kzalloc(vmem->size, GFP_KERNEL); if (!vmem->va) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; rsrc = vmem->va; spin_lock_init(&rsrc->bufpool_lock); @@ -1046,7 +1036,7 @@ enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi, rsrc->xmit_complete = irdma_ieq_tx_compl; break; default: - return IRDMA_NOT_SUPPORTED; + return -EOPNOTSUPP; } rsrc->type = info->type; @@ -1323,12 +1313,12 @@ static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq, * @buf: first receive buffer * @fpdu_len: total length of fpdu */ -static enum irdma_status_code -irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist, - struct list_head *pbufl, struct irdma_puda_buf *buf, - u16 fpdu_len) +static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, + struct list_head *rxlist, + struct list_head *pbufl, + struct irdma_puda_buf *buf, u16 fpdu_len) { - enum irdma_status_code status = 0; + int status = 0; struct irdma_puda_buf *nextbuf; u32 nextseqnum; u16 plen = fpdu_len - buf->datalen; @@ -1338,13 +1328,13 @@ irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist, do { nextbuf = irdma_puda_get_listbuf(rxlist); if (!nextbuf) { - status = IRDMA_ERR_list_empty; + status = -ENOBUFS; break; } list_add_tail(&nextbuf->list, pbufl); if (nextbuf->seqnum != nextseqnum) { pfpdu->bad_seq_num++; - status = IRDMA_ERR_SEQ_NUM; + status = -ERANGE; break; } if (nextbuf->datalen >= plen) { @@ -1366,11 +1356,11 @@ irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist, * @buf: receive buffer * @fpdu_len: fpdu len in the buffer */ -static enum irdma_status_code -irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu, - struct irdma_puda_buf *buf, u16 fpdu_len) +static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, + struct irdma_pfpdu *pfpdu, + struct irdma_puda_buf *buf, u16 fpdu_len) { - enum irdma_status_code status = 0; + int status = 0; u8 *crcptr; u32 mpacrc; u32 seqnum = buf->seqnum; @@ -1390,7 +1380,7 @@ irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu, txbuf = irdma_puda_get_bufpool(ieq); if (!txbuf) { pfpdu->no_tx_bufs++; - status = IRDMA_ERR_NO_TXBUFS; + status = -ENOBUFS; goto error; } @@ -1434,9 +1424,9 @@ error: * @pfpdu: partial 
management per user qp * @buf: receive buffer */ -static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq, - struct irdma_pfpdu *pfpdu, - struct irdma_puda_buf *buf) +static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq, + struct irdma_pfpdu *pfpdu, + struct irdma_puda_buf *buf) { u16 fpdu_len = 0; u16 datalen = buf->datalen; @@ -1450,7 +1440,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq, bool partial = false; struct irdma_puda_buf *txbuf; struct list_head *rxlist = &pfpdu->rxlist; - enum irdma_status_code ret = 0; + int ret = 0; ioffset = (u16)(buf->data - (u8 *)buf->mem.va); while (datalen) { @@ -1459,7 +1449,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq, ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad fpdu len\n"); list_add(&buf->list, rxlist); - return IRDMA_ERR_MPA_CRC; + return -EINVAL; } if (datalen < fpdu_len) { @@ -1475,7 +1465,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq, list_add(&buf->list, rxlist); ibdev_dbg(to_ibdev(ieq->dev), "ERR: IRDMA_ERR_MPA_CRC\n"); - return IRDMA_ERR_MPA_CRC; + return -EINVAL; } full++; pfpdu->fpdu_processed++; @@ -1490,7 +1480,7 @@ static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq, if (!txbuf) { pfpdu->no_tx_bufs++; list_add(&buf->list, rxlist); - return IRDMA_ERR_NO_TXBUFS; + return -ENOBUFS; } /* modify txbuf's buffer header */ irdma_ieq_setup_tx_buf(buf, txbuf); @@ -1539,7 +1529,7 @@ void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp, struct irdma_pfpdu *pfpdu = &qp->pfpdu; struct list_head *rxlist = &pfpdu->rxlist; struct irdma_puda_buf *buf; - enum irdma_status_code status; + int status; do { if (list_empty(rxlist)) @@ -1557,7 +1547,7 @@ void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp, } /* keep processing buffers from the head of the list */ status = irdma_ieq_process_buf(ieq, pfpdu, buf); - if (status == IRDMA_ERR_MPA_CRC) { + if (status == -EINVAL) { pfpdu->mpa_crc_err = true; while (!list_empty(rxlist)) { buf = irdma_puda_get_listbuf(rxlist); @@ -1576,8 +1566,7 @@ void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp, * @qp: qp pointer * @buf: buf received on IEQ used to create AH */ -static enum irdma_status_code irdma_ieq_create_ah(struct irdma_sc_qp *qp, - struct irdma_puda_buf *buf) +static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf) { struct irdma_ah_info ah_info = {}; diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h index db3a51170020..5f5124db6ddf 100644 --- a/drivers/infiniband/hw/irdma/puda.h +++ b/drivers/infiniband/hw/irdma/puda.h @@ -151,42 +151,33 @@ void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc, struct irdma_puda_buf *buf); void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc, struct irdma_puda_buf *buf); -enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp, - struct irdma_puda_send_info *info); -enum irdma_status_code -irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi, - struct irdma_puda_rsrc_info *info); +int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info); +int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi, + struct irdma_puda_rsrc_info *info); void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type, bool reset); -enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, - struct irdma_sc_cq *cq, - u32 *compl_err); +int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq, + u32 
*compl_err); struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev, struct irdma_puda_buf *buf); -enum irdma_status_code -irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, - struct irdma_puda_buf *buf); -enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc, - void *addr, u32 len, u32 val); -enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc); +int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, + struct irdma_puda_buf *buf); +int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, u32 val); +int irdma_init_hash_desc(struct shash_desc **desc); void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp); void irdma_free_hash_desc(struct shash_desc *desc); -void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, - u32 seqnum); -enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_qp *qp); -enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_cq *cq); -enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp); +void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum); +int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp); +int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq); +int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp); void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq); void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp, struct irdma_ah_info *ah_info); -enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev, - struct irdma_ah_info *ah_info, - bool wait, enum puda_rsrc_type type, - void *cb_param, - struct irdma_sc_ah **ah); +int irdma_puda_create_ah(struct irdma_sc_dev *dev, + struct irdma_ah_info *ah_info, bool wait, + enum puda_rsrc_type type, void *cb_param, + struct irdma_sc_ah **ah); void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah); void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp, struct irdma_puda_rsrc *ieq); diff --git a/drivers/infiniband/hw/irdma/status.h b/drivers/infiniband/hw/irdma/status.h deleted file mode 100644 index 22ea3888253a..000000000000 --- a/drivers/infiniband/hw/irdma/status.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ -/* Copyright (c) 2015 - 2020 Intel Corporation */ -#ifndef IRDMA_STATUS_H -#define IRDMA_STATUS_H - -/* Error Codes */ -enum irdma_status_code { - IRDMA_SUCCESS = 0, - IRDMA_ERR_NVM = -1, - IRDMA_ERR_NVM_CHECKSUM = -2, - IRDMA_ERR_CFG = -4, - IRDMA_ERR_PARAM = -5, - IRDMA_ERR_DEVICE_NOT_SUPPORTED = -6, - IRDMA_ERR_RESET_FAILED = -7, - IRDMA_ERR_SWFW_SYNC = -8, - IRDMA_ERR_NO_MEMORY = -9, - IRDMA_ERR_BAD_PTR = -10, - IRDMA_ERR_INVALID_PD_ID = -11, - IRDMA_ERR_INVALID_QP_ID = -12, - IRDMA_ERR_INVALID_CQ_ID = -13, - IRDMA_ERR_INVALID_CEQ_ID = -14, - IRDMA_ERR_INVALID_AEQ_ID = -15, - IRDMA_ERR_INVALID_SIZE = -16, - IRDMA_ERR_INVALID_ARP_INDEX = -17, - IRDMA_ERR_INVALID_FPM_FUNC_ID = -18, - IRDMA_ERR_QP_INVALID_MSG_SIZE = -19, - IRDMA_ERR_QP_TOOMANY_WRS_POSTED = -20, - IRDMA_ERR_INVALID_FRAG_COUNT = -21, - IRDMA_ERR_Q_EMPTY = -22, - IRDMA_ERR_INVALID_ALIGNMENT = -23, - IRDMA_ERR_FLUSHED_Q = -24, - IRDMA_ERR_INVALID_PUSH_PAGE_INDEX = -25, - IRDMA_ERR_INVALID_INLINE_DATA_SIZE = -26, - IRDMA_ERR_TIMEOUT = -27, - IRDMA_ERR_OPCODE_MISMATCH = -28, - IRDMA_ERR_CQP_COMPL_ERROR = -29, - IRDMA_ERR_INVALID_VF_ID = 
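
Deleting status.h is the heart of the series; every remaining IRDMA_ERR_* use was substituted at its call site. Summarized as a hypothetical translation helper (never part of the patch, which inlines the errnos instead; mappings taken from the hunks in this diff):

static inline int example_status_to_errno(enum irdma_status_code code)
{
	switch (code) {
	case IRDMA_ERR_NO_MEMORY:
	case IRDMA_ERR_RING_FULL:	return -ENOMEM;
	case IRDMA_ERR_CFG:
	case IRDMA_ERR_PARAM:
	case IRDMA_ERR_MPA_CRC:		return -EINVAL;
	case IRDMA_ERR_Q_EMPTY:		return -ENOENT;
	case IRDMA_ERR_BAD_PTR:		return -EFAULT;	/* or -EINVAL by site */
	case IRDMA_ERR_SEQ_NUM:		return -ERANGE;
	case IRDMA_ERR_NO_TXBUFS:
	case IRDMA_ERR_list_empty:	return -ENOBUFS;
	case IRDMA_NOT_SUPPORTED:	return -EOPNOTSUPP;
	case IRDMA_ERR_CQ_COMPL_ERROR:	return -EIO;
	default:			return -EIO;
	}
}
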
-30, - IRDMA_ERR_INVALID_HMCFN_ID = -31, - IRDMA_ERR_BACKING_PAGE_ERROR = -32, - IRDMA_ERR_NO_PBLCHUNKS_AVAILABLE = -33, - IRDMA_ERR_INVALID_PBLE_INDEX = -34, - IRDMA_ERR_INVALID_SD_INDEX = -35, - IRDMA_ERR_INVALID_PAGE_DESC_INDEX = -36, - IRDMA_ERR_INVALID_SD_TYPE = -37, - IRDMA_ERR_MEMCPY_FAILED = -38, - IRDMA_ERR_INVALID_HMC_OBJ_INDEX = -39, - IRDMA_ERR_INVALID_HMC_OBJ_COUNT = -40, - IRDMA_ERR_BUF_TOO_SHORT = -43, - IRDMA_ERR_BAD_IWARP_CQE = -44, - IRDMA_ERR_NVM_BLANK_MODE = -45, - IRDMA_ERR_NOT_IMPL = -46, - IRDMA_ERR_PE_DOORBELL_NOT_ENA = -47, - IRDMA_ERR_NOT_READY = -48, - IRDMA_NOT_SUPPORTED = -49, - IRDMA_ERR_FIRMWARE_API_VER = -50, - IRDMA_ERR_RING_FULL = -51, - IRDMA_ERR_MPA_CRC = -61, - IRDMA_ERR_NO_TXBUFS = -62, - IRDMA_ERR_SEQ_NUM = -63, - IRDMA_ERR_list_empty = -64, - IRDMA_ERR_INVALID_MAC_ADDR = -65, - IRDMA_ERR_BAD_STAG = -66, - IRDMA_ERR_CQ_COMPL_ERROR = -67, - IRDMA_ERR_Q_DESTROYED = -68, - IRDMA_ERR_INVALID_FEAT_CNT = -69, - IRDMA_ERR_REG_CQ_FULL = -70, - IRDMA_ERR_VF_MSG_ERROR = -71, - IRDMA_ERR_NO_INTR = -72, - IRDMA_ERR_REG_QSET = -73, -}; -#endif /* IRDMA_STATUS_H */ diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h index 9483bb3e10ea..517d41a1c289 100644 --- a/drivers/infiniband/hw/irdma/type.h +++ b/drivers/infiniband/hw/irdma/type.h @@ -2,7 +2,6 @@ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef IRDMA_TYPE_H #define IRDMA_TYPE_H -#include "status.h" #include "osdep.h" #include "irdma.h" #include "user.h" @@ -99,6 +98,7 @@ enum irdma_term_mpa_errors { enum irdma_qp_event_type { IRDMA_QP_EVENT_CATASTROPHIC, IRDMA_QP_EVENT_ACCESS_ERR, + IRDMA_QP_EVENT_REQ_ERR, }; enum irdma_hw_stats_index_32b { @@ -402,8 +402,8 @@ struct irdma_sc_cqp { u64 host_ctx_pa; void *back_cqp; struct irdma_sc_dev *dev; - enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev, - struct irdma_update_sds_info *info); + int (*process_cqp_sds)(struct irdma_sc_dev *dev, + struct irdma_update_sds_info *info); struct irdma_dma_mem sdbuf; struct irdma_ring sq_ring; struct irdma_cqp_quanta *sq_base; @@ -605,12 +605,14 @@ struct irdma_sc_vsi { struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY]; struct irdma_vsi_pestat *pestat; atomic_t qp_suspend_reqs; - enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi, - struct irdma_ws_node *tc_node); + int (*register_qset)(struct irdma_sc_vsi *vsi, + struct irdma_ws_node *tc_node); void (*unregister_qset)(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node); u8 qos_rel_bw; u8 qos_prio_type; + u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; + bool dscp_mode:1; }; struct irdma_sc_dev { @@ -655,7 +657,7 @@ struct irdma_sc_dev { bool vchnl_up:1; bool ceq_valid:1; u8 pci_rev; - enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri); + int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri); void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri); void (*ws_reset)(struct irdma_sc_vsi *vsi); }; @@ -735,11 +737,13 @@ struct irdma_l2params { u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY]; u16 mtu; u8 up2tc[IRDMA_MAX_USER_PRIORITY]; + u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; u8 num_tc; u8 vsi_rel_bw; u8 vsi_prio_type; bool mtu_changed:1; bool tc_changed:1; + bool dscp_mode:1; }; struct irdma_vsi_init_info { @@ -750,8 +754,8 @@ struct irdma_vsi_init_info { u16 pf_data_vsi_num; enum irdma_vm_vf_type vm_vf_type; u16 vm_id; - enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi, - struct irdma_ws_node *tc_node); + int (*register_qset)(struct irdma_sc_vsi *vsi, + struct irdma_ws_node *tc_node); void 
(*unregister_qset)(struct irdma_sc_vsi *vsi, struct irdma_ws_node *tc_node); }; @@ -1198,29 +1202,27 @@ struct irdma_irq_ops { }; void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq); -enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch, - bool check_overflow, bool post_sq); -enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, - bool post_sq); -enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, - struct irdma_ccq_cqe_info *info); -enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq, - struct irdma_ccq_init_info *info); - -enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch); -enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq); - -enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, - bool post_sq); -enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq, - struct irdma_ceq_init_info *info); +int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch, + bool check_overflow, bool post_sq); +int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq); +int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, + struct irdma_ccq_cqe_info *info); +int irdma_sc_ccq_init(struct irdma_sc_cq *ccq, + struct irdma_ccq_init_info *info); + +int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch); +int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq); + +int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq); +int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq, + struct irdma_ceq_init_info *info); void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq); void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq); -enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq, - struct irdma_aeq_init_info *info); -enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, - struct irdma_aeqe_info *info); +int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq, + struct irdma_aeq_init_info *info); +int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, + struct irdma_aeqe_info *info); void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count); void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id, @@ -1228,31 +1230,27 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable); void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout, struct irdma_sc_dev *dev); -enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, - u16 *min_err); -enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp); -enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, - struct irdma_cqp_init_info *info); +int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err); +int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp); +int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, + struct irdma_cqp_init_info *info); void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp); -enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode, - struct irdma_ccq_cqe_info *cmpl_info); -enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp, - struct irdma_fast_reg_stag_info *info, - bool post_sq); -enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, - struct irdma_create_qp_info *info, - u64 scratch, bool post_sq); -enum 
irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, - u64 scratch, bool remove_hash_idx, - bool ignore_mw_bnd, bool post_sq); -enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp, - struct irdma_qp_flush_info *info, - u64 scratch, bool post_sq); -enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp, - struct irdma_qp_init_info *info); -enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp, - struct irdma_modify_qp_info *info, - u64 scratch, bool post_sq); +int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode, + struct irdma_ccq_cqe_info *cmpl_info); +int irdma_sc_fast_register(struct irdma_sc_qp *qp, + struct irdma_fast_reg_stag_info *info, bool post_sq); +int irdma_sc_qp_create(struct irdma_sc_qp *qp, + struct irdma_create_qp_info *info, u64 scratch, + bool post_sq); +int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch, + bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq); +int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp, + struct irdma_qp_flush_info *info, u64 scratch, + bool post_sq); +int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info); +int irdma_sc_qp_modify(struct irdma_sc_qp *qp, + struct irdma_modify_qp_info *info, u64 scratch, + bool post_sq); void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size, irdma_stag stag); @@ -1261,14 +1259,12 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx, struct irdma_qp_host_ctx_info *info); void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx, struct irdma_qp_host_ctx_info *info); -enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, - bool post_sq); -enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq, - struct irdma_cq_init_info *info); +int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq); +int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info); void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info); -enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, - u64 scratch, u8 hmc_fn_id, - bool post_sq, bool poll_registers); +int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, + u8 hmc_fn_id, bool post_sq, + bool poll_registers); void sc_vsi_update_stats(struct irdma_sc_vsi *vsi); struct cqp_info { diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c index f5b1b6150cdc..284cec2a74de 100644 --- a/drivers/infiniband/hw/irdma/uda.c +++ b/drivers/infiniband/hw/irdma/uda.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2016 - 2021 Intel Corporation */ +#include <linux/etherdevice.h> + #include "osdep.h" -#include "status.h" #include "hmc.h" #include "defs.h" #include "type.h" @@ -16,16 +17,15 @@ * @op: Operation * @scratch: u64 saved to be used during cqp completion */ -enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, - struct irdma_ah_info *info, - u32 op, u64 scratch) +int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info, + u32 op, u64 scratch) { __le64 *wqe; u64 qw1, qw2; wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) - return IRDMA_ERR_RING_FULL; + return -ENOMEM; set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16); qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) | @@ -84,8 +84,7 @@ enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, * irdma_create_mg_ctx() - create a 
mcg context * @info: multicast group context info */ -static enum irdma_status_code -irdma_create_mg_ctx(struct irdma_mcast_grp_info *info) +static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info) { struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL; u8 idx = 0; /* index in the array */ @@ -104,8 +103,6 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info) ctx_idx++; } } - - return 0; } /** @@ -115,27 +112,24 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info) * @cqp: struct for cqp hw * @info: multicast group context info * @op: operation to perform * @scratch: u64 saved to be used during cqp completion */ -enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, - struct irdma_mcast_grp_info *info, - u32 op, u64 scratch) +int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, + struct irdma_mcast_grp_info *info, u32 op, + u64 scratch) { __le64 *wqe; - enum irdma_status_code ret_code = 0; if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) { ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n"); - return IRDMA_ERR_PARAM; + return -EINVAL; } wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); if (!wqe) { ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n"); - return IRDMA_ERR_RING_FULL; + return -ENOMEM; } - ret_code = irdma_create_mg_ctx(info); - if (ret_code) - return ret_code; + irdma_create_mg_ctx(info); set_64bit_val(wqe, 32, info->dma_mem_mc.pa); set_64bit_val(wqe, 16, @@ -196,8 +190,8 @@ static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1, * @ctx: Multicast group context * @mg: Multicast group info */ -enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, - struct irdma_mcast_grp_ctx_entry_info *mg) +int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, + struct irdma_mcast_grp_ctx_entry_info *mg) { u32 idx; bool free_entry_found = false; @@ -226,7 +220,7 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, return 0; } - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -237,8 +231,8 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, * Finds and removes a specific multicast group from context, all * parameters must match to remove a multicast group. 
*/ -enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, - struct irdma_mcast_grp_ctx_entry_info *mg) +int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, + struct irdma_mcast_grp_ctx_entry_info *mg) { u32 idx; @@ -267,5 +261,5 @@ enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, } } - return IRDMA_ERR_PARAM; + return -EINVAL; } diff --git a/drivers/infiniband/hw/irdma/uda.h b/drivers/infiniband/hw/irdma/uda.h index a4ad0367dc96..fe4820ff0cca 100644 --- a/drivers/infiniband/hw/irdma/uda.h +++ b/drivers/infiniband/hw/irdma/uda.h @@ -32,56 +32,54 @@ struct irdma_sc_ah { struct irdma_ah_info ah_info; }; -enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, - struct irdma_mcast_grp_ctx_entry_info *mg); -enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, - struct irdma_mcast_grp_ctx_entry_info *mg); -enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info, - u32 op, u64 scratch); -enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, - struct irdma_mcast_grp_info *info, - u32 op, u64 scratch); +int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, + struct irdma_mcast_grp_ctx_entry_info *mg); +int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, + struct irdma_mcast_grp_ctx_entry_info *mg); +int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info, + u32 op, u64 scratch); +int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, + struct irdma_mcast_grp_info *info, u32 op, + u64 scratch); static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah) { ah->dev = dev; } -static inline enum irdma_status_code irdma_sc_create_ah(struct irdma_sc_cqp *cqp, - struct irdma_ah_info *info, - u64 scratch) +static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp, + struct irdma_ah_info *info, u64 scratch) { return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE, scratch); } -static inline enum irdma_status_code irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp, - struct irdma_ah_info *info, - u64 scratch) +static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp, + struct irdma_ah_info *info, u64 scratch) { return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE, scratch); } -static inline enum irdma_status_code irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp, - struct irdma_mcast_grp_info *info, - u64 scratch) +static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp, + struct irdma_mcast_grp_info *info, + u64 scratch) { return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP, scratch); } -static inline enum irdma_status_code irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp, - struct irdma_mcast_grp_info *info, - u64 scratch) +static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp, + struct irdma_mcast_grp_info *info, + u64 scratch) { return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP, scratch); } -static inline enum irdma_status_code irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp, - struct irdma_mcast_grp_info *info, - u64 scratch) +static inline int irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp, + struct irdma_mcast_grp_info *info, + u64 scratch) { return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP, scratch); diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c index 57a9444e9ea7..a6e5d350a94c 100644 --- 
a/drivers/infiniband/hw/irdma/uk.c +++ b/drivers/infiniband/hw/irdma/uk.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" -#include "status.h" #include "defs.h" #include "user.h" #include "irdma.h" @@ -56,7 +55,7 @@ static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset, * irdma_nop_1 - insert a NOP wqe * @qp: hw qp ptr */ -static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp) +static int irdma_nop_1(struct irdma_qp_uk *qp) { u64 hdr; __le64 *wqe; @@ -64,7 +63,7 @@ static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp) bool signaled = false; if (!qp->sq_ring.head) - return IRDMA_ERR_PARAM; + return -EINVAL; wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring); wqe = qp->sq_base[wqe_idx].elem; @@ -245,7 +244,7 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) { __le64 *wqe; - enum irdma_status_code ret_code; + int ret_code; if (IRDMA_RING_FULL_ERR(qp->rq_ring)) return NULL; @@ -268,16 +267,15 @@ __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx) * @info: post sq information * @post_sq: flag to post sq */ -enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq) +int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool post_sq) { u64 hdr; __le64 *wqe; struct irdma_rdma_write *op_info; u32 i, wqe_idx; u32 total_size = 0, byte_off; - enum irdma_status_code ret_code; + int ret_code; u32 frag_cnt, addl_frag_cnt; bool read_fence = false; u16 quanta; @@ -286,7 +284,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp, op_info = &info->op.rdma_write; if (op_info->num_lo_sges > qp->max_sq_frag_cnt) - return IRDMA_ERR_INVALID_FRAG_COUNT; + return -EINVAL; for (i = 0; i < op_info->num_lo_sges; i++) total_size += op_info->lo_sg_list[i].length; @@ -305,7 +303,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp, wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); @@ -370,12 +368,11 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp, * @inv_stag: flag for inv_stag * @post_sq: flag to post sq */ -enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool inv_stag, bool post_sq) +int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool inv_stag, bool post_sq) { struct irdma_rdma_read *op_info; - enum irdma_status_code ret_code; + int ret_code; u32 i, byte_off, total_size = 0; bool local_fence = false; u32 addl_frag_cnt; @@ -388,7 +385,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp, op_info = &info->op.rdma_read; if (qp->max_sq_frag_cnt < op_info->num_lo_sges) - return IRDMA_ERR_INVALID_FRAG_COUNT; + return -EINVAL; for (i = 0; i < op_info->num_lo_sges; i++) total_size += op_info->lo_sg_list[i].length; @@ -400,7 +397,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp, wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); @@ -457,15 +454,14 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp, * @info: post sq information * @post_sq: flag to post sq */ -enum 
irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq) +int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool post_sq) { __le64 *wqe; struct irdma_post_send *op_info; u64 hdr; u32 i, wqe_idx, total_size = 0, byte_off; - enum irdma_status_code ret_code; + int ret_code; u32 frag_cnt, addl_frag_cnt; bool read_fence = false; u16 quanta; @@ -474,7 +470,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp, op_info = &info->op.send; if (qp->max_sq_frag_cnt < op_info->num_sges) - return IRDMA_ERR_INVALID_FRAG_COUNT; + return -EINVAL; for (i = 0; i < op_info->num_sges; i++) total_size += op_info->sg_list[i].length; @@ -490,7 +486,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp, wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); @@ -501,7 +497,8 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp, FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); i = 0; } else { - qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list, + qp->wqe_ops.iw_set_fragment(wqe, 0, + frag_cnt ? op_info->sg_list : NULL, qp->swqe_polarity); i = 1; } @@ -678,9 +675,8 @@ static u16 irdma_inline_data_size_to_quanta(u32 data_size) * @info: post sq information * @post_sq: flag to post sq */ -enum irdma_status_code -irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, - bool post_sq) +int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_inline_rdma_write *op_info; @@ -693,13 +689,13 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in op_info = &info->op.inline_rdma_write; if (op_info->len > qp->max_inline_data) - return IRDMA_ERR_INVALID_INLINE_DATA_SIZE; + return -EINVAL; quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len); wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, info); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); @@ -745,9 +741,8 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in * @info: post sq information * @post_sq: flag to post sq */ -enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq) +int irdma_uk_inline_send(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq) { __le64 *wqe; struct irdma_post_inline_send *op_info; @@ -760,13 +755,13 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp, op_info = &info->op.inline_send; if (op_info->len > qp->max_inline_data) - return IRDMA_ERR_INVALID_INLINE_DATA_SIZE; + return -EINVAL; quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len); wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, info); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); @@ -817,9 +812,9 @@ enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp, * @info: post sq information * @post_sq: flag to post sq */ -enum irdma_status_code -irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, bool post_sq) +int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, + bool post_sq) { __le64 *wqe; struct irdma_inv_local_stag *op_info; @@ -835,7 +830,7 @@ 
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, info); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); @@ -871,8 +866,8 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, * @qp: hw qp ptr * @info: post rq information */ -enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp, - struct irdma_post_rq_info *info) +int irdma_uk_post_receive(struct irdma_qp_uk *qp, + struct irdma_post_rq_info *info) { u32 wqe_idx, i, byte_off; u32 addl_frag_cnt; @@ -880,11 +875,11 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp, u64 hdr; if (qp->max_rq_frag_cnt < info->num_sges) - return IRDMA_ERR_INVALID_FRAG_COUNT; + return -EINVAL; wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; qp->rq_wrid_array[wqe_idx] = info->wr_id; addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0; @@ -1000,17 +995,18 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, * @cq: hw cq * @info: cq poll information returned */ -enum irdma_status_code -irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) +int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, + struct irdma_cq_poll_info *info) { u64 comp_ctx, qword0, qword2, qword3; __le64 *cqe; struct irdma_qp_uk *qp; struct irdma_ring *pring = NULL; u32 wqe_idx, q_type; - enum irdma_status_code ret_code; + int ret_code; bool move_cq_head = true; u8 polarity; + u8 op_type; bool ext_valid; __le64 *ext_cqe; @@ -1022,7 +1018,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) get_64bit_val(cqe, 24, &qword3); polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3); if (polarity != cq->polarity) - return IRDMA_ERR_Q_EMPTY; + return -ENOENT; /* Ensure CQE contents are read after valid bit is checked */ dma_rmb(); @@ -1045,7 +1041,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) polarity ^= 1; } if (polarity != cq->polarity) - return IRDMA_ERR_Q_EMPTY; + return -ENOENT; /* Ensure ext CQE contents are read after ext valid bit is checked */ dma_rmb(); @@ -1112,7 +1108,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3); qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx; if (!qp || qp->destroy_pending) { - ret_code = IRDMA_ERR_Q_DESTROYED; + ret_code = -EFAULT; goto exit; } wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3); @@ -1126,7 +1122,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED || info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) { if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) { - ret_code = IRDMA_ERR_Q_EMPTY; + ret_code = -ENOENT; goto exit; } @@ -1186,14 +1182,13 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta); } else { if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) { - ret_code = IRDMA_ERR_Q_EMPTY; + ret_code = -ENOENT; goto exit; } do { __le64 *sw_wqe; u64 wqe_qword; - u8 op_type; u32 tail; tail = qp->sq_ring.tail; @@ -1210,6 +1205,8 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info) break; } } while (1); + if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR) + info->minor_err = FLUSH_MW_BIND_ERR; qp->sq_flush_seen = true; if 
(!IRDMA_RING_MORE_WORK(qp->sq_ring)) qp->sq_flush_complete = true; @@ -1303,15 +1300,15 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge, * @sqdepth: depth of SQ * */ -enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, - u32 sq_size, u8 shift, u32 *sqdepth) +int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, + u32 *sqdepth) { *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD); if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift)) *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift; else if (*sqdepth > uk_attrs->max_hw_wq_quanta) - return IRDMA_ERR_INVALID_SIZE; + return -EINVAL; return 0; } @@ -1323,15 +1320,15 @@ enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, * @shift: shift which determines size of WQE * @rqdepth: depth of RQ */ -enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, - u32 rq_size, u8 shift, u32 *rqdepth) +int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, + u32 *rqdepth) { *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD); if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift)) *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift; else if (*rqdepth > uk_attrs->max_hw_rq_quanta) - return IRDMA_ERR_INVALID_SIZE; + return -EINVAL; return 0; } @@ -1381,17 +1378,16 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp, * allowed. Then size of wqe * the number of wqes should be the * amount of memory allocated for sq and rq. */ -enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp, - struct irdma_qp_uk_init_info *info) +int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info) { - enum irdma_status_code ret_code = 0; + int ret_code = 0; u32 sq_ring_size; u8 sqshift, rqshift; qp->uk_attrs = info->uk_attrs; if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags || info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags) - return IRDMA_ERR_INVALID_FRAG_COUNT; + return -EINVAL; irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift); if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) { @@ -1502,8 +1498,7 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq) * @signaled: signaled for completion * @post_sq: ring doorbell */ -enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, - bool signaled, bool post_sq) +int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq) { __le64 *wqe; u64 hdr; @@ -1515,7 +1510,7 @@ enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA, 0, &info); if (!wqe) - return IRDMA_ERR_QP_TOOMANY_WRS_POSTED; + return -ENOMEM; irdma_clr_wqes(qp, wqe_idx); @@ -1541,7 +1536,7 @@ enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, * @frag_cnt: number of fragments * @quanta: quanta for frag_cnt */ -enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) +int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) { switch (frag_cnt) { case 0: @@ -1577,7 +1572,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) *quanta = 8; break; default: - return IRDMA_ERR_INVALID_FRAG_COUNT; + return -EINVAL; } return 0; @@ -1588,7 +1583,7 @@ enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) * @frag_cnt: number of fragments * @wqe_size: size in bytes given frag_cnt */ -enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) +int 
irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) { switch (frag_cnt) { case 0: @@ -1615,7 +1610,7 @@ enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) *wqe_size = 256; break; default: - return IRDMA_ERR_INVALID_FRAG_COUNT; + return -EINVAL; } return 0; diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h index 3c811fb88404..2ef61923c926 100644 --- a/drivers/infiniband/hw/irdma/user.h +++ b/drivers/infiniband/hw/irdma/user.h @@ -103,6 +103,7 @@ enum irdma_flush_opcode { FLUSH_FATAL_ERR, FLUSH_RETRY_EXC_ERR, FLUSH_MW_BIND_ERR, + FLUSH_REM_INV_REQ_ERR, }; enum irdma_cmpl_status { @@ -270,29 +271,24 @@ struct irdma_cq_poll_info { bool imm_valid:1; }; -enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); -enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); - -enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, - bool signaled, bool post_sq); -enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp, - struct irdma_post_rq_info *info); +int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq); +int irdma_uk_inline_send(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, bool post_sq); +int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, + bool post_sq); +int irdma_uk_post_receive(struct irdma_qp_uk *qp, + struct irdma_post_rq_info *info); void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp); -enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool inv_stag, bool post_sq); -enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); -enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, bool post_sq); -enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, - struct irdma_post_sq_info *info, - bool post_sq); +int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool inv_stag, bool post_sq); +int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool post_sq); +int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, + bool post_sq); +int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, + struct irdma_post_sq_info *info, + bool post_sq); struct irdma_wqe_uk_ops { void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity); @@ -303,16 +299,16 @@ struct irdma_wqe_uk_ops { struct irdma_bind_window *op_info); }; -enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, - struct irdma_cq_poll_info *info); +int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, + struct irdma_cq_poll_info *info); void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, enum irdma_cmpl_notify cq_notify); void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size); void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt); void irdma_uk_cq_init(struct irdma_cq_uk *cq, struct irdma_cq_uk_init_info *info); -enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp, - struct irdma_qp_uk_init_info *info); +int irdma_uk_qp_init(struct irdma_qp_uk *qp, + struct irdma_qp_uk_init_info *info); struct irdma_sq_uk_wr_trk_info { u64 wrid; u32 wr_len; @@ -413,16 +409,15 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk 
*qp, u32 *wqe_idx, struct irdma_post_sq_info *info); __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx); void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq); -enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, - bool signaled, bool post_sq); -enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); -enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size); +int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq); +int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); +int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size); void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge, u32 inline_data, u8 *shift); -enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, - u32 sq_size, u8 shift, u32 *wqdepth); -enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, - u32 rq_size, u8 shift, u32 *wqdepth); +int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, + u32 *wqdepth); +int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, + u32 *wqdepth); void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta, u32 wqe_idx, bool post_sq); void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx); diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 398736d8c78a..8dfc9e154d73 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -150,31 +150,35 @@ int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; - struct net_device *netdev = ifa->ifa_dev->dev; + struct net_device *real_dev, *netdev = ifa->ifa_dev->dev; struct irdma_device *iwdev; struct ib_device *ibdev; u32 local_ipaddr; - ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA); + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + + ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); local_ipaddr = ntohl(ifa->ifa_address); ibdev_dbg(&iwdev->ibdev, - "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", netdev, - event, &local_ipaddr, netdev->dev_addr); + "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev, + event, &local_ipaddr, real_dev->dev_addr); switch (event) { case NETDEV_DOWN: - irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr, + irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, &local_ipaddr, true, IRDMA_ARP_DELETE); - irdma_if_notify(iwdev, netdev, &local_ipaddr, true, false); + irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false); irdma_gid_change_event(&iwdev->ibdev); break; case NETDEV_UP: case NETDEV_CHANGEADDR: - irdma_add_arp(iwdev->rf, &local_ipaddr, true, netdev->dev_addr); - irdma_if_notify(iwdev, netdev, &local_ipaddr, true, true); + irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr); + irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true); irdma_gid_change_event(&iwdev->ibdev); break; default: @@ -196,32 +200,36 @@ int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = ptr; - struct net_device *netdev = ifa->idev->dev; + struct net_device *real_dev, *netdev = ifa->idev->dev; struct irdma_device *iwdev; struct ib_device *ibdev; u32 local_ipaddr6[4]; - ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA); + real_dev = rdma_vlan_dev_real_dev(netdev); + 
if (!real_dev) + real_dev = netdev; + + ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); if (!ibdev) return NOTIFY_DONE; iwdev = to_iwdev(ibdev); irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32); ibdev_dbg(&iwdev->ibdev, - "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", netdev, - event, local_ipaddr6, netdev->dev_addr); + "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev, + event, local_ipaddr6, real_dev->dev_addr); switch (event) { case NETDEV_DOWN: - irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr, + irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr, local_ipaddr6, false, IRDMA_ARP_DELETE); - irdma_if_notify(iwdev, netdev, local_ipaddr6, false, false); + irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false); irdma_gid_change_event(&iwdev->ibdev); break; case NETDEV_UP: case NETDEV_CHANGEADDR: irdma_add_arp(iwdev->rf, local_ipaddr6, false, - netdev->dev_addr); - irdma_if_notify(iwdev, netdev, local_ipaddr6, false, true); + real_dev->dev_addr); + irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true); irdma_gid_change_event(&iwdev->ibdev); break; default: @@ -243,21 +251,23 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct neighbour *neigh = ptr; + struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev; struct irdma_device *iwdev; struct ib_device *ibdev; __be32 *p; u32 local_ipaddr[4] = {}; bool ipv4 = true; - ibdev = ib_device_get_by_netdev((struct net_device *)neigh->dev, - RDMA_DRIVER_IRDMA); - if (!ibdev) - return NOTIFY_DONE; - - iwdev = to_iwdev(ibdev); - switch (event) { case NETEVENT_NEIGH_UPDATE: + real_dev = rdma_vlan_dev_real_dev(netdev); + if (!real_dev) + real_dev = netdev; + ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); + if (!ibdev) + return NOTIFY_DONE; + + iwdev = to_iwdev(ibdev); p = (__be32 *)neigh->primary_key; if (neigh->tbl->family == AF_INET6) { ipv4 = false; @@ -278,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event, irdma_manage_arp_cache(iwdev->rf, neigh->ha, local_ipaddr, ipv4, IRDMA_ARP_DELETE); + ib_device_put(ibdev); break; default: break; } - ib_device_put(ibdev); - return NOTIFY_DONE; } @@ -551,12 +560,12 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf) * @rf: RDMA PCI function * @cqp_request: cqp request to wait */ -static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf, - struct irdma_cqp_request *cqp_request) +static int irdma_wait_event(struct irdma_pci_f *rf, + struct irdma_cqp_request *cqp_request) { struct irdma_cqp_timeout cqp_timeout = {}; bool cqp_error = false; - enum irdma_status_code err_code = 0; + int err_code = 0; cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; do { @@ -575,17 +584,20 @@ static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf, rf->reset = true; rf->gen_ops.request_reset(rf); } - return IRDMA_ERR_TIMEOUT; + return -ETIMEDOUT; } while (1); cqp_error = cqp_request->compl_info.error; if (cqp_error) { - err_code = IRDMA_ERR_CQP_COMPL_ERROR; - if (cqp_request->compl_info.maj_err_code == 0xFFFF && - cqp_request->compl_info.min_err_code == 0x8029) { - if (!rf->reset) { - rf->reset = true; - rf->gen_ops.request_reset(rf); + err_code = -EIO; + if (cqp_request->compl_info.maj_err_code == 0xFFFF) { + if (cqp_request->compl_info.min_err_code == 0x8002) + err_code = -EBUSY; + else if (cqp_request->compl_info.min_err_code == 0x8029) { + if (!rf->reset) { + rf->reset = true; + 
rf->gen_ops.request_reset(rf); + } } } } @@ -643,6 +655,7 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = { }; static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = { + {0xffff, 0x8002, "Invalid State"}, {0xffff, 0x8006, "Flush No Wqe Pending"}, {0xffff, 0x8007, "Modify QP Bad Close"}, {0xffff, 0x8009, "LLP Closed"}, @@ -680,16 +693,16 @@ bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd, * @rf: RDMA PCI function * @cqp_request: cqp request to process */ -enum irdma_status_code irdma_handle_cqp_op(struct irdma_pci_f *rf, - struct irdma_cqp_request *cqp_request) +int irdma_handle_cqp_op(struct irdma_pci_f *rf, + struct irdma_cqp_request *cqp_request) { struct irdma_sc_dev *dev = &rf->sc_dev; struct cqp_cmds_info *info = &cqp_request->info; - enum irdma_status_code status; + int status; bool put_cqp_request = true; if (rf->reset) - return IRDMA_ERR_NOT_READY; + return -EBUSY; irdma_get_cqp_request(cqp_request); status = irdma_process_cqp_cmd(dev, info); @@ -791,17 +804,17 @@ void *irdma_remove_cqp_head(struct irdma_sc_dev *dev) * @sdinfo: information for sd cqp * */ -enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, - struct irdma_update_sds_info *sdinfo) +int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, + struct irdma_update_sds_info *sdinfo) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo, @@ -822,19 +835,18 @@ enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, * @qp: hardware control qp * @op: suspend or resume */ -enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, - u8 op) +int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op) { struct irdma_sc_dev *dev = qp->dev; struct irdma_cqp_request *cqp_request; struct irdma_sc_cqp *cqp = dev->cqp; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = op; @@ -940,18 +952,17 @@ void irdma_terminate_del_timer(struct irdma_sc_qp *qp) * @val_mem: buffer for fpm * @hmc_fn_id: function id for fpm */ -enum irdma_status_code -irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, - struct irdma_dma_mem *val_mem, u8 hmc_fn_id) +int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, + struct irdma_dma_mem *val_mem, u8 hmc_fn_id) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_request->param = NULL; @@ -975,18 +986,17 @@ irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, * @val_mem: buffer with fpm values * @hmc_fn_id: function id for fpm */ -enum irdma_status_code -irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, - struct irdma_dma_mem *val_mem, u8 hmc_fn_id) +int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, + struct irdma_dma_mem *val_mem, u8 hmc_fn_id) { struct irdma_cqp_request *cqp_request; 
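// [Editor's aside, not part of the patch] Every CQP command wrapper
// converted in this file follows the same errno-based shape; a
// condensed sketch of the shared pattern, with the command-specific
// payload fill-in elided (all helpers shown are the ones used above):
//
//	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
//	if (!cqp_request)
//		return -ENOMEM;			// was IRDMA_ERR_NO_MEMORY
//	cqp_info = &cqp_request->info;
//	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;	// plus the in.u.<cmd> payload
//	status = irdma_handle_cqp_op(rf, cqp_request);	// now int: 0, -EBUSY, -EIO, -ETIMEDOUT
//	irdma_put_cqp_request(&rf->cqp, cqp_request);
//	return status;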
struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_request->param = NULL; @@ -1009,18 +1019,17 @@ irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, * @dev: device pointer * @cq: pointer to created cq */ -enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_cq *cq) +int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; @@ -1039,19 +1048,18 @@ enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, * @dev: device pointer * @qp: pointer to created qp */ -enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_qp *qp) +int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_create_qp_info *qp_info; - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; qp_info = &cqp_request->info.in.u.qp_create.info; @@ -1079,7 +1087,7 @@ static void irdma_dealloc_push_page(struct irdma_pci_f *rf, { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) return; @@ -1179,12 +1187,10 @@ static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request) * @info: info for modify qp * @wait: flag to wait or not for modify qp completion */ -enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev, - struct irdma_qp *iwqp, - struct irdma_modify_qp_info *info, - bool wait) +int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, + struct irdma_modify_qp_info *info, bool wait) { - enum irdma_status_code status; + int status; struct irdma_pci_f *rf = iwdev->rf; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; @@ -1192,7 +1198,7 @@ enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev, cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; if (!wait) { cqp_request->callback_fcn = irdma_hw_modify_qp_callback; @@ -1230,7 +1236,7 @@ enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev, cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; m_info = &cqp_info->in.u.qp_modify.info; @@ -1271,17 +1277,17 @@ void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq) * @dev: device pointer * @qp: pointer to qp */ -enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) +int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, 
struct irdma_sc_qp *qp) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; memset(cqp_info, 0, sizeof(*cqp_info)); @@ -1317,20 +1323,20 @@ void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp) * irdma_init_hash_desc - initialize hash for crc calculation * @desc: crypto shash descriptor */ -enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc) +int irdma_init_hash_desc(struct shash_desc **desc) { struct crypto_shash *tfm; struct shash_desc *tdesc; tfm = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(tfm)) - return IRDMA_ERR_MPA_CRC; + return -EINVAL; tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!tdesc) { crypto_free_shash(tfm); - return IRDMA_ERR_MPA_CRC; + return -EINVAL; } tdesc->tfm = tfm; @@ -1358,19 +1364,19 @@ void irdma_free_hash_desc(struct shash_desc *desc) * @len: length of buffer * @val: value to be compared */ -enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc, - void *addr, u32 len, u32 val) +int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, + u32 val) { u32 crc = 0; int ret; - enum irdma_status_code ret_code = 0; + int ret_code = 0; crypto_shash_init(desc); ret = crypto_shash_update(desc, addr, len); if (!ret) crypto_shash_final(desc, (u8 *)&crc); if (crc != val) - ret_code = IRDMA_ERR_MPA_CRC; + ret_code = -EINVAL; return ret_code; } @@ -1524,9 +1530,8 @@ void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, * @info: to get information * @buf: puda buffer */ -static enum irdma_status_code -irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, - struct irdma_puda_buf *buf) +static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, + struct irdma_puda_buf *buf) { struct iphdr *iph; struct ipv6hdr *ip6h; @@ -1563,7 +1568,7 @@ irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, ibdev_dbg(to_ibdev(buf->vsi->dev), "ERR: payload_len = 0x%x totallen expected 0x%x\n", info->payload_len, buf->totallen); - return IRDMA_ERR_INVALID_SIZE; + return -EINVAL; } buf->tcphlen = tcph->doff << 2; @@ -1580,9 +1585,8 @@ irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, * @info: to get information * @buf: puda buffer */ -enum irdma_status_code -irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, - struct irdma_puda_buf *buf) +int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, + struct irdma_puda_buf *buf) { struct tcphdr *tcph; u32 pkt_len; @@ -1861,20 +1865,19 @@ static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request) * @pestat: pointer to stats info * @wait: flag to wait or not wait for stats */ -enum irdma_status_code -irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, - struct irdma_vsi_pestat *pestat, bool wait) +int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, + struct irdma_vsi_pestat *pestat, bool wait) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; 
memset(cqp_info, 0, sizeof(*cqp_info)); @@ -1900,22 +1903,21 @@ irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, * @cmd: command to allocate or free * @stats_info: pointer to allocate stats info */ -enum irdma_status_code -irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, - struct irdma_stats_inst_info *stats_info) +int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, + struct irdma_stats_inst_info *stats_info) { struct irdma_pci_f *rf = dev_to_rf(vsi->dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; bool wait = false; if (cmd == IRDMA_OP_STATS_ALLOCATE) wait = true; cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; memset(cqp_info, 0, sizeof(*cqp_info)); @@ -1938,17 +1940,17 @@ irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd, * @sc_ceq: pointer to ceq structure * @op: Create or Destroy */ -enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_ceq *sc_ceq, u8 op) +int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq, + u8 op) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->post_sq = 1; @@ -1968,17 +1970,17 @@ enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, * @sc_aeq: pointer to aeq structure * @op: Create or Destroy */ -enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, - struct irdma_sc_aeq *sc_aeq, u8 op) +int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq, + u8 op) { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; struct irdma_pci_f *rf = dev_to_rf(dev); - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; cqp_info->post_sq = 1; @@ -1998,16 +2000,15 @@ enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, * @cmd: Add, modify or delete * @node_info: pointer to ws node info */ -enum irdma_status_code -irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, - struct irdma_ws_node_info *node_info) +int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, + struct irdma_ws_node_info *node_info) { struct irdma_pci_f *rf = dev_to_rf(dev); struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; bool poll; if (!rf->sc_dev.ceq_valid) @@ -2017,7 +2018,7 @@ irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll); if (!cqp_request) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; cqp_info = &cqp_request->info; memset(cqp_info, 0, sizeof(*cqp_info)); @@ -2066,7 +2067,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd, { struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; - enum irdma_status_code status; + int status; if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY) return -EINVAL; @@ -2148,11 +2149,10 @@ static void 
irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request) * @ah_ret: Returned pointer to address handle if created * */ -enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev, - struct irdma_ah_info *ah_info, - bool wait, enum puda_rsrc_type type, - void *cb_param, - struct irdma_sc_ah **ah_ret) +int irdma_puda_create_ah(struct irdma_sc_dev *dev, + struct irdma_ah_info *ah_info, bool wait, + enum puda_rsrc_type type, void *cb_param, + struct irdma_sc_ah **ah_ret) { struct irdma_sc_ah *ah; struct irdma_pci_f *rf = dev_to_rf(dev); @@ -2161,7 +2161,7 @@ enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev, ah = kzalloc(sizeof(*ah), GFP_ATOMIC); *ah_ret = ah; if (!ah) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_info->ah_idx, &rf->next_ah); @@ -2187,7 +2187,7 @@ error: err_free: kfree(ah); *ah_ret = NULL; - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -2229,19 +2229,19 @@ void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request) * @pprm: pble resource manager * @pchunk: chunk of memory to add */ -enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, - struct irdma_chunk *pchunk) +int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, + struct irdma_chunk *pchunk) { u64 sizeofbitmap; if (pchunk->size & 0xfff) - return IRDMA_ERR_PARAM; + return -EINVAL; sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift; pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL); if (!pchunk->bitmapbuf) - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; pchunk->sizeofbitmap = sizeofbitmap; /* each pble is 8 bytes hence shift by 3 */ @@ -2259,10 +2259,9 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, * @vaddr: returns virtual address of pble memory * @fpm_addr: returns fpm address of pble memory */ -enum irdma_status_code -irdma_prm_get_pbles(struct irdma_pble_prm *pprm, - struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size, - u64 **vaddr, u64 *fpm_addr) +int irdma_prm_get_pbles(struct irdma_pble_prm *pprm, + struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size, + u64 **vaddr, u64 *fpm_addr) { u64 bits_needed; u64 bit_idx = PBLE_INVALID_IDX; @@ -2290,7 +2289,7 @@ irdma_prm_get_pbles(struct irdma_pble_prm *pprm, if (!pchunk || bit_idx >= pchunk->sizeofbitmap) { spin_unlock_irqrestore(&pprm->prm_lock, flags); - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed); @@ -2325,8 +2324,8 @@ void irdma_prm_return_pbles(struct irdma_pble_prm *pprm, spin_unlock_irqrestore(&pprm->prm_lock, flags); } -enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va, - dma_addr_t *pg_dma, u32 pg_cnt) +int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma, + u32 pg_cnt) { struct page *vm_page; int i; @@ -2350,7 +2349,7 @@ enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va, err: irdma_unmap_vm_page_list(hw, pg_dma, i); - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt) @@ -2386,15 +2385,14 @@ done: * @chunk: chunk to add for paged memory * @pg_cnt: number of pages needed */ -enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk, - u32 pg_cnt) +int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt) { u32 size; void *va; chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL); if (!chunk->dmainfo.dmaaddrs) - return IRDMA_ERR_NO_MEMORY; + return 
-ENOMEM; size = PAGE_SIZE * pg_cnt; va = vmalloc(size); @@ -2416,7 +2414,7 @@ err: kfree(chunk->dmainfo.dmaaddrs); chunk->dmainfo.dmaaddrs = NULL; - return IRDMA_ERR_NO_MEMORY; + return -ENOMEM; } /** @@ -2481,6 +2479,9 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event) case IRDMA_QP_EVENT_ACCESS_ERR: ibevent.event = IB_EVENT_QP_ACCESS_ERR; break; + case IRDMA_QP_EVENT_REQ_ERR: + ibevent.event = IB_EVENT_QP_REQ_ERR; + break; } ibevent.device = iwqp->ibqp.device; ibevent.element.qp = &iwqp->ibqp; @@ -2501,3 +2502,150 @@ bool irdma_cq_empty(struct irdma_cq *iwcq) return polarity != ukcq->polarity; } + +void irdma_remove_cmpls_list(struct irdma_cq *iwcq) +{ + struct irdma_cmpl_gen *cmpl_node; + struct list_head *tmp_node, *list_node; + + list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) { + cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list); + list_del(&cmpl_node->list); + kfree(cmpl_node); + } +} + +int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info) +{ + struct irdma_cmpl_gen *cmpl; + + if (list_empty(&iwcq->cmpl_generated)) + return -ENOENT; + cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list); + list_del(&cmpl->list); + memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info)); + kfree(cmpl); + + ibdev_dbg(iwcq->ibcq.device, + "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n", + __func__, cq_poll_info->qp_id, cq_poll_info->op_type, + cq_poll_info->wr_id); + + return 0; +} + +/** + * irdma_set_cpi_common_values - fill in values for polling info struct + * @cpi: resulting structure of cq_poll_info type + * @qp: QPair + * @qp_num: id of the QP + */ +static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi, + struct irdma_qp_uk *qp, u32 qp_num) +{ + cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED; + cpi->error = true; + cpi->major_err = IRDMA_FLUSH_MAJOR_ERR; + cpi->minor_err = FLUSH_GENERAL_ERR; + cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp; + cpi->qp_id = qp_num; +} + +static inline void irdma_comp_handler(struct irdma_cq *cq) +{ + if (!cq->ibcq.comp_handler) + return; + if (atomic_cmpxchg(&cq->armed, 1, 0)) + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +void irdma_generate_flush_completions(struct irdma_qp *iwqp) +{ + struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk; + struct irdma_ring *sq_ring = &qp->sq_ring; + struct irdma_ring *rq_ring = &qp->rq_ring; + struct irdma_cmpl_gen *cmpl; + __le64 *sw_wqe; + u64 wqe_qword; + u32 wqe_idx; + bool compl_generated = false; + unsigned long flags1; + + spin_lock_irqsave(&iwqp->iwscq->lock, flags1); + if (irdma_cq_empty(iwqp->iwscq)) { + unsigned long flags2; + + spin_lock_irqsave(&iwqp->lock, flags2); + while (IRDMA_RING_MORE_WORK(*sq_ring)) { + cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); + if (!cmpl) { + spin_unlock_irqrestore(&iwqp->lock, flags2); + spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); + return; + } + + wqe_idx = sq_ring->tail; + irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + + cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + sw_wqe = qp->sq_base[wqe_idx].elem; + get_64bit_val(sw_wqe, 24, &wqe_qword); + cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword); + /* remove the SQ WR by moving SQ tail */ + IRDMA_RING_SET_TAIL(*sq_ring, + sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta); + + ibdev_dbg(iwqp->iwscq->ibcq.device, + "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n", + __func__, 
cmpl->cpi.wr_id, qp->qp_id); + list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated); + compl_generated = true; + } + spin_unlock_irqrestore(&iwqp->lock, flags2); + spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); + if (compl_generated) + irdma_comp_handler(iwqp->iwscq); + } else { + spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); + } + + spin_lock_irqsave(&iwqp->iwrcq->lock, flags1); + if (irdma_cq_empty(iwqp->iwrcq)) { + unsigned long flags2; + + spin_lock_irqsave(&iwqp->lock, flags2); + while (IRDMA_RING_MORE_WORK(*rq_ring)) { + cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); + if (!cmpl) { + spin_unlock_irqrestore(&iwqp->lock, flags2); + spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); + return; + } + + wqe_idx = rq_ring->tail; + irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + + cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx]; + cmpl->cpi.op_type = IRDMA_OP_TYPE_REC; + /* remove the RQ WR by moving RQ tail */ + IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); + ibdev_dbg(iwqp->iwrcq->ibcq.device, + "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n", + __func__, cmpl->cpi.wr_id, qp->qp_id, + wqe_idx); + list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated); + + compl_generated = true; + } + spin_unlock_irqrestore(&iwqp->lock, flags2); + spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); + if (compl_generated) + irdma_comp_handler(iwqp->iwrcq); + } else { + spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); + } +} diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 8cd5f9261692..a22afbb25bc5 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -21,30 +21,36 @@ static int irdma_query_device(struct ib_device *ibdev, return -EINVAL; memset(props, 0, sizeof(*props)); - ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + iwdev->netdev->dev_addr); props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | irdma_fw_minor_ver(&rf->sc_dev); - props->device_cap_flags = iwdev->device_cap_flags; + props->device_cap_flags = IB_DEVICE_MEM_WINDOW | + IB_DEVICE_MEM_MGT_EXTENSIONS; + props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; props->vendor_id = pcidev->vendor; props->vendor_part_id = pcidev->device; props->hw_ver = rf->pcidev->revision; - props->page_size_cap = SZ_4K | SZ_2M | SZ_1G; + props->page_size_cap = hw_attrs->page_size_cap; props->max_mr_size = hw_attrs->max_mr_size; props->max_qp = rf->max_qp - rf->used_qps; props->max_qp_wr = hw_attrs->max_qp_wr; props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags; props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags; props->max_cq = rf->max_cq - rf->used_cqs; - props->max_cqe = rf->max_cqe; + props->max_cqe = rf->max_cqe - 1; props->max_mr = rf->max_mr - rf->used_mrs; props->max_mw = props->max_mr; props->max_pd = rf->max_pd - rf->used_pds; props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; props->max_qp_rd_atom = hw_attrs->max_hw_ird; props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; - if (rdma_protocol_roce(ibdev, 1)) + if (rdma_protocol_roce(ibdev, 1)) { + props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN; props->max_pkeys = IRDMA_PKEY_TBL_SZ; + } + props->max_ah = rf->max_ah; props->max_mcast_grp = rf->max_mcg; 
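// [Editor's aside, not part of the patch] The irdma_query_device() hunk
// above stops copying the raw MAC into sys_image_guid and reports an
// EUI-64-style GUID instead. A minimal sketch of the difference,
// assuming the semantics of addrconf_addr_eui48() from
// include/net/addrconf.h:
//
//	u8 guid[8];
//
//	// old: ether_addr_copy() filled only the first 6 bytes
//	// new: 00:11:22:33:44:55 becomes 02:11:22:ff:fe:33:44:55 -
//	// ff:fe is inserted in the middle and the universal/local bit
//	// of the first octet is inverted
//	addrconf_addr_eui48(guid, iwdev->netdev->dev_addr);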
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX; @@ -255,7 +261,7 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp) struct cqp_cmds_info *cqp_info; struct irdma_device *iwdev = iwqp->iwdev; struct irdma_sc_qp *qp = &iwqp->sc_qp; - enum irdma_status_code status; + int status; cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); if (!cqp_request) @@ -293,13 +299,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp) static int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { +#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8) +#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd) struct ib_device *ibdev = uctx->device; struct irdma_device *iwdev = to_iwdev(ibdev); - struct irdma_alloc_ucontext_req req; + struct irdma_alloc_ucontext_req req = {}; struct irdma_alloc_ucontext_resp uresp = {}; struct irdma_ucontext *ucontext = to_ucontext(uctx); struct irdma_uk_attrs *uk_attrs; + if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || + udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) + return -EINVAL; + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) return -EINVAL; @@ -311,7 +323,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx, uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; /* GEN_1 legacy support with libi40iw */ - if (udata->outlen < sizeof(uresp)) { + if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { if (uk_attrs->hw_rev != IRDMA_GEN_1) return -EOPNOTSUPP; @@ -383,6 +395,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context) */ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { +#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd) struct irdma_pd *iwpd = to_iwpd(pd); struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; @@ -392,6 +405,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) u32 pd_id = 0; int err; + if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN) + return -EINVAL; + err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, &rf->next_pd); if (err) @@ -532,6 +548,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) irdma_modify_qp_to_err(&iwqp->sc_qp); + if (!iwqp->user_mode) + cancel_delayed_work_sync(&iwqp->dwork_flush); + irdma_qp_rem_ref(&iwqp->ibqp); wait_for_completion(&iwqp->free_qp); irdma_free_lsmm_rsrc(iwqp); @@ -591,7 +610,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, u32 sqdepth, rqdepth; u8 sqshift, rqshift; u32 size; - enum irdma_status_code status; + int status; struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; @@ -602,7 +621,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift, &sqdepth); if (status) - return -ENOMEM; + return status; if (uk_attrs->hw_rev == IRDMA_GEN_1) rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1; @@ -613,7 +632,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift, &rqdepth); if (status) - return -ENOMEM; + return status; iwqp->kqp.sq_wrid_mem = kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); @@ -667,7 +686,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp) struct irdma_cqp_request *cqp_request; struct 
@@ -532,6 +548,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
 		irdma_modify_qp_to_err(&iwqp->sc_qp);
 
+	if (!iwqp->user_mode)
+		cancel_delayed_work_sync(&iwqp->dwork_flush);
+
 	irdma_qp_rem_ref(&iwqp->ibqp);
 	wait_for_completion(&iwqp->free_qp);
 	irdma_free_lsmm_rsrc(iwqp);
@@ -591,7 +610,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 	u32 sqdepth, rqdepth;
 	u8 sqshift, rqshift;
 	u32 size;
-	enum irdma_status_code status;
+	int status;
 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
@@ -602,7 +621,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 	status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
 				   &sqdepth);
 	if (status)
-		return -ENOMEM;
+		return status;
 
 	if (uk_attrs->hw_rev == IRDMA_GEN_1)
 		rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
@@ -613,7 +632,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 	status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
 				   &rqdepth);
 	if (status)
-		return -ENOMEM;
+		return status;
 
 	iwqp->kqp.sq_wrid_mem = kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem),
 					GFP_KERNEL);
@@ -667,7 +686,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
 	struct irdma_cqp_request *cqp_request;
 	struct cqp_cmds_info *cqp_info;
 	struct irdma_create_qp_info *qp_info;
-	enum irdma_status_code status;
+	int status;
 
 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 	if (!cqp_request)
@@ -687,7 +706,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
 	status = irdma_handle_cqp_op(rf, cqp_request);
 	irdma_put_cqp_request(&rf->cqp, cqp_request);
 
-	return status ? -ENOMEM : 0;
+	return status;
 }
 
 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
@@ -787,6 +806,14 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
 	return 0;
 }
 
+static void irdma_flush_worker(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
+
+	irdma_generate_flush_completions(iwqp);
+}
+
 /**
  * irdma_create_qp - create qp
  * @ibqp: ptr of qp
@@ -797,15 +824,16 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 			   struct ib_qp_init_attr *init_attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
 	struct ib_pd *ibpd = ibqp->pd;
 	struct irdma_pd *iwpd = to_iwpd(ibpd);
 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
 	struct irdma_pci_f *rf = iwdev->rf;
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
-	struct irdma_create_qp_req req;
+	struct irdma_create_qp_req req = {};
 	struct irdma_create_qp_resp uresp = {};
 	u32 qp_num = 0;
-	enum irdma_status_code ret;
 	int err_code;
 	int sq_size;
 	int rq_size;
@@ -820,6 +848,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 	if (err_code)
 		return err_code;
 
+	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+		return -EINVAL;
+
 	sq_size = init_attr->cap.max_send_wr;
 	rq_size = init_attr->cap.max_recv_wr;
@@ -907,6 +939,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
 		irdma_setup_virt_qp(iwdev, iwqp, &init_info);
 	} else {
+		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info,
 						init_attr);
 	}
@@ -935,9 +968,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 	if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
 		init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
 
-	ret = irdma_sc_qp_init(qp, &init_info);
-	if (ret) {
-		err_code = -EPROTO;
+	err_code = irdma_sc_qp_init(qp, &init_info);
+	if (err_code) {
 		ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
 		goto error;
 	}
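Kernel QPs now carry a per-QP delayed work item: irdma_create_qp() arms dwork_flush with irdma_flush_worker(), the post paths re-arm it with mod_delayed_work() (which acts as a debounce), and irdma_destroy_qp() cancels it synchronously. A kernel-style sketch of that arm/re-arm/cancel lifecycle, assuming a driver-owned workqueue (demo_* names are illustrative):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define DEMO_FLUSH_DELAY_MS 20

    struct demo_qp {
        struct delayed_work dwork_flush;
        struct workqueue_struct *wq;   /* assumed allocated elsewhere */
    };

    static void demo_flush_worker(struct work_struct *work)
    {
        struct demo_qp *qp = container_of(to_delayed_work(work),
                                          struct demo_qp, dwork_flush);
        /* generate flush completions for qp here */
        (void)qp;
    }

    static void demo_qp_init(struct demo_qp *qp)
    {
        INIT_DELAYED_WORK(&qp->dwork_flush, demo_flush_worker);
    }

    /* mod_delayed_work() pushes the deadline out on every call, so a
     * burst of posts collapses into a single flush pass. */
    static void demo_qp_touch(struct demo_qp *qp)
    {
        mod_delayed_work(qp->wq, &qp->dwork_flush,
                         msecs_to_jiffies(DEMO_FLUSH_DELAY_MS));
    }

    static void demo_qp_destroy(struct demo_qp *qp)
    {
        cancel_delayed_work_sync(&qp->dwork_flush);
    }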
@@ -1104,6 +1136,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 int attr_mask, struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
@@ -1122,6 +1156,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	roce_info = &iwqp->roce_info;
 	udp_info = &iwqp->udp_info;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
@@ -1170,6 +1211,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		udp_info->ttl = attr->ah_attr.grh.hop_limit;
 		udp_info->flow_label = attr->ah_attr.grh.flow_label;
 		udp_info->tos = attr->ah_attr.grh.traffic_class;
+		udp_info->src_port =
+			rdma_get_udp_sport(udp_info->flow_label,
+					   ibqp->qp_num,
+					   roce_info->dest_qp);
 		irdma_qp_rem_qos(&iwqp->sc_qp);
 		dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
 		ctx_info->user_pri = rt_tos2priority(udp_info->tos);
@@ -1184,7 +1229,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		if (ret)
 			return ret;
 
-		if (vlan_id >= VLAN_N_VID && iwdev->dcb)
+		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
 			vlan_id = 0;
 		if (vlan_id < VLAN_N_VID) {
 			udp_info->insert_vlan_tag = true;
@@ -1197,7 +1242,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		av->attrs = attr->ah_attr;
 		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
 		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
-		if (av->sgid_addr.saddr.sa_family == AF_INET6) {
+		if (av->net_type == RDMA_NETWORK_IPV6) {
 			__be32 *daddr =
 				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
 			__be32 *saddr =
@@ -1213,7 +1258,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 				    &local_ip[0],
 				    false, NULL, IRDMA_ARP_RESOLVE);
-		} else {
+		} else if (av->net_type == RDMA_NETWORK_IPV4) {
 			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
 			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
@@ -1354,7 +1399,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
 
-			if (udata) {
+			if (udata && udata->inlen) {
 				if (ib_copy_from_udata(&ureq, udata,
 						       min(sizeof(ureq), udata->inlen)))
 					return -EINVAL;
@@ -1395,18 +1440,18 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 		if (iwqp->ibqp_state > IB_QPS_RTS &&
 		    !iwqp->flush_issued) {
-			iwqp->flush_issued = 1;
 			spin_unlock_irqrestore(&iwqp->lock, flags);
 			irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
 					       IRDMA_FLUSH_RQ |
 					       IRDMA_FLUSH_WAIT);
+			iwqp->flush_issued = 1;
 		} else {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
 		}
 	} else {
 		iwqp->ibqp_state = attr->qp_state;
 	}
-	if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+	if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 		struct irdma_ucontext *ucontext;
 
 		ucontext = rdma_udata_to_drv_context(udata,
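Setting udp_info->src_port from rdma_get_udp_sport() ties the RoCEv2 UDP source port to the flow label when one is present, otherwise deriving it from the local and remote QP numbers, so ECMP-capable switches can spread flows. A simplified, illustrative model of that fallback (the real helper lives in the RDMA core; the mixing constant and masks here are assumptions, not the core's exact math):

    #include <stdint.h>

    /* RoCEv2 UDP source ports should land in the dynamic range. */
    #define UDP_SPORT_BASE 0xC000u

    static uint16_t demo_udp_sport(uint32_t fl, uint32_t lqpn, uint32_t rqpn)
    {
        if (!fl)                                   /* no flow label set */
            fl = (lqpn ^ rqpn) * 0x9E3779B9u;      /* cheap hash of QPNs */
        /* fold the 20-bit flow label into 14 bits of port entropy */
        return (uint16_t)(UDP_SPORT_BASE | ((fl ^ (fl >> 14)) & 0x3FFFu));
    }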
@@ -1446,6 +1491,8 @@ exit:
 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		    struct ib_udata *udata)
 {
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_device *iwdev = iwqp->iwdev;
 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1460,6 +1507,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	int err;
 	unsigned long flags;
 
+	if (udata) {
+		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+			return -EINVAL;
+	}
+
 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
 		return -EOPNOTSUPP;
@@ -1545,7 +1599,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	case IB_QPS_RESET:
 		if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
 			spin_unlock_irqrestore(&iwqp->lock, flags);
-			if (udata) {
+			if (udata && udata->inlen) {
 				if (ib_copy_from_udata(&ureq, udata,
 						       min(sizeof(ureq), udata->inlen)))
 					return -EINVAL;
@@ -1615,13 +1669,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 	if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
 		if (dont_wait) {
-			if (iwqp->cm_id && iwqp->hw_tcp_state) {
+			if (iwqp->hw_tcp_state) {
 				spin_lock_irqsave(&iwqp->lock, flags);
 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
 				iwqp->last_aeq = IRDMA_AE_RESET_SENT;
 				spin_unlock_irqrestore(&iwqp->lock, flags);
-				irdma_cm_disconn(iwqp);
 			}
+			irdma_cm_disconn(iwqp);
 		} else {
 			int close_timer_started;
@@ -1642,7 +1696,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 			}
 		}
 	}
-	if (attr_mask & IB_QP_STATE && udata &&
+	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
 		struct irdma_ucontext *ucontext;
@@ -1752,16 +1806,18 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iwcq->lock, flags);
+	if (!list_empty(&iwcq->cmpl_generated))
+		irdma_remove_cmpls_list(iwcq);
 	if (!list_empty(&iwcq->resize_list))
 		irdma_process_resize_list(iwcq, iwdev, NULL);
 	spin_unlock_irqrestore(&iwcq->lock, flags);
 
 	irdma_cq_wq_destroy(iwdev->rf, cq);
-	irdma_cq_free_rsrc(iwdev->rf, iwcq);
 
 	spin_lock_irqsave(&iwceq->ce_lock, flags);
 	irdma_sc_cleanup_ceqes(cq, ceq);
 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+	irdma_cq_free_rsrc(iwdev->rf, iwcq);
 
 	return 0;
 }
@@ -1775,6 +1831,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 			   struct ib_udata *udata)
 {
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
 	struct irdma_cq *iwcq = to_iwcq(ibcq);
 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
 	struct irdma_cqp_request *cqp_request;
@@ -1787,7 +1844,6 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 	struct irdma_device *iwdev;
 	struct irdma_pci_f *rf;
 	struct irdma_cq_buf *cq_buf = NULL;
-	enum irdma_status_code status = 0;
 	unsigned long flags;
 	int ret;
@@ -1798,6 +1854,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 	      IRDMA_FEATURE_CQ_RESIZE))
 		return -EOPNOTSUPP;
 
+	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+		return -EINVAL;
+
 	if (entries > rf->max_cqe)
 		return -EINVAL;
@@ -1880,12 +1939,10 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
 	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
 	cqp_info->post_sq = 1;
-	status = irdma_handle_cqp_op(rf, cqp_request);
+	ret = irdma_handle_cqp_op(rf, cqp_request);
 	irdma_put_cqp_request(&rf->cqp, cqp_request);
-	if (status) {
-		ret = -EPROTO;
+	if (ret)
 		goto error;
-	}
 
 	spin_lock_irqsave(&iwcq->lock, flags);
 	if (cq_buf) {
@@ -1932,6 +1989,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 			   const struct ib_cq_init_attr *attr,
 			   struct ib_udata *udata)
 {
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
 	struct ib_device *ibdev = ibcq->device;
 	struct irdma_device *iwdev = to_iwdev(ibdev);
 	struct irdma_pci_f *rf = iwdev->rf;
@@ -1940,7 +1999,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	struct irdma_sc_cq *cq;
 	struct irdma_sc_dev *dev = &rf->sc_dev;
 	struct irdma_cq_init_info info = {};
-	enum irdma_status_code status;
 	struct irdma_cqp_request *cqp_request;
 	struct cqp_cmds_info *cqp_info;
 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
@@ -1951,6 +2009,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
 	if (err_code)
 		return err_code;
+
+	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+		return -EINVAL;
+
 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
 				    &rf->next_cq);
 	if (err_code)
@@ -1960,6 +2023,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	cq->back_cq = iwcq;
 	spin_lock_init(&iwcq->lock);
 	INIT_LIST_HEAD(&iwcq->resize_list);
+	INIT_LIST_HEAD(&iwcq->cmpl_generated);
 	info.dev = dev;
 	ukinfo->cq_size = max(entries, 4);
 	ukinfo->cq_id = cq_num;
@@ -2090,12 +2154,10 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	cqp_info->in.u.cq_create.cq = cq;
 	cqp_info->in.u.cq_create.check_overflow = true;
 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
-	status = irdma_handle_cqp_op(rf, cqp_request);
+	err_code = irdma_handle_cqp_op(rf, cqp_request);
 	irdma_put_cqp_request(&rf->cqp, cqp_request);
-	if (status) {
-		err_code = -ENOMEM;
+	if (err_code)
 		goto cq_free_rsrc;
-	}
 
 	if (udata) {
 		struct irdma_create_cq_resp resp = {};
@@ -2304,14 +2366,14 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct irdma_pble_info *pinfo;
 	u64 *pbl;
-	enum irdma_status_code status;
+	int status;
 	enum irdma_pble_level level = PBLE_LEVEL_1;
 
 	if (use_pbles) {
 		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
 					false);
 		if (status)
-			return -ENOMEM;
+			return status;
 
 		iwpbl->pbl_allocated = true;
 		level = palloc->level;
@@ -2429,7 +2491,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
 	struct irdma_cqp_request *cqp_request;
 	struct cqp_cmds_info *cqp_info;
-	enum irdma_status_code status;
+	int status;
 
 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 	if (!cqp_request)
@@ -2452,7 +2514,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 
-	return status ? -ENOMEM : 0;
+	return status;
 }
 
 /**
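A recurring change throughout this patch is dropping the private enum irdma_status_code in favor of plain int errnos: instead of each call site lossily translating an opaque status into -ENOMEM or -EPROTO, the lowest layer returns the real negative errno and callers simply propagate it. A before/after sketch of the call-site effect (names are invented for illustration):

    /* Before: private status codes forced a lossy translation. */
    enum demo_status { DEMO_OK = 0, DEMO_ERR_NO_MEMORY, DEMO_ERR_CFG };

    static int old_caller(enum demo_status (*op)(void))
    {
        return op() ? -12 /* -ENOMEM, whatever actually failed */ : 0;
    }

    /* After: the op itself returns a negative errno; callers pass it up
     * unchanged and the original failure reason survives to user space. */
    static int new_caller(int (*op)(void))
    {
        return op();
    }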
@@ -2504,7 +2566,7 @@ static int irdma_dealloc_mw(struct ib_mw *ibmw)
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.dealloc_stag.info;
 	memset(info, 0, sizeof(*info));
-	info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
+	info->pd_id = iwpd->sc_pd.pd_id;
 	info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
 	info->mr = false;
 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
@@ -2528,8 +2590,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
 	struct irdma_allocate_stag_info *info;
 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
-	enum irdma_status_code status;
-	int err = 0;
+	int status;
 	struct irdma_cqp_request *cqp_request;
 	struct cqp_cmds_info *cqp_info;
@@ -2551,10 +2612,8 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
 	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
-	if (status)
-		err = -ENOMEM;
 
-	return err;
+	return status;
 }
 
 /**
@@ -2570,9 +2629,8 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 	struct irdma_pble_alloc *palloc;
 	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
-	enum irdma_status_code status;
 	u32 stag;
-	int err_code = -ENOMEM;
+	int err_code;
 
 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
 	if (!iwmr)
@@ -2594,9 +2652,9 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
 	palloc = &iwpbl->pble_alloc;
 	iwmr->page_cnt = max_num_sg;
-	status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
-				true);
-	if (status)
+	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
+				  false);
+	if (err_code)
 		goto err_get_pble;
 
 	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
@@ -2631,8 +2689,16 @@ static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
 	if (unlikely(iwmr->npages == iwmr->page_cnt))
 		return -ENOMEM;
 
-	pbl = palloc->level1.addr;
-	pbl[iwmr->npages++] = addr;
+	if (palloc->level == PBLE_LEVEL_2) {
+		struct irdma_pble_info *palloc_info =
+			palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
+
+		palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
+	} else {
+		pbl = palloc->level1.addr;
+		pbl[iwmr->npages] = addr;
+	}
+	iwmr->npages++;
 
 	return 0;
 }
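irdma_set_page() now also handles a level-2 physical buffer list: page addresses go into 512-entry leaf pages, where the high bits of npages (npages >> PBLE_512_SHIFT) select the leaf and the low bits (npages & (PBLE_PER_PAGE - 1)) select the slot within it. A standalone sketch of that two-level indexing, with the leaf geometry mirroring the driver's 512-entries-per-page layout:

    #include <stdint.h>

    #define PBLE_PER_PAGE  512
    #define PBLE_512_SHIFT 9           /* log2(PBLE_PER_PAGE) */

    struct leaf { uint64_t addr[PBLE_PER_PAGE]; };

    /* Store the n-th page address: high bits pick the leaf page,
     * low bits pick the slot inside it. */
    static void set_page_level2(struct leaf *leaves, uint32_t n, uint64_t pa)
    {
        leaves[n >> PBLE_512_SHIFT].addr[n & (PBLE_PER_PAGE - 1)] = pa;
    }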
@@ -2667,10 +2733,9 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	struct irdma_reg_ns_stag_info *stag_info;
 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
-	enum irdma_status_code status;
-	int err = 0;
 	struct irdma_cqp_request *cqp_request;
 	struct cqp_cmds_info *cqp_info;
+	int ret;
 
 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 	if (!cqp_request)
@@ -2707,12 +2772,10 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
 	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
-	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+	ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
-	if (status)
-		err = -ENOMEM;
 
-	return err;
+	return ret;
 }
 
 /**
@@ -2728,6 +2791,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 				       u64 virt, int access,
 				       struct ib_udata *udata)
 {
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_ucontext *ucontext;
 	struct irdma_pble_alloc *palloc;
@@ -2745,6 +2809,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
 
+	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+		return ERR_PTR(-EINVAL);
+
 	region = ib_umem_get(pd->device, start, len, access);
 
 	if (IS_ERR(region)) {
@@ -2774,7 +2841,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
 		iwmr->page_size = ib_umem_find_best_pgsz(region,
-							 SZ_4K | SZ_2M | SZ_1G,
+							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
 							 virt);
 		if (unlikely(!iwmr->page_size)) {
 			kfree(iwmr);
@@ -2892,7 +2959,6 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_pbl *iwpbl;
 	struct irdma_mr *iwmr;
-	enum irdma_status_code status;
 	u32 stag;
 	int ret;
@@ -2920,10 +2986,9 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
 	iwmr->pgaddrmem[0] = addr;
 	iwmr->len = size;
 	iwmr->page_size = SZ_4K;
-	status = irdma_hwreg_mr(iwdev, iwmr, access);
-	if (status) {
+	ret = irdma_hwreg_mr(iwdev, iwmr, access);
+	if (ret) {
 		irdma_free_stag(iwdev, stag);
-		ret = -ENOMEM;
 		goto err;
 	}
@@ -2996,6 +3061,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct irdma_cqp_request *cqp_request;
 	struct cqp_cmds_info *cqp_info;
+	int status;
 
 	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
 		if (iwmr->region) {
@@ -3016,7 +3082,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.dealloc_stag.info;
 	memset(info, 0, sizeof(*info));
-	info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
+	info->pd_id = iwpd->sc_pd.pd_id;
 	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
 	info->mr = true;
 	if (iwpbl->pbl_allocated)
@@ -3026,8 +3092,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
-	irdma_handle_cqp_op(iwdev->rf, cqp_request);
+	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+	if (status)
+		return status;
+
 	irdma_free_stag(iwdev, iwmr->stag);
 done:
 	if (iwpbl->pbl_allocated)
@@ -3052,20 +3121,16 @@ static int irdma_post_send(struct ib_qp *ibqp,
 	struct irdma_qp_uk *ukqp;
 	struct irdma_sc_dev *dev;
 	struct irdma_post_sq_info info;
-	enum irdma_status_code ret;
 	int err = 0;
 	unsigned long flags;
 	bool inv_stag;
 	struct irdma_ah *ah;
-	bool reflush = false;
 
 	iwqp = to_iwqp(ibqp);
 	ukqp = &iwqp->sc_qp.qp_uk;
 	dev = &iwqp->iwdev->rf->sc_dev;
 
 	spin_lock_irqsave(&iwqp->lock, flags);
-	if (iwqp->flush_issued && ukqp->sq_flush_complete)
-		reflush = true;
 	while (ib_wr) {
 		memset(&info, 0, sizeof(info));
 		inv_stag = false;
@@ -3111,7 +3176,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
 					info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
 					info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
 				}
-				ret = irdma_uk_inline_send(ukqp, &info, false);
+				err = irdma_uk_inline_send(ukqp, &info, false);
 			} else {
 				info.op.send.num_sges = ib_wr->num_sge;
 				info.op.send.sg_list = ib_wr->sg_list;
@@ -3122,14 +3187,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
 					info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
 					info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
 				}
-				ret = irdma_uk_send(ukqp, &info, false);
-			}
-
-			if (ret) {
-				if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
-					err = -ENOMEM;
-				else
-					err = -EINVAL;
+				err = irdma_uk_send(ukqp, &info, false);
 			}
 			break;
 		case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3155,20 +3213,13 @@ static int irdma_post_send(struct ib_qp *ibqp,
 				info.op.inline_rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
 				info.op.inline_rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
-				ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
+				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
 			} else {
 				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
 				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
 				info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
 				info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
-				ret = irdma_uk_rdma_write(ukqp, &info, false);
-			}
-
-			if (ret) {
-				if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
-					err = -ENOMEM;
-				else
-					err = -EINVAL;
+				err = irdma_uk_rdma_write(ukqp, &info, false);
 			}
 			break;
 		case IB_WR_RDMA_READ_WITH_INV:
@@ -3185,21 +3236,12 @@ static int irdma_post_send(struct ib_qp *ibqp,
 			info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
 			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
-
-			ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
-			if (ret) {
-				if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
-					err = -ENOMEM;
-				else
-					err = -EINVAL;
-			}
+			err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
 			break;
 		case IB_WR_LOCAL_INV:
 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
-			ret = irdma_uk_stag_local_invalidate(ukqp, &info, true);
-			if (ret)
-				err = -ENOMEM;
+			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
 			break;
 		case IB_WR_REG_MR: {
 			struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
@@ -3221,10 +3263,8 @@ static int irdma_post_send(struct ib_qp *ibqp,
 			stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
 			if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
 				stag_info.chunk_size = 1;
-			ret = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
+			err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
 							true);
-			if (ret)
-				err = -ENOMEM;
 			break;
 		}
 		default:
@@ -3240,15 +3280,14 @@ static int irdma_post_send(struct ib_qp *ibqp,
 		ib_wr = ib_wr->next;
 	}
 
-	if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {
-		irdma_uk_qp_post_wr(ukqp);
-		spin_unlock_irqrestore(&iwqp->lock, flags);
-	} else if (reflush) {
-		ukqp->sq_flush_complete = false;
+	if (!iwqp->flush_issued) {
+		if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
+			irdma_uk_qp_post_wr(ukqp);
 		spin_unlock_irqrestore(&iwqp->lock, flags);
-		irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
 	} else {
 		spin_unlock_irqrestore(&iwqp->lock, flags);
+		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
 	if (err)
 		*bad_wr = ib_wr;
@@ -3269,29 +3308,21 @@ static int irdma_post_recv(struct ib_qp *ibqp,
 	struct irdma_qp *iwqp;
 	struct irdma_qp_uk *ukqp;
 	struct irdma_post_rq_info post_recv = {};
-	enum irdma_status_code ret = 0;
 	unsigned long flags;
 	int err = 0;
-	bool reflush = false;
 
 	iwqp = to_iwqp(ibqp);
 	ukqp = &iwqp->sc_qp.qp_uk;
 
 	spin_lock_irqsave(&iwqp->lock, flags);
-	if (iwqp->flush_issued && ukqp->rq_flush_complete)
-		reflush = true;
 	while (ib_wr) {
 		post_recv.num_sges = ib_wr->num_sge;
 		post_recv.wr_id = ib_wr->wr_id;
 		post_recv.sg_list = ib_wr->sg_list;
-		ret = irdma_uk_post_receive(ukqp, &post_recv);
-		if (ret) {
+		err = irdma_uk_post_receive(ukqp, &post_recv);
+		if (err) {
 			ibdev_dbg(&iwqp->iwdev->ibdev,
-				  "VERBS: post_recv err %d\n", ret);
-			if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
-				err = -ENOMEM;
-			else
-				err = -EINVAL;
+				  "VERBS: post_recv err %d\n", err);
 			goto out;
 		}
"VERBS: post_recv err %d\n", ret); - if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) - err = -ENOMEM; - else - err = -EINVAL; + "VERBS: post_recv err %d\n", err); goto out; } @@ -3299,13 +3330,10 @@ static int irdma_post_recv(struct ib_qp *ibqp, } out: - if (reflush) { - ukqp->rq_flush_complete = false; - spin_unlock_irqrestore(&iwqp->lock, flags); - irdma_flush_wqes(iwqp, IRDMA_FLUSH_RQ | IRDMA_REFLUSH); - } else { - spin_unlock_irqrestore(&iwqp->lock, flags); - } + spin_unlock_irqrestore(&iwqp->lock, flags); + if (iwqp->flush_issued) + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); if (err) *bad_wr = ib_wr; @@ -3336,6 +3364,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode return IB_WC_RETRY_EXC_ERR; case FLUSH_MW_BIND_ERR: return IB_WC_MW_BIND_ERR; + case FLUSH_REM_INV_REQ_ERR: + return IB_WC_REM_INV_REQ_ERR; case FLUSH_FATAL_ERR: default: return IB_WC_FATAL_ERR; @@ -3478,7 +3508,7 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc struct irdma_cq_buf *last_buf = NULL; struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; struct irdma_cq_buf *cq_buf; - enum irdma_status_code ret; + int ret; struct irdma_device *iwdev; struct irdma_cq_uk *ukcq; bool cq_new_cqe = false; @@ -3498,10 +3528,10 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc cq_new_cqe = true; continue; } - if (ret == IRDMA_ERR_Q_EMPTY) + if (ret == -ENOENT) break; /* QP using the CQ is destroyed. Skip reporting this CQE */ - if (ret == IRDMA_ERR_Q_DESTROYED) { + if (ret == -EFAULT) { cq_new_cqe = true; continue; } @@ -3517,16 +3547,21 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc /* check the current CQ for new cqes */ while (npolled < num_entries) { ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled); + if (ret == -ENOENT) { + ret = irdma_generated_cmpls(iwcq, cur_cqe); + if (!ret) + irdma_process_cqe(entry + npolled, cur_cqe); + } if (!ret) { ++npolled; cq_new_cqe = true; continue; } - if (ret == IRDMA_ERR_Q_EMPTY) + if (ret == -ENOENT) break; /* QP using the CQ is destroyed. 
-		if (ret == IRDMA_ERR_Q_DESTROYED) {
+		if (ret == -EFAULT) {
 			cq_new_cqe = true;
 			continue;
 		}
@@ -3548,7 +3583,7 @@ error:
 	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
 		  __func__, ret);
 
-	return -EINVAL;
+	return ret;
 }
 
 /**
@@ -3598,13 +3633,13 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
 	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED &&
 	    notify_flags != IB_CQ_SOLICITED)
 		promo_event = true;
 
-	if (!iwcq->armed || promo_event) {
-		iwcq->armed = true;
+	if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
 		iwcq->last_notify = cq_notify;
 		irdma_uk_cq_request_notification(ukcq, cq_notify);
 	}
 
-	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
 		ret = 1;
 	spin_unlock_irqrestore(&iwcq->lock, flags);
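Turning iwcq->armed from a bool into an atomic_t lets irdma_req_notify_cq() arm the CQ exactly once with a compare-and-swap: only the caller that flips 0 to 1 (or one promoting to a stronger notification type) writes the request-notification doorbell. The same idea in C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct demo_cq { atomic_int armed; };

    /* Returns true for the single caller that should ring the doorbell. */
    static bool demo_arm_cq(struct demo_cq *cq, bool promo_event)
    {
        int expected = 0;

        return atomic_compare_exchange_strong(&cq->armed, &expected, 1) ||
               promo_event;
    }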
@@ -3854,7 +3889,7 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
 	struct cqp_cmds_info *cqp_info;
 	struct irdma_cqp_request *cqp_request;
-	enum irdma_status_code status;
+	int status;
 
 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
 	if (!cqp_request)
@@ -3868,10 +3903,8 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
-	if (status)
-		return -ENOMEM;
 
-	return 0;
+	return status;
 }
 
 /**
@@ -3927,11 +3960,7 @@ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
 	int ret = 0;
 	bool ipv4;
 	u16 vlan_id;
-	union {
-		struct sockaddr saddr;
-		struct sockaddr_in saddr_in;
-		struct sockaddr_in6 saddr_in6;
-	} sgid_addr;
+	union irdma_sockaddr sgid_addr;
 	unsigned char dmac[ETH_ALEN];
 
 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
@@ -4067,11 +4096,7 @@ static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
 	int ret;
 	unsigned long flags;
-	union {
-		struct sockaddr saddr;
-		struct sockaddr_in saddr_in;
-		struct sockaddr_in6 saddr_in6;
-	} sgid_addr;
+	union irdma_sockaddr sgid_addr;
 
 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
@@ -4127,17 +4152,47 @@ static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
 	return 0;
 }
 
-/**
- * irdma_create_ah - create address handle
- * @ibah: address handle
- * @attr: address handle attributes
- * @udata: User data
- *
- * returns 0 on success, error otherwise
- */
-static int irdma_create_ah(struct ib_ah *ibah,
-			   struct rdma_ah_init_attr *attr,
-			   struct ib_udata *udata)
+static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
+{
+	struct irdma_pci_f *rf = iwdev->rf;
+	int err;
+
+	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
+			       &rf->next_ah);
+	if (err)
+		return err;
+
+	err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
+			      irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
+
+	if (err) {
+		ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
+		goto err_ah_create;
+	}
+
+	if (!sleep) {
+		int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
+
+		do {
+			irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+			mdelay(1);
+		} while (!ah->sc_ah.ah_info.ah_valid && --cnt);
+
+		if (!cnt) {
+			ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
+			err = -ETIMEDOUT;
+			goto err_ah_create;
+		}
+	}
+	return 0;
+
+err_ah_create:
+	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
+
+	return err;
+}
+
+static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
 {
 	struct irdma_pd *pd = to_iwpd(ibah->pd);
 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
@@ -4146,25 +4201,13 @@ static int irdma_create_ah(struct ib_ah *ibah,
 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
 	struct irdma_pci_f *rf = iwdev->rf;
 	struct irdma_sc_ah *sc_ah;
-	u32 ah_id = 0;
 	struct irdma_ah_info *ah_info;
-	struct irdma_create_ah_resp uresp;
-	union {
-		struct sockaddr saddr;
-		struct sockaddr_in saddr_in;
-		struct sockaddr_in6 saddr_in6;
-	} sgid_addr, dgid_addr;
+	union irdma_sockaddr sgid_addr, dgid_addr;
 	int err;
 	u8 dmac[ETH_ALEN];
 
-	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id,
-			       &rf->next_ah);
-	if (err)
-		return err;
-
 	ah->pd = pd;
 	sc_ah = &ah->sc_ah;
-	sc_ah->ah_info.ah_idx = ah_id;
 	sc_ah->ah_info.vsi = &iwdev->vsi;
 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
 	ah->sgid_index = ah_attr->grh.sgid_index;
@@ -4174,10 +4217,7 @@ static int irdma_create_ah(struct ib_ah *ibah,
 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
 	ah->av.attrs = *ah_attr;
 	ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
-	ah->av.sgid_addr.saddr = sgid_addr.saddr;
-	ah->av.dgid_addr.saddr = dgid_addr.saddr;
 	ah_info = &sc_ah->ah_info;
-	ah_info->ah_idx = ah_id;
 	ah_info->pd_idx = pd->sc_pd.pd_id;
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		ah_info->flow_label = ah_attr->grh.flow_label;
@@ -4186,7 +4226,7 @@ static int irdma_create_ah(struct ib_ah *ibah,
 	}
 
 	ether_addr_copy(dmac, ah_attr->roce.dmac);
-	if (rdma_gid_attr_network_type(sgid_attr) == RDMA_NETWORK_IPV4) {
+	if (ah->av.net_type == RDMA_NETWORK_IPV4) {
 		ah_info->ipv4_valid = true;
 		ah_info->dest_ip_addr[0] =
 			ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
@@ -4214,17 +4254,15 @@ static int irdma_create_ah(struct ib_ah *ibah,
 	err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
 				      ah_info->mac_addr);
 	if (err)
-		goto error;
+		return err;
 
 	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
 					      ah_info->ipv4_valid, dmac);
-	if (ah_info->dst_arpindex == -1) {
-		err = -EINVAL;
-		goto error;
-	}
+	if (ah_info->dst_arpindex == -1)
+		return -EINVAL;
 
-	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb)
+	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
 		ah_info->vlan_tag = 0;
 
 	if (ah_info->vlan_tag < VLAN_N_VID) {
@@ -4233,43 +4271,38 @@ static int irdma_create_ah(struct ib_ah *ibah,
 		rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
 	}
 
-	err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
-			      attr->flags & RDMA_CREATE_AH_SLEEPABLE,
-			      irdma_gsi_ud_qp_ah_cb, sc_ah);
-
-	if (err) {
-		ibdev_dbg(&iwdev->ibdev,
-			  "VERBS: CQP-OP Create AH fail");
-		goto error;
-	}
-
-	if (!(attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
-		int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
-
-		do {
-			irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
-			mdelay(1);
-		} while (!sc_ah->ah_info.ah_valid && --cnt);
+	return 0;
+}
 
-		if (!cnt) {
-			ibdev_dbg(&iwdev->ibdev,
-				  "VERBS: CQP create AH timed out");
-			err = -ETIMEDOUT;
-			goto error;
+/**
+ * irdma_ah_exists - Check for existing identical AH
+ * @iwdev: irdma device
+ * @new_ah: AH to check for
+ *
+ * returns true if AH is found, false if not found.
+ */
+static bool irdma_ah_exists(struct irdma_device *iwdev,
+			    struct irdma_ah *new_ah)
+{
+	struct irdma_ah *ah;
+	u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+		  new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+		  new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+		  new_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+	hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
+		/* Set ah_valid and ah_id the same so memcmp can work */
+		new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
+		new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
+		if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
+			    sizeof(ah->sc_ah.ah_info))) {
+			refcount_inc(&ah->refcnt);
+			new_ah->parent_ah = ah;
+			return true;
 		}
 	}
 
-	if (udata) {
-		uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
-		err = ib_copy_to_udata(udata, &uresp,
-				       min(sizeof(uresp), udata->outlen));
-	}
-	return 0;
-
-error:
-	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
-
-	return err;
+	return false;
 }
 
 /**
@@ -4282,6 +4315,17 @@ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
 	struct irdma_device *iwdev = to_iwdev(ibah->device);
 	struct irdma_ah *ah = to_iwah(ibah);
 
+	if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
+		mutex_lock(&iwdev->ah_tbl_lock);
+		if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
+			mutex_unlock(&iwdev->ah_tbl_lock);
+			return 0;
+		}
+		hash_del(&ah->parent_ah->list);
+		kfree(ah->parent_ah);
+		mutex_unlock(&iwdev->ah_tbl_lock);
+	}
+
 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
 			false, NULL, ah);
@@ -4292,6 +4336,84 @@ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
 }
 
 /**
+ * irdma_create_user_ah - create user address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: User data
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_user_ah(struct ib_ah *ibah,
+				struct rdma_ah_init_attr *attr,
+				struct ib_udata *udata)
+{
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
+	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+	struct irdma_create_ah_resp uresp;
+	struct irdma_ah *parent_ah;
+	int err;
+
+	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+		return -EINVAL;
+
+	err = irdma_setup_ah(ibah, attr);
+	if (err)
+		return err;
+	mutex_lock(&iwdev->ah_tbl_lock);
+	if (!irdma_ah_exists(iwdev, ah)) {
+		err = irdma_create_hw_ah(iwdev, ah, true);
+		if (err) {
+			mutex_unlock(&iwdev->ah_tbl_lock);
+			return err;
+		}
+		/* Add new AH to list */
+		parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
+		if (parent_ah) {
+			u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+				  parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+				  parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+				  parent_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+			ah->parent_ah = parent_ah;
+			hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
+			refcount_set(&parent_ah->refcnt, 1);
+		}
+	}
+	mutex_unlock(&iwdev->ah_tbl_lock);
+
+	uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
+	err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
+	if (err)
+		irdma_destroy_ah(ibah, attr->flags);
+
+	return err;
+}
+
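irdma_create_user_ah() now deduplicates address handles: the four destination-IP words are XOR-folded into a hash key, an existing entry whose ah_info matches just gets its refcount bumped, and destroy drops the cached parent only on the last put. A reduced userspace sketch of that keyed, refcounted lookup using a toy chained hash table (the driver itself uses linux/hashtable.h under a mutex; all demo_* names are illustrative):

    #include <stdint.h>
    #include <string.h>

    #define BUCKETS 64

    struct demo_ah {
        uint32_t dip[4];              /* destination IP words */
        unsigned int refcnt;
        struct demo_ah *next;
    };

    static struct demo_ah *tbl[BUCKETS];

    static unsigned int ah_key(const uint32_t dip[4])
    {
        return (dip[0] ^ dip[1] ^ dip[2] ^ dip[3]) % BUCKETS;
    }

    /* Return a cached AH for dip with its refcount bumped, or NULL if
     * the caller must create a new one and insert it into tbl[]. */
    static struct demo_ah *ah_lookup(const uint32_t dip[4])
    {
        struct demo_ah *ah;

        for (ah = tbl[ah_key(dip)]; ah; ah = ah->next)
            if (!memcmp(ah->dip, dip, sizeof(ah->dip))) {
                ah->refcnt++;
                return ah;
            }
        return NULL;
    }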
+/**
+ * irdma_create_ah - create address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: NULL
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
+			   struct ib_udata *udata)
+{
+	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+	int err;
+
+	err = irdma_setup_ah(ibah, attr);
+	if (err)
+		return err;
+	err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+
+	return err;
+}
+
 /**
  * irdma_query_ah - Query address handle
  * @ibah: pointer to address handle
  * @ah_attr: address handle attributes
@@ -4321,28 +4443,10 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
 	return IB_LINK_LAYER_ETHERNET;
 }
 
-static __be64 irdma_mac_to_guid(struct net_device *ndev)
-{
-	const unsigned char *mac = ndev->dev_addr;
-	__be64 guid;
-	unsigned char *dst = (unsigned char *)&guid;
-
-	dst[0] = mac[0] ^ 2;
-	dst[1] = mac[1];
-	dst[2] = mac[2];
-	dst[3] = 0xff;
-	dst[4] = 0xfe;
-	dst[5] = mac[3];
-	dst[6] = mac[4];
-	dst[7] = mac[5];
-
-	return guid;
-}
-
 static const struct ib_device_ops irdma_roce_dev_ops = {
 	.attach_mcast = irdma_attach_mcast,
 	.create_ah = irdma_create_ah,
-	.create_user_ah = irdma_create_ah,
+	.create_user_ah = irdma_create_user_ah,
 	.destroy_ah = irdma_destroy_ah,
 	.detach_mcast = irdma_detach_mcast,
 	.get_link_layer = irdma_get_link_layer,
@@ -4408,7 +4512,8 @@ static const struct ib_device_ops irdma_dev_ops = {
 static void irdma_init_roce_device(struct irdma_device *iwdev)
 {
 	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
-	iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
+	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+			    iwdev->netdev->dev_addr);
 	ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
 }
 
@@ -4421,7 +4526,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev)
 	struct net_device *netdev = iwdev->netdev;
 
 	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
-	ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
+	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+			    netdev->dev_addr);
 	iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
 	iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
 	iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
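Replacing the hand-rolled irdma_mac_to_guid() (and the plain ether_addr_copy() used for the iWARP node GUID) with addrconf_addr_eui48() standardizes on the modified EUI-64 expansion: flip the universal/local bit of the first MAC octet and splice 0xFF, 0xFE into the middle. The transform, spelled out as a self-contained function (this mirrors exactly what the removed helper above did):

    #include <stdint.h>

    /* EUI-48 MAC -> modified EUI-64, as used for a node/sys-image GUID. */
    static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
    {
        eui[0] = mac[0] ^ 2;   /* invert the universal/local bit */
        eui[1] = mac[1];
        eui[2] = mac[2];
        eui[3] = 0xFF;         /* fixed filler octets */
        eui[4] = 0xFE;
        eui[5] = mac[3];
        eui[6] = mac[4];
        eui[7] = mac[5];
    }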
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index d0fdef8d09ea..4309b7159f42 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -4,6 +4,7 @@
 #define IRDMA_VERBS_H
 
 #define IRDMA_MAX_SAVED_PHY_PGADDR	4
+#define IRDMA_FLUSH_DELAY_MS		20
 
 #define IRDMA_PKEY_TBL_SZ		1
 #define IRDMA_DEFAULT_PKEY		0xFFFF
@@ -25,14 +26,16 @@ struct irdma_pd {
 	struct irdma_sc_pd sc_pd;
 };
 
+union irdma_sockaddr {
+	struct sockaddr_in saddr_in;
+	struct sockaddr_in6 saddr_in6;
+};
+
 struct irdma_av {
 	u8 macaddr[16];
 	struct rdma_ah_attr attrs;
-	union {
-		struct sockaddr saddr;
-		struct sockaddr_in saddr_in;
-		struct sockaddr_in6 saddr_in6;
-	} sgid_addr, dgid_addr;
+	union irdma_sockaddr sgid_addr;
+	union irdma_sockaddr dgid_addr;
 	u8 net_type;
 };
@@ -43,6 +46,9 @@ struct irdma_ah {
 	struct irdma_av av;
 	u8 sgid_index;
 	union ib_gid dgid;
+	struct hlist_node list;
+	refcount_t refcnt;
+	struct irdma_ah *parent_ah; /* AH from cached list */
 };
 
 struct irdma_hmc_pble {
@@ -110,7 +116,7 @@ struct irdma_cq {
 	u16 cq_size;
 	u16 cq_num;
 	bool user_mode;
-	bool armed;
+	atomic_t armed;
 	enum irdma_cmpl_notify last_notify;
 	u32 polled_cmpls;
 	u32 cq_mem_size;
@@ -121,6 +127,12 @@ struct irdma_cq {
 	struct irdma_pbl *iwpbl_shadow;
 	struct list_head resize_list;
 	struct irdma_cq_poll_info cur_cqe;
+	struct list_head cmpl_generated;
+};
+
+struct irdma_cmpl_gen {
+	struct list_head list;
+	struct irdma_cq_poll_info cpi;
 };
 
 struct disconn_work {
@@ -161,6 +173,7 @@ struct irdma_qp {
 	refcount_t refcnt;
 	struct iw_cm_id *cm_id;
 	struct irdma_cm_node *cm_node;
+	struct delayed_work dwork_flush;
 	struct ib_mr *lsmm_mr;
 	atomic_t hw_mod_qp_pend;
 	enum ib_qp_state ibqp_state;
@@ -224,4 +237,7 @@ int irdma_ib_register_device(struct irdma_device *iwdev);
 void irdma_ib_unregister_device(struct irdma_device *iwdev);
 void irdma_ib_dealloc_device(struct ib_device *ibdev);
 void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
+void irdma_generate_flush_completions(struct irdma_qp *iwqp);
+void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
+int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
 #endif /* IRDMA_VERBS_H */
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
index b0d6ee0739f5..20bc8d0d7f1f 100644
--- a/drivers/infiniband/hw/irdma/ws.c
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 /* Copyright (c) 2017 - 2021 Intel Corporation */
 #include "osdep.h"
-#include "status.h"
 #include "hmc.h"
 #include "defs.h"
 #include "type.h"
@@ -87,8 +86,8 @@ static void irdma_free_node(struct irdma_sc_vsi *vsi,
  * @node: pointer to node
  * @cmd: add, remove or modify
  */
-static enum irdma_status_code
-irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)
+static int irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
+			    struct irdma_ws_node *node, u8 cmd)
 {
 	struct irdma_ws_node_info node_info = {};
@@ -106,7 +105,7 @@ irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)
 	node_info.enable = node->enable;
 	if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
 		ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n");
-		return IRDMA_ERR_NO_MEMORY;
+		return -ENOMEM;
 	}
 
 	if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
@@ -234,18 +233,18 @@ static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
  * @vsi: vsi pointer
  * @user_pri: user priority
 */
-enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 {
 	struct irdma_ws_node *ws_tree_root;
 	struct irdma_ws_node *vsi_node;
 	struct irdma_ws_node *tc_node;
 	u16 traffic_class;
-	enum irdma_status_code ret = 0;
+	int ret = 0;
 	int i;
 
 	mutex_lock(&vsi->dev->ws_mutex);
 	if (vsi->tc_change_pending) {
-		ret = IRDMA_ERR_NOT_READY;
+		ret = -EBUSY;
 		goto exit;
 	}
@@ -258,7 +257,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 		ws_tree_root = irdma_alloc_node(vsi, user_pri,
 						WS_NODE_TYPE_PARENT, NULL);
 		if (!ws_tree_root) {
-			ret = IRDMA_ERR_NO_MEMORY;
+			ret = -ENOMEM;
 			goto exit;
 		}
@@ -283,7 +282,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 		vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
 					    ws_tree_root);
 		if (!vsi_node) {
-			ret = IRDMA_ERR_NO_MEMORY;
+			ret = -ENOMEM;
 			goto vsi_add_err;
 		}
@@ -310,7 +309,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 		tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
 					   vsi_node);
 		if (!tc_node) {
-			ret = IRDMA_ERR_NO_MEMORY;
+			ret = -ENOMEM;
 			goto leaf_add_err;
 		}
diff --git a/drivers/infiniband/hw/irdma/ws.h b/drivers/infiniband/hw/irdma/ws.h
index f0e16f630701..d431e3327d26 100644
--- a/drivers/infiniband/hw/irdma/ws.h
+++ b/drivers/infiniband/hw/irdma/ws.h
@@ -34,7 +34,7 @@ struct irdma_ws_node {
 };
 
 struct irdma_sc_vsi;
-enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
 void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
 void irdma_ws_reset(struct irdma_sc_vsi *vsi);