Diffstat (limited to 'drivers/infiniband/hw/irdma/ctrl.c')
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c  602
1 file changed, 287 insertions(+), 315 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 3141a9c85de5..58c0e181ca2b 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -3,7 +3,6 @@
#include <linux/etherdevice.h>
#include "osdep.h"
-#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -70,6 +69,31 @@ void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
}
}
+static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2p)
+{
+ u8 i;
+
+ vsi->qos_rel_bw = l2p->vsi_rel_bw;
+ vsi->qos_prio_type = l2p->vsi_prio_type;
+ vsi->dscp_mode = l2p->dscp_mode;
+ if (l2p->dscp_mode) {
+ memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ l2p->up2tc[i] = i;
+ }
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
+ vsi->qos[i].traffic_class = l2p->up2tc[i];
+ vsi->qos[i].rel_bw =
+ l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
+ vsi->qos[i].prio_type =
+ l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
+ vsi->qos[i].valid = false;
+ }
+}
+
/**
* irdma_change_l2params - given the new l2 parameters, change all qp
* @vsi: RDMA VSI pointer
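Note: the new irdma_set_qos_info() helper gathers the per-user-priority QoS setup that irdma_sc_vsi_init() previously carried inline and that irdma_change_l2params() now needs as well (see the later hunks). The sketch below is illustrative only, with simplified stand-in structures rather than the real irdma definitions; it shows the mapping the helper performs: in DSCP mode the user-priority to traffic-class table becomes an identity map, and each user priority then inherits its relative bandwidth and priority type from its traffic class.

/* Illustrative only: simplified stand-ins for the driver structures, not the
 * real irdma definitions, just to show how the per-priority mapping works.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_USER_PRIORITY 8

struct tc_params {
	uint8_t rel_bw;
	uint8_t prio_type;
};

struct l2params {
	bool dscp_mode;
	uint8_t up2tc[MAX_USER_PRIORITY];
	struct tc_params tc_info[MAX_USER_PRIORITY];
};

struct qos_entry {
	uint8_t traffic_class;
	uint8_t rel_bw;
	uint8_t prio_type;
	bool valid;
};

/* Mirrors the mapping in irdma_set_qos_info(): identity up2tc table in DSCP
 * mode, then each user priority copies settings from its traffic class.
 */
static void set_qos_info(struct qos_entry qos[MAX_USER_PRIORITY],
			 struct l2params *l2p)
{
	unsigned int i;

	if (l2p->dscp_mode)
		for (i = 0; i < MAX_USER_PRIORITY; i++)
			l2p->up2tc[i] = i;

	for (i = 0; i < MAX_USER_PRIORITY; i++) {
		qos[i].traffic_class = l2p->up2tc[i];
		qos[i].rel_bw = l2p->tc_info[qos[i].traffic_class].rel_bw;
		qos[i].prio_type = l2p->tc_info[qos[i].traffic_class].prio_type;
		qos[i].valid = false;
	}
}

int main(void)
{
	struct l2params l2p = { .dscp_mode = true };
	struct qos_entry qos[MAX_USER_PRIORITY];
	unsigned int i;

	for (i = 0; i < MAX_USER_PRIORITY; i++)
		l2p.tc_info[i].rel_bw = (uint8_t)(10 * i);

	set_qos_info(qos, &l2p);
	for (i = 0; i < MAX_USER_PRIORITY; i++)
		printf("up %u -> tc %u rel_bw %u\n", i,
		       (unsigned int)qos[i].traffic_class,
		       (unsigned int)qos[i].rel_bw);
	return 0;
}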
@@ -88,6 +112,7 @@ void irdma_change_l2params(struct irdma_sc_vsi *vsi,
return;
vsi->tc_change_pending = false;
+ irdma_set_qos_info(vsi, l2params);
irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
}
@@ -154,17 +179,16 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
- struct irdma_add_arp_cache_entry_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_add_arp_cache_entry_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8, info->reach_max);
set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
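Note: this hunk shows the pattern repeated throughout the rest of the patch: functions that returned enum irdma_status_code now return int, and the "CQP SQ ring full" case becomes -ENOMEM. Below is a hypothetical caller, not taken from the driver (irdma_sc_add_arp_cache_entry() is static to this file), sketching how the plain errno can now be propagated directly instead of being translated from a driver-private status code.

/* Hypothetical caller, for illustration only. */
static int example_add_arp_entry(struct irdma_sc_cqp *cqp,
				 struct irdma_add_arp_cache_entry_info *info,
				 u64 scratch)
{
	int ret;

	ret = irdma_sc_add_arp_cache_entry(cqp, info, scratch, true);
	if (ret)	/* e.g. -ENOMEM when the CQP SQ ring is full */
		return ret;

	return 0;
}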
@@ -192,16 +216,15 @@ irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
* @arp_index: arp index to delete arp entry
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- u16 arp_index, bool post_sq)
+static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 arp_index, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = arp_index |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
@@ -226,17 +249,16 @@ irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
- struct irdma_apbvt_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_apbvt_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->port);
@@ -274,7 +296,7 @@ irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
* quad hash entry in the hardware will point to iwarp's qp
* number and requires no calls from the driver.
*/
-static enum irdma_status_code
+static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
struct irdma_qhash_table_info *info,
u64 scratch, bool post_sq)
@@ -287,7 +309,7 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
@@ -350,10 +372,9 @@ irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
* @qp: sc qp
* @info: initialization qp info
*/
-enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
- struct irdma_qp_init_info *info)
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
{
- enum irdma_status_code ret_code;
+ int ret_code;
u32 pble_obj_cnt;
u16 wqe_size;
@@ -361,7 +382,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
info->qp_uk_init_info.max_rq_frag_cnt >
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
- return IRDMA_ERR_INVALID_FRAG_COUNT;
+ return -EINVAL;
qp->dev = info->pd->dev;
qp->vsi = info->vsi;
@@ -384,7 +405,7 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
(info->virtual_map && info->rq_pa >= pble_obj_cnt))
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
@@ -424,8 +445,8 @@ enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -433,12 +454,12 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
cqp = qp->dev->cqp;
if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
- qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
- return IRDMA_ERR_INVALID_QP_ID;
+ qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
+ return -EINVAL;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
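Note: besides the errno change, this hunk rewrites the QP ID upper-bound test from qp_id > (max_cnt - 1) to qp_id >= max_cnt. The two are equivalent for any nonzero max_cnt, but with an unsigned max_cnt of zero the old form wraps to a huge bound and never rejects anything. Whether max_cnt can actually be zero here is not established by this excerpt, so treat the following as a general sketch of why the >= form is preferred.

/* Minimal sketch of why "id >= max" beats "id > max - 1" for unsigned bounds:
 * when max is 0, max - 1 wraps to UINT_MAX and the old check never rejects.
 */
#include <stdbool.h>
#include <stdio.h>

static bool old_check(unsigned int id, unsigned int max)
{
	return id > (max - 1);		/* wraps when max == 0 */
}

static bool new_check(unsigned int id, unsigned int max)
{
	return id >= max;		/* safe for max == 0 */
}

int main(void)
{
	printf("max=0 id=5: old rejects=%d, new rejects=%d\n",
	       old_check(5, 0), new_check(5, 0));	/* old=0, new=1 */
	printf("max=8 id=8: old rejects=%d, new rejects=%d\n",
	       old_check(8, 8), new_check(8, 8));	/* both reject */
	return 0;
}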
@@ -475,9 +496,8 @@ enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_c
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
- struct irdma_modify_qp_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -488,7 +508,7 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
cqp = qp->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
if (info->dont_send_fin)
@@ -546,9 +566,8 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
* @ignore_mw_bnd: memory window bind flag
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
- bool remove_hash_idx, bool ignore_mw_bnd,
- bool post_sq)
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -557,7 +576,7 @@ enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
cqp = qp->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
@@ -739,16 +758,15 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- bool post_sq)
+static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
@@ -774,17 +792,16 @@ irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
- struct irdma_local_mac_entry_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_local_mac_entry_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 header;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
@@ -813,16 +830,16 @@ irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
* @ignore_ref_count: to force mac addr delete
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
- u16 entry_idx, u8 ignore_ref_count, bool post_sq)
+static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 entry_idx, u8 ignore_ref_count,
+ bool post_sq)
{
__le64 *wqe;
u64 header;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE,
IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
@@ -1035,10 +1052,9 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
- struct irdma_allocate_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_allocate_stag_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -1055,7 +1071,7 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
@@ -1097,10 +1113,9 @@ irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
- struct irdma_reg_ns_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ struct irdma_reg_ns_stag_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 fbo;
@@ -1118,7 +1133,7 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
else if (info->page_size == 0x1000)
page_size = IRDMA_PAGE_SIZE_4K;
else
- return IRDMA_ERR_PARAM;
+ return -EINVAL;
if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
@@ -1128,12 +1143,12 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
fbo = info->va & (info->page_size - 1);
set_64bit_val(wqe, 0,
@@ -1186,10 +1201,9 @@ irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
- struct irdma_dealloc_stag_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_dealloc_stag_info *info,
+ u64 scratch, bool post_sq)
{
u64 hdr;
__le64 *wqe;
@@ -1198,7 +1212,7 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1227,9 +1241,9 @@ irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
+ struct irdma_mw_alloc_info *info, u64 scratch,
+ bool post_sq)
{
u64 hdr;
struct irdma_sc_cqp *cqp;
@@ -1238,7 +1252,7 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
@@ -1268,9 +1282,9 @@ irdma_sc_mw_alloc(struct irdma_sc_dev *dev, struct irdma_mw_alloc_info *info,
* @info: fast mr info
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code
-irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
- struct irdma_fast_reg_stag_info *info, bool post_sq)
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq)
{
u64 temp, hdr;
__le64 *wqe;
@@ -1292,7 +1306,7 @@ irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
+ return -ENOMEM;
irdma_clr_wqes(&qp->qp_uk, wqe_idx);
@@ -1821,8 +1835,7 @@ void irdma_terminate_received(struct irdma_sc_qp *qp,
}
}
-static enum irdma_status_code irdma_null_ws_add(struct irdma_sc_vsi *vsi,
- u8 user_pri)
+static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{
return 0;
}
@@ -1845,7 +1858,6 @@ static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_init_info *info)
{
- struct irdma_l2params *l2p;
int i;
vsi->dev = info->dev;
@@ -1858,18 +1870,8 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
vsi->fcn_id = info->dev->hmc_fn_id;
- l2p = info->params;
- vsi->qos_rel_bw = l2p->vsi_rel_bw;
- vsi->qos_prio_type = l2p->vsi_prio_type;
+ irdma_set_qos_info(vsi, info->params);
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
- if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
- vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
- vsi->qos[i].traffic_class = info->params->up2tc[i];
- vsi->qos[i].rel_bw =
- l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
- vsi->qos[i].prio_type =
- l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
- vsi->qos[i].valid = false;
mutex_init(&vsi->qos[i].qos_mutex);
INIT_LIST_HEAD(&vsi->qos[i].qplist);
}
@@ -1918,8 +1920,8 @@ static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
* @vsi: pointer to the vsi structure
* @info: The info structure used for initialization
*/
-enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
- struct irdma_vsi_stats_info *info)
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info)
{
u8 fcn_id = info->fcn_id;
struct irdma_dma_mem *stats_buff_mem;
@@ -1934,7 +1936,7 @@ enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
&stats_buff_mem->pa,
GFP_KERNEL);
if (!stats_buff_mem->va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
vsi->pestat->gather_info.last_gather_stats_va =
@@ -1961,7 +1963,7 @@ stats_error:
stats_buff_mem->va, stats_buff_mem->pa);
stats_buff_mem->va = NULL;
- return IRDMA_ERR_CQP_COMPL_ERROR;
+ return -EIO;
}
/**
@@ -2023,19 +2025,19 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
* @info: gather stats info structure
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
- struct irdma_stats_gather_info *info, u64 scratch)
+static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_gather_info *info,
+ u64 scratch)
{
__le64 *wqe;
u64 temp;
if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
- return IRDMA_ERR_BUF_TOO_SHORT;
+ return -ENOMEM;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
@@ -2070,17 +2072,16 @@ irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
* @alloc: alloc vs. delete flag
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
- struct irdma_stats_inst_info *info, bool alloc,
- u64 scratch)
+static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_inst_info *info,
+ bool alloc, u64 scratch)
{
__le64 *wqe;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
@@ -2108,9 +2109,8 @@ irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
* @info: User priority map info
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
- struct irdma_up_info *info,
- u64 scratch)
+static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+ struct irdma_up_info *info, u64 scratch)
{
__le64 *wqe;
u64 temp = 0;
@@ -2118,7 +2118,7 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
temp |= (u64)info->map[i] << (i * 8);
@@ -2151,17 +2151,16 @@ static enum irdma_status_code irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
* @node_op: 0 for add 1 for modify, 2 for delete
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
- struct irdma_ws_node_info *info,
- enum irdma_ws_node_op node_op, u64 scratch)
+static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+ struct irdma_ws_node_info *info,
+ enum irdma_ws_node_op node_op, u64 scratch)
{
__le64 *wqe;
u64 temp = 0;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 32,
FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
@@ -2194,9 +2193,9 @@ irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
- struct irdma_qp_flush_info *info,
- u64 scratch, bool post_sq)
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq)
{
u64 temp = 0;
__le64 *wqe;
@@ -2215,13 +2214,13 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
ibdev_dbg(to_ibdev(qp->dev),
"CQP: Additional flush request ignored for qp %x\n",
qp->qp_uk.qp_id);
- return IRDMA_ERR_FLUSHED_Q;
+ return -EALREADY;
}
cqp = qp->pd->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
if (info->userflushcode) {
if (flush_rq)
@@ -2268,9 +2267,9 @@ enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
- struct irdma_gen_ae_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, u64 scratch,
+ bool post_sq)
{
u64 temp;
__le64 *wqe;
@@ -2280,7 +2279,7 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
cqp = qp->pd->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
info->ae_src);
@@ -2308,10 +2307,9 @@ static enum irdma_status_code irdma_sc_gen_ae(struct irdma_sc_qp *qp,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
- struct irdma_upload_context_info *info, u64 scratch,
- bool post_sq)
+static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+ struct irdma_upload_context_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -2320,7 +2318,7 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->buf_pa);
@@ -2349,21 +2347,20 @@ irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
- struct irdma_cqp_manage_push_page_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_manage_push_page_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
if (info->free_page &&
info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
- return IRDMA_ERR_INVALID_PUSH_PAGE_INDEX;
+ return -EINVAL;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, info->qs_handle);
hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
@@ -2389,16 +2386,15 @@ irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
* @qp: sc qp struct
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
- struct irdma_sc_qp *qp,
- u64 scratch)
+static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
{
u64 hdr;
__le64 *wqe;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
@@ -2420,16 +2416,15 @@ static enum irdma_status_code irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp,
* @qp: sc qp struct
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code irdma_sc_resume_qp(struct irdma_sc_cqp *cqp,
- struct irdma_sc_qp *qp,
- u64 scratch)
+static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
{
u64 hdr;
__le64 *wqe;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
@@ -2462,14 +2457,13 @@ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
* @cq: cq struct
* @info: cq initialization info
*/
-enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
- struct irdma_cq_init_info *info)
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
{
u32 pble_obj_cnt;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cq->cq_pa = info->cq_base_pa;
cq->dev = info->dev;
@@ -2500,23 +2494,21 @@ enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
* @check_overflow: flag for overflow check
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
- u64 scratch,
- bool check_overflow,
- bool post_sq)
+static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
+ bool check_overflow, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
u64 hdr;
struct irdma_sc_ceq *ceq;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
cqp = cq->dev->cqp;
- if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
- return IRDMA_ERR_INVALID_CQ_ID;
+ if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
+ return -EINVAL;
- if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
- return IRDMA_ERR_INVALID_CEQ_ID;
+ if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
@@ -2529,7 +2521,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
if (!wqe) {
if (ceq && ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, cq);
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
}
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
@@ -2575,8 +2567,7 @@ static enum irdma_status_code irdma_sc_cq_create(struct irdma_sc_cq *cq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
- bool post_sq)
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -2586,7 +2577,7 @@ enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
ceq = cq->dev->ceq[cq->ceq_id];
if (ceq && ceq->reg_cq)
@@ -2642,9 +2633,9 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag to post to sq
*/
-static enum irdma_status_code
-irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+ struct irdma_modify_cq_info *info, u64 scratch,
+ bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -2654,12 +2645,12 @@ irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info,
pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->cq_resize && info->virtual_map &&
info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, info->cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -2733,8 +2724,8 @@ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
* @tail: wqtail register value
* @count: how many times to try for completion
*/
-static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
- u32 tail, u32 count)
+static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+ u32 count)
{
u32 i = 0;
u32 newtail, error, val;
@@ -2746,7 +2737,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
ibdev_dbg(to_ibdev(cqp->dev),
"CQP: CQPERRCODES error_code[x%08X]\n",
error);
- return IRDMA_ERR_CQP_COMPL_ERROR;
+ return -EIO;
}
if (newtail != tail) {
/* SUCCESS */
@@ -2757,7 +2748,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp,
udelay(cqp->dev->hw_attrs.max_sleep_count);
}
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
}
/**
@@ -2912,10 +2903,9 @@ static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
* parses fpm query buffer and copy max_cnt and
* size value of hmc objects in hmc_info
*/
-static enum irdma_status_code
-irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
- struct irdma_hmc_info *hmc_info,
- struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
{
struct irdma_hmc_obj_info *obj_info;
u64 temp;
@@ -2954,7 +2944,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[IRDMA_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
if (!hmc_fpm_misc->xf_block_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
get_64bit_val(buf, 80, &temp);
@@ -2963,7 +2953,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
if (!hmc_fpm_misc->q1_block_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
@@ -2987,7 +2977,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
if (!hmc_fpm_misc->rrf_block_size &&
obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
@@ -2999,7 +2989,7 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
if (!hmc_fpm_misc->ooiscf_block_size &&
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
return 0;
}
@@ -3027,8 +3017,7 @@ static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
* @ceq: ceq sc structure
* @cq: cq sc structure
*/
-enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
- struct irdma_sc_cq *cq)
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
{
unsigned long flags;
@@ -3036,7 +3025,7 @@ enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq,
if (ceq->reg_cq_size == ceq->elem_cnt) {
spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
- return IRDMA_ERR_REG_CQ_FULL;
+ return -ENOMEM;
}
ceq->reg_cq[ceq->reg_cq_size++] = cq;
@@ -3077,15 +3066,15 @@ exit:
*
* Initializes the object and context buffers for a control Queue Pair.
*/
-enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
- struct irdma_cqp_init_info *info)
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info)
{
u8 hw_sq_size;
if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
((info->sq_size & (info->sq_size - 1))))
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
IRDMA_QUEUE_TYPE_CQP);
@@ -3135,13 +3124,12 @@ enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
* @maj_err: If error, major err number
* @min_err: If error, minor err number
*/
-enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
- u16 *min_err)
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
{
u64 temp;
u8 hw_rev;
u32 cnt = 0, p1, p2, val = 0, err_code;
- enum irdma_status_code ret_code;
+ int ret_code;
hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
@@ -3150,7 +3138,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
cqp->sdbuf.size, &cqp->sdbuf.pa,
GFP_KERNEL);
if (!cqp->sdbuf.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
spin_lock_init(&cqp->dev->cqp_lock);
@@ -3205,7 +3193,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
do {
if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
- ret_code = IRDMA_ERR_TIMEOUT;
+ ret_code = -ETIMEDOUT;
goto err;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3213,7 +3201,7 @@ enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_er
} while (!val);
if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
- ret_code = IRDMA_ERR_DEVICE_NOT_SUPPORTED;
+ ret_code = -EOPNOTSUPP;
goto err;
}
@@ -3254,7 +3242,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
u32 *wqe_idx)
{
__le64 *wqe = NULL;
- enum irdma_status_code ret_code;
+ int ret_code;
if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
ibdev_dbg(to_ibdev(cqp->dev),
@@ -3281,16 +3269,16 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
* irdma_sc_cqp_destroy - destroy cqp during close
* @cqp: struct for cqp hw
*/
-enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
{
u32 cnt = 0, val;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
do {
if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
- ret_code = IRDMA_ERR_TIMEOUT;
+ ret_code = -ETIMEDOUT;
break;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3335,8 +3323,8 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
* @ccq: ccq sc struct
* @info: completion q entry to return
*/
-enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
- struct irdma_ccq_cqe_info *info)
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info)
{
u64 qp_ctx, temp, temp1;
__le64 *cqe;
@@ -3344,7 +3332,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
u32 wqe_idx;
u32 error;
u8 polarity;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3354,7 +3342,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
get_64bit_val(cqe, 24, &temp);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
if (polarity != ccq->cq_uk.polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
get_64bit_val(cqe, 8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
@@ -3401,25 +3389,25 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
* @op_code: cqp opcode for completion
* @compl_info: completion q entry to return
*/
-enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
- struct irdma_ccq_cqe_info *compl_info)
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
+ struct irdma_ccq_cqe_info *compl_info)
{
struct irdma_ccq_cqe_info info = {};
struct irdma_sc_cq *ccq;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 cnt = 0;
ccq = cqp->dev->ccq;
while (1) {
if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
udelay(cqp->dev->hw_attrs.max_sleep_count);
continue;
}
if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
- ret_code = IRDMA_ERR_CQP_COMPL_ERROR;
+ ret_code = -EIO;
break;
}
/* make sure op code matches*/
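Note: in the polling path above, the three outcomes map onto distinct errnos: exhausting the retry budget yields -ETIMEDOUT, a completion flagged with an error yields -EIO, and the -ENOENT returned by irdma_sc_ccq_get_cqe_info() when the CCQ has no new entry is never surfaced to the caller, it simply drives another iteration. A self-contained sketch of that control flow follows; get_cqe() and short_delay() are stand-ins, not irdma functions, and the real code additionally checks that the completed opcode matches the requested one.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static unsigned int pending = 3;	/* pretend 3 polls find nothing */

static int get_cqe(bool *compl_error)	/* stand-in for irdma_sc_ccq_get_cqe_info() */
{
	if (pending) {
		pending--;
		return -ENOENT;		/* queue empty: caller retries */
	}
	*compl_error = false;
	return 0;
}

static void short_delay(void) { }	/* stand-in for the udelay() between polls */

static int poll_for_cqp_done(unsigned int max_tries)
{
	bool compl_error;
	unsigned int cnt = 0;

	while (1) {
		if (cnt++ > max_tries)
			return -ETIMEDOUT;	/* retry budget exhausted */
		if (get_cqe(&compl_error)) {	/* nothing completed yet */
			short_delay();
			continue;
		}
		if (compl_error)
			return -EIO;		/* completion reported an error */
		return 0;			/* success */
	}
}

int main(void)
{
	printf("poll result: %d\n", poll_for_cqp_done(100));	/* prints 0 */
	return 0;
}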
@@ -3443,17 +3431,16 @@ enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u
* @info: info for the manage function table operation
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code
-irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
- struct irdma_hmc_fcn_info *info,
- u64 scratch, bool post_sq)
+static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
+ struct irdma_hmc_fcn_info *info,
+ u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 hdr;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, 0);
set_64bit_val(wqe, 8, 0);
@@ -3486,8 +3473,7 @@ irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
* for fpm commit
* @cqp: struct for cqp hw
*/
-static enum irdma_status_code
-irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
{
return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
NULL);
@@ -3502,19 +3488,19 @@ irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
* @post_sq: flag for cqp db to ring
* @wait_type: poll ccq or cqp registers for cqp completion
*/
-static enum irdma_status_code
-irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
- struct irdma_dma_mem *commit_fpm_mem, bool post_sq,
- u8 wait_type)
+static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *commit_fpm_mem,
+ bool post_sq, u8 wait_type)
{
__le64 *wqe;
u64 hdr;
u32 tail, val, error;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, hmc_fn_id);
set_64bit_val(wqe, 32, commit_fpm_mem->pa);
@@ -3548,8 +3534,7 @@ irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
* query fpm
* @cqp: struct for cqp hw
*/
-static enum irdma_status_code
-irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
{
return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
NULL);
@@ -3564,19 +3549,19 @@ irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
* @post_sq: flag for cqp db to ring
* @wait_type: poll ccq or cqp registers for cqp completion
*/
-static enum irdma_status_code
-irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
- struct irdma_dma_mem *query_fpm_mem, bool post_sq,
- u8 wait_type)
+static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *query_fpm_mem,
+ bool post_sq, u8 wait_type)
{
__le64 *wqe;
u64 hdr;
u32 tail, val, error;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, hmc_fn_id);
set_64bit_val(wqe, 32, query_fpm_mem->pa);
@@ -3608,21 +3593,21 @@ irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id,
* @ceq: ceq sc structure
* @info: ceq initialization info
*/
-enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
- struct irdma_ceq_init_info *info)
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info)
{
u32 pble_obj_cnt;
if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
- if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
- return IRDMA_ERR_INVALID_CEQ_ID;
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
ceq->size = sizeof(*ceq);
ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
@@ -3655,8 +3640,8 @@ enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
- bool post_sq)
+static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -3665,7 +3650,7 @@ static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64
cqp = ceq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, ceq->elem_cnt);
set_64bit_val(wqe, 32,
(ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
@@ -3697,8 +3682,7 @@ static enum irdma_status_code irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64
* irdma_sc_cceq_create_done - poll for control ceq wqe to complete
* @ceq: ceq sc structure
*/
-static enum irdma_status_code
-irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
+static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
{
struct irdma_sc_cqp *cqp;
@@ -3711,7 +3695,7 @@ irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
* irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
* @ceq: ceq sc structure
*/
-enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
{
struct irdma_sc_cqp *cqp;
@@ -3730,9 +3714,9 @@ enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
* @ceq: ceq sc structure
* @scratch: u64 saved to be used during cqp completion
*/
-enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
{
- enum irdma_status_code ret_code;
+ int ret_code;
struct irdma_sc_dev *dev = ceq->dev;
dev->ccq->vsi = ceq->vsi;
@@ -3755,8 +3739,7 @@ enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratc
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
- bool post_sq)
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
@@ -3765,7 +3748,7 @@ enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratc
cqp = ceq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, ceq->elem_cnt);
set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
@@ -3884,19 +3867,19 @@ void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
* @aeq: aeq structure ptr
* @info: aeq initialization info
*/
-enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
- struct irdma_aeq_init_info *info)
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info)
{
u32 pble_obj_cnt;
if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
aeq->size = sizeof(*aeq);
aeq->polarity = 1;
@@ -3921,8 +3904,8 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
- u64 scratch, bool post_sq)
+static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -3931,7 +3914,7 @@ static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
cqp = aeq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, aeq->elem_cnt);
set_64bit_val(wqe, 32,
(aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
@@ -3960,8 +3943,8 @@ static enum irdma_status_code irdma_sc_aeq_create(struct irdma_sc_aeq *aeq,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
- u64 scratch, bool post_sq)
+static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
@@ -3974,7 +3957,7 @@ static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16, aeq->elem_cnt);
set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
@@ -3997,8 +3980,8 @@ static enum irdma_status_code irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq,
* @aeq: aeq structure ptr
* @info: aeqe info to be returned
*/
-enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
- struct irdma_aeqe_info *info)
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info)
{
u64 temp, compl_ctx;
__le64 *aeqe;
@@ -4012,7 +3995,7 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
if (aeq->polarity != polarity)
- return IRDMA_ERR_Q_EMPTY;
+ return -ENOENT;
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
@@ -4157,22 +4140,21 @@ void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
* @cq: sc's cq struct
* @info: info for control cq initialization
*/
-enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
- struct irdma_ccq_init_info *info)
+int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
{
u32 pble_obj_cnt;
if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
- return IRDMA_ERR_INVALID_SIZE;
+ return -EINVAL;
- if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
- return IRDMA_ERR_INVALID_CEQ_ID;
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
- return IRDMA_ERR_INVALID_PBLE_INDEX;
+ return -EINVAL;
cq->cq_pa = info->cq_pa;
cq->cq_uk.cq_base = info->cq_base;
@@ -4209,7 +4191,7 @@ enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *cq,
* irdma_sc_ccq_create_done - poll cqp for ccq create
* @ccq: ccq sc struct
*/
-static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
+static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
{
struct irdma_sc_cqp *cqp;
@@ -4225,10 +4207,10 @@ static inline enum irdma_status_code irdma_sc_ccq_create_done(struct irdma_sc_cq
* @check_overflow: overflow flag for ccq
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
- bool check_overflow, bool post_sq)
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq)
{
- enum irdma_status_code ret_code;
+ int ret_code;
ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
if (ret_code)
@@ -4250,19 +4232,18 @@ enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
* @scratch: u64 saved to be used during cqp completion
* @post_sq: flag for cqp db to ring
*/
-enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
- bool post_sq)
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
{
struct irdma_sc_cqp *cqp;
__le64 *wqe;
u64 hdr;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u32 tail, val, error;
cqp = ccq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
@@ -4301,13 +4282,12 @@ enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch
* @dev : ptr to irdma_dev struct
* @hmc_fn_id: hmc function id
*/
-enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
- u8 hmc_fn_id)
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
{
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
struct irdma_dma_mem query_fpm_mem;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u8 wait_type;
hmc_info = dev->hmc_info;
@@ -4338,14 +4318,13 @@ enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev,
* @dev : ptr to irdma_dev struct
* @hmc_fn_id: hmc function id
*/
-static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
- u8 hmc_fn_id)
+static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
{
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_obj_info *obj_info;
__le64 *buf;
struct irdma_dma_mem commit_fpm_mem;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u8 wait_type;
hmc_info = dev->hmc_info;
@@ -4408,9 +4387,8 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
* @info: sd info for wqe
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
- u64 scratch)
+static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
+ struct irdma_update_sds_info *info, u64 scratch)
{
u64 data;
u64 hdr;
@@ -4422,7 +4400,7 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
mem_entries = info->cnt - wqe_entries;
@@ -4488,12 +4466,11 @@ cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, struct irdma_update_sds_info *info,
* @info: sd info for sd's
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_update_pe_sds(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info, u64 scratch)
+static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info, u64 scratch)
{
struct irdma_sc_cqp *cqp = dev->cqp;
- enum irdma_status_code ret_code;
+ int ret_code;
ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
if (!ret_code)
@@ -4507,13 +4484,12 @@ irdma_update_pe_sds(struct irdma_sc_dev *dev,
* @dev: sc device struct
* @info: sd info for sd's
*/
-enum irdma_status_code
-irdma_update_sds_noccq(struct irdma_sc_dev *dev,
- struct irdma_update_sds_info *info)
+int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info)
{
u32 error, val, tail;
struct irdma_sc_cqp *cqp = dev->cqp;
- enum irdma_status_code ret_code;
+ int ret_code;
ret_code = cqp_sds_wqe_fill(cqp, info, 0);
if (ret_code)
@@ -4534,10 +4510,9 @@ irdma_update_sds_noccq(struct irdma_sc_dev *dev,
* @post_sq: flag for cqp db to ring
* @poll_registers: flag to poll register for cqp completion
*/
-enum irdma_status_code
-irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
- u8 hmc_fn_id, bool post_sq,
- bool poll_registers)
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers)
{
u64 hdr;
__le64 *wqe;
@@ -4545,7 +4520,7 @@ irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
@@ -4620,8 +4595,7 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
* irdma_sc_query_rdma_features_done - poll cqp for query features done
* @cqp: struct for cqp hw
*/
-static enum irdma_status_code
-irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
+static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
{
return irdma_sc_poll_for_cqp_op_done(cqp,
IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
@@ -4634,16 +4608,15 @@ irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
* @buf: buffer to hold query info
* @scratch: u64 saved to be used during cqp completion
*/
-static enum irdma_status_code
-irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
- struct irdma_dma_mem *buf, u64 scratch)
+static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
+ struct irdma_dma_mem *buf, u64 scratch)
{
__le64 *wqe;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
- return IRDMA_ERR_RING_FULL;
+ return -ENOMEM;
temp = buf->pa;
set_64bit_val(wqe, 32, temp);
@@ -4667,9 +4640,9 @@ irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
* irdma_get_rdma_features - get RDMA features
* @dev: sc device struct
*/
-enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
+int irdma_get_rdma_features(struct irdma_sc_dev *dev)
{
- enum irdma_status_code ret_code;
+ int ret_code;
struct irdma_dma_mem feat_buf;
u64 temp;
u16 byte_idx, feat_type, feat_cnt, feat_idx;
@@ -4679,7 +4652,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
&feat_buf.pa, GFP_KERNEL);
if (!feat_buf.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
if (!ret_code)
@@ -4690,7 +4663,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
get_64bit_val(feat_buf.va, 0, &temp);
feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
if (feat_cnt < 2) {
- ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
+ ret_code = -EINVAL;
goto exit;
} else if (feat_cnt > IRDMA_MAX_FEATURES) {
ibdev_dbg(to_ibdev(dev),
@@ -4704,7 +4677,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
feat_buf.size, &feat_buf.pa,
GFP_KERNEL);
if (!feat_buf.va)
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
if (!ret_code)
@@ -4715,7 +4688,7 @@ enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev)
get_64bit_val(feat_buf.va, 0, &temp);
feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
if (feat_cnt < 2) {
- ret_code = IRDMA_ERR_INVALID_FEAT_CNT;
+ ret_code = -EINVAL;
goto exit;
}
}
@@ -4794,7 +4767,7 @@ static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
* @dev: sc device struct
* @qp_count: desired qp count
*/
-enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
+int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
{
struct irdma_virt_mem virt_mem;
u32 i, mem_size;
@@ -4805,7 +4778,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
u32 loop_count = 0;
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
hmc_info = dev->hmc_info;
hmc_fpm_misc = &dev->hmc_fpm_misc;
@@ -4932,7 +4905,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
ibdev_dbg(to_ibdev(dev),
"HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
- return IRDMA_ERR_CFG;
+ return -EINVAL;
}
if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
@@ -4968,7 +4941,7 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (!virt_mem.va) {
ibdev_dbg(to_ibdev(dev),
"HMC: failed to allocate memory for sd_entry buffer\n");
- return IRDMA_ERR_NO_MEMORY;
+ return -ENOMEM;
}
hmc_info->sd_table.sd_entry = virt_mem.va;
@@ -4980,10 +4953,10 @@ enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
* @dev: rdma device
* @pcmdinfo: cqp command info
*/
-static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
- struct cqp_cmds_info *pcmdinfo)
+static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
{
- enum irdma_status_code status;
+ int status;
struct irdma_dma_mem val_mem;
bool alloc = false;
@@ -5245,7 +5218,7 @@ static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
pcmdinfo->in.u.mc_modify.scratch);
break;
default:
- status = IRDMA_NOT_SUPPORTED;
+ status = -EOPNOTSUPP;
break;
}
@@ -5257,10 +5230,10 @@ static enum irdma_status_code irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
* @dev: sc device struct
* @pcmdinfo: cqp command info
*/
-enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
- struct cqp_cmds_info *pcmdinfo)
+int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
{
- enum irdma_status_code status = 0;
+ int status = 0;
unsigned long flags;
spin_lock_irqsave(&dev->cqp_lock, flags);
@@ -5276,9 +5249,9 @@ enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
* irdma_process_bh - called from tasklet for cqp list
* @dev: sc device struct
*/
-enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev)
+int irdma_process_bh(struct irdma_sc_dev *dev)
{
- enum irdma_status_code status = 0;
+ int status = 0;
struct cqp_cmds_info *pcmdinfo;
unsigned long flags;
@@ -5366,12 +5339,11 @@ static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
* @dev: Device pointer
* @info: Device init info
*/
-enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
- struct irdma_sc_dev *dev,
- struct irdma_device_init_info *info)
+int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info)
{
u32 val;
- enum irdma_status_code ret_code = 0;
+ int ret_code = 0;
u8 db_size;
INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
@@ -5415,7 +5387,7 @@ enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
irdma_sc_init_hw(dev);
if (irdma_wait_pe_ready(dev))
- return IRDMA_ERR_TIMEOUT;
+ return -ETIMEDOUT;
val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
@@ -5423,7 +5395,7 @@ enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver,
ibdev_dbg(to_ibdev(dev),
"DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
val, db_size);
- return IRDMA_ERR_PE_DOORBELL_NOT_ENA;
+ return -ENODEV;
}
dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
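
Note: taken together, the hunks above replace every driver-private status code used in ctrl.c with a standard errno. The mapping visible in this excerpt is roughly:

  IRDMA_ERR_RING_FULL, IRDMA_ERR_NO_MEMORY, IRDMA_ERR_REG_CQ_FULL,
  IRDMA_ERR_BUF_TOO_SHORT, IRDMA_ERR_QP_TOOMANY_WRS_POSTED      -> -ENOMEM
  IRDMA_ERR_PARAM, IRDMA_ERR_CFG, and the IRDMA_ERR_INVALID_* codes
  (size, frag count, QP/CQ/CEQ id, PBLE or push page index,
  feature count)                                                -> -EINVAL
  IRDMA_ERR_TIMEOUT                                             -> -ETIMEDOUT
  IRDMA_ERR_CQP_COMPL_ERROR                                     -> -EIO
  IRDMA_ERR_Q_EMPTY                                             -> -ENOENT
  IRDMA_ERR_FLUSHED_Q                                           -> -EALREADY
  IRDMA_NOT_SUPPORTED, IRDMA_ERR_DEVICE_NOT_SUPPORTED           -> -EOPNOTSUPP
  IRDMA_ERR_PE_DOORBELL_NOT_ENA                                 -> -ENODEV

Hunks outside this excerpt may add further cases; the list covers only what is shown here.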