 drivers/net/ethernet/qlogic/qed/qed_dev.c |  35
 drivers/net/ethernet/qlogic/qed/qed_int.c | 138
 drivers/net/ethernet/qlogic/qed/qed_int.h |   9
 3 files changed, 162 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 1fff0473ddbb..939e85cc63a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2038,9 +2038,12 @@ static void get_function_id(struct qed_hwfn *p_hwfn)
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
- struct qed_sb_cnt_info sb_cnt_info;
+ struct qed_sb_cnt_info sb_cnt;
u32 non_l2_sbs = 0;
+ memset(&sb_cnt, 0, sizeof(sb_cnt));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt);
+
if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
@@ -2048,7 +2051,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
* consideration as to how many l2 queues / cnqs we have.
*/
feat_num[QED_RDMA_CNQ] =
- min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
+ min_t(u32, sb_cnt.cnt / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
non_l2_sbs = feat_num[QED_RDMA_CNQ];
@@ -2057,14 +2060,11 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
p_hwfn->hw_info.personality == QED_PCI_ETH) {
/* Start by allocating VF queues, then PF's */
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
feat_num[QED_VF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE),
- sb_cnt_info.iov_cnt);
+ sb_cnt.iov_cnt);
feat_num[QED_PF_L2_QUE] = min_t(u32,
- RESC_NUM(p_hwfn, QED_SB) -
- non_l2_sbs,
+ sb_cnt.cnt - non_l2_sbs,
RESC_NUM(p_hwfn,
QED_L2_QUEUE) -
FEAT_NUM(p_hwfn,
@@ -2072,7 +2072,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
}
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
- feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+ feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn,
@@ -2082,7 +2082,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
- RESC_NUM(p_hwfn, QED_SB));
+ (int)sb_cnt.cnt);
}
const char *qed_hw_get_resc_name(enum qed_resources res_id)
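As a rough illustration of the arithmetic the reworked qed_hw_set_feat() performs, the stand-alone sketch below mirrors how the CNQ, VF L2 and PF L2 feature counts are carved out of the MFW/IGU SB budget. Everything in it (struct sb_budget, min_u32, the sample numbers) is a hypothetical stand-in for the driver's RESC_NUM()/FEAT_NUM() and struct qed_sb_cnt_info, not driver API.

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Hypothetical stand-in for struct qed_sb_cnt_info */
struct sb_budget {
	unsigned int cnt;	/* status blocks available to the PF */
	unsigned int iov_cnt;	/* status blocks available to its VFs */
};

int main(void)
{
	struct sb_budget sb = { .cnt = 32, .iov_cnt = 16 };
	unsigned int l2_queues = 64, cnq_ram = 8;
	unsigned int non_l2_sbs = 0, rdma_cnq, vf_l2, pf_l2;

	/* RoCE: one SB per CNQ, and no more than half the PF SBs */
	rdma_cnq = min_u32(sb.cnt / 2, cnq_ram);
	non_l2_sbs = rdma_cnq;

	/* VF L2 queues are limited by the VF SB budget */
	vf_l2 = min_u32(l2_queues, sb.iov_cnt);

	/* PF L2 queues take the SBs and L2 queues that remain */
	pf_l2 = min_u32(sb.cnt - non_l2_sbs, l2_queues - vf_l2);

	printf("CNQ=%u VF-L2=%u PF-L2=%u\n", rdma_cnq, vf_l2, pf_l2);
	return 0;
}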
@@ -2201,7 +2201,6 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev);
- struct qed_sb_cnt_info sb_cnt_info;
switch (res_id) {
case QED_L2_QUEUE:
@@ -2253,9 +2252,10 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_num = 1;
break;
case QED_SB:
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- *p_resc_num = sb_cnt_info.cnt;
+ /* Since we want its value to reflect whether MFW supports
+ * the new scheme, have a default of 0.
+ */
+ *p_resc_num = 0;
break;
default:
return -EINVAL;
@@ -2324,11 +2324,6 @@ static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Special handling for status blocks; Would be revised in future */
- if (res_id == QED_SB) {
- *p_resc_num -= 1;
- *p_resc_start -= p_hwfn->enabled_func_idx;
- }
out:
/* PQs have to divide by 8 [that's the HW granularity].
* Reduce number so it would fit.
@@ -2426,6 +2421,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return -EINVAL;
}
+ /* This will also learn the number of SBs from MFW */
+ if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
+ return -EINVAL;
+
qed_hw_set_feat(p_hwfn);
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
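The new QED_SB default of 0 is what later lets qed_int_igu_reset_cam() tell whether the MFW actually reported an SB allotment. A minimal sketch of that decision, with plain variables standing in for RESC_NUM() and the struct qed_igu_info counters (names and sample values are assumptions):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int mfw_sb_resc = 0;	/* stands in for RESC_NUM(p_hwfn, QED_SB) */
	unsigned int pf_sbs = 12;	/* PF SBs counted while reading the IGU CAM */
	bool allow_pf_vf_change;

	if (!mfw_sb_resc) {
		/* Old MFW: keep whatever the IGU CAM scan found */
		allow_pf_vf_change = false;
	} else {
		/* New MFW: its count includes the default SB, so the
		 * non-default PF SBs are mfw_sb_resc - 1.
		 */
		allow_pf_vf_change = true;
		pf_sbs = mfw_sb_resc - 1;
	}

	printf("allow_pf_vf_change=%d pf_sbs=%u\n", allow_pf_vf_change, pf_sbs);
	return 0;
}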
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index c9cad2e25dd8..719cdbfe1695 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1853,6 +1853,140 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
b_set);
}
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct qed_igu_block *p_block;
+ int pf_sbs, vf_sbs;
+ u16 igu_sb_id;
+ u32 val, rval;
+
+ if (!RESC_NUM(p_hwfn, QED_SB)) {
+ p_info->b_allow_pf_vf_change = false;
+ } else {
+ /* Use the numbers the MFW has provided -
+ * don't forget MFW accounts for the default SB as well.
+ */
+ p_info->b_allow_pf_vf_change = true;
+
+ if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
+ DP_INFO(p_hwfn,
+ "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+ RESC_NUM(p_hwfn, QED_SB) - 1,
+ p_info->usage.cnt);
+ p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
+ }
+
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ if (vfs != p_info->usage.iov_cnt)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+ p_info->usage.iov_cnt, vfs);
+
+ /* At this point we know the total number of SBs in the
+ * IGU and the number of PF SBs, so we can validate that
+ * enough are left for the VFs.
+ */
+ if (vfs > p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov - p_info->usage.cnt) {
+ DP_NOTICE(p_hwfn,
+ "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+ p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov,
+ p_info->usage.cnt, vfs);
+ return -EINVAL;
+ }
+
+ /* For now, cap the number of VF SBs at the
+ * number of VFs.
+ */
+ p_info->usage.iov_cnt = vfs;
+ }
+ }
+
+ /* Mark all SBs as free, now in the right PF/VFs division */
+ p_info->usage.free_cnt = p_info->usage.cnt;
+ p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+ p_info->usage.orig = p_info->usage.cnt;
+ p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+ /* We now proceed to re-configure the IGU cam to reflect the initial
+ * configuration. We can start with the Default SB.
+ */
+ pf_sbs = p_info->usage.cnt;
+ vf_sbs = p_info->usage.iov_cnt;
+
+ for (igu_sb_id = p_info->igu_dsb_id;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+ val = 0;
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID))
+ continue;
+
+ if (p_block->status & QED_IGU_STATUS_DSB) {
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_DSB;
+ } else if (pf_sbs) {
+ pf_sbs--;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = p_info->usage.cnt - pf_sbs;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_FREE;
+ } else if (vf_sbs) {
+ p_block->function_id =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf +
+ p_info->usage.iov_cnt - vf_sbs;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+ vf_sbs--;
+ } else {
+ p_block->function_id = 0;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ }
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ /* VF entries will be enabled when the VF is initialized */
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+ rval = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
+
+ if (rval != val) {
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id, val);
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+ igu_sb_id,
+ p_block->function_id,
+ p_block->is_pf,
+ p_block->vector_number, rval, val);
+ }
+ }
+
+ return 0;
+}
+
static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 igu_sb_id)
{
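The redistribution order inside qed_int_igu_reset_cam() - the default SB first, then the PF's own SBs with vector numbers 1..cnt, then one SB per VF, with any leftover lines unused - can be simulated outside the driver. The sketch below is illustrative only: struct igu_line and the sample counts are made-up stand-ins for struct qed_igu_block and the usage counters, and the default SB is simply modelled as the first line.

#include <stdio.h>

struct igu_line {		/* stands in for struct qed_igu_block */
	unsigned char is_pf;
	unsigned char function_id;
	unsigned char vector;
	unsigned char in_use;
};

int main(void)
{
	enum { CAM_LINES = 8 };
	struct igu_line cam[CAM_LINES] = { { 0 } };
	unsigned int pf_cnt = 3, vf_cnt = 2;	/* usage.cnt / usage.iov_cnt */
	unsigned int pf_id = 0, first_vf = 4;
	unsigned int pf_sbs = pf_cnt, vf_sbs = vf_cnt, i;

	for (i = 0; i < CAM_LINES; i++) {
		if (i == 0) {		/* default SB stays with the PF */
			cam[i] = (struct igu_line){ 1, pf_id, 0, 1 };
		} else if (pf_sbs) {	/* then the PF SBs, vectors 1..cnt */
			pf_sbs--;
			cam[i] = (struct igu_line){ 1, pf_id,
						    pf_cnt - pf_sbs, 1 };
		} else if (vf_sbs) {	/* then one SB per VF */
			cam[i] = (struct igu_line){ 0,
						    first_vf + vf_cnt - vf_sbs,
						    0, 1 };
			vf_sbs--;
		}			/* remaining lines stay unused */

		printf("line %u: pf=%u fn=%u vec=%u used=%u\n", i,
		       cam[i].is_pf, cam[i].function_id,
		       cam[i].vector, cam[i].in_use);
	}
	return 0;
}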
@@ -1919,7 +2053,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
/* Mark the First entry belonging to the PF or its VFs
- * as the default SB.
+ * as the default SB [we'll reset IGU prior to first usage].
*/
if ((p_block->status & QED_IGU_STATUS_VALID) &&
(p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
@@ -1952,7 +2086,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x\n",
+ "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
p_igu_info->igu_dsb_id,
p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
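For the write-back itself, the driver packs the block's fields into a 32-bit mapping line with SET_FIELD() and only touches the register when the packed value differs from what qed_rd() returned. The bit positions in the sketch below are invented for illustration and do not reflect the real IGU_MAPPING_LINE_* layout.

#include <stdint.h>
#include <stdio.h>

/* Simplified SET_FIELD()-style helper; the real code uses mask/shift macros */
#define SET_BITS(val, shift, width, field) \
	((val) |= (((uint32_t)(field) & ((1u << (width)) - 1)) << (shift)))

int main(void)
{
	uint32_t val = 0, prev = 0;	/* prev would come from qed_rd() */
	unsigned int function_id = 2, is_pf = 1, vector_number = 5;

	SET_BITS(val, 0, 8, function_id);	/* example positions only */
	SET_BITS(val, 8, 1, is_pf);		/* PF-valid bit            */
	SET_BITS(val, 9, 7, vector_number);
	SET_BITS(val, 16, 1, is_pf);		/* VF lines enabled later  */

	if (val != prev)			/* write only when it changed */
		printf("would rewrite mapping line: %08x -> %08x\n",
		       (unsigned int)prev, (unsigned int)val);
	return 0;
}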
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index bc61c5013b6e..5199634ed630 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -224,9 +224,18 @@ struct qed_igu_info {
struct qed_sb_cnt_info usage;
+ bool b_allow_pf_vf_change;
};
/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief Translate the weakly-defined client sb-id into an IGU sb-id
*
* @param p_hwfn