Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 521
1 file changed, 345 insertions(+), 176 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d6430dfebd83..2f1049b0b93a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -44,9 +44,9 @@
#define CDU_VALIDATION_DEFAULT_CFG 61
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
- {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
- {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
- {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+ {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
@@ -61,6 +61,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
+/* Max link speed (in Mbps) */
+#define QM_MAX_LINK_SPEED 100000
+
/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
@@ -128,8 +131,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
-
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -140,6 +141,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+/* Returns the VOQ line credit for the specified number of PBF command lines.
+ * PBF lines are specified in 256b units.
+ */
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
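The credit encoding above is easier to follow with a number plugged in. A minimal stand-alone sketch, assuming the sign bit sits at bit 15 of the credit register (the driver takes the real value from QM_LINE_CRD_REG_SIGN_BIT, defined earlier in this file):

/* Stand-alone sketch only; the sign-bit position is an assumption made for
 * this example, not the driver's authoritative QM_LINE_CRD_REG_SIGN_BIT.
 */
#include <stdio.h>

#define SKETCH_LINE_CRD_SIGN_BIT (1U << 15)
#define SKETCH_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | SKETCH_LINE_CRD_SIGN_BIT)

int main(void)
{
	/* 150 pure-LB command lines (256b units) -> (150 - 4) * 2 = 292 credits,
	 * OR'ed with the sign bit before being stored in the runtime array.
	 */
	printf("pure LB VOQ line credit word: 0x%x\n", SKETCH_VOQ_LINE_CRD(150));
	return 0;
}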
@@ -178,14 +182,14 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
ext_voq, wrr) \
do { \
typeof(map) __map; \
memset(&__map, 0, sizeof(__map)); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
- rl_valid); \
+ rl_valid ? 1 : 0);\
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
vp_pq_id); \
SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
@@ -200,9 +204,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
- ((rl_valid) << 22) | ((rl) << 24))
+ ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
+ (((rl) >> 8) << 9))
+
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+ XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id)
/******************** INTERNAL IMPLEMENTATION *********************/
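The updated PQ_INFO_ELEMENT in the hunk above splits the rate-limiter ID: the low byte lands in bits 31:24 and the remaining high bits are folded in starting at bit 9. A hedged decode sketch (the 3-bit width assumed for the high part is inferred from the shifts, not taken from a firmware header):

/* Illustrative only; masks are assumptions derived from the shifts in
 * PQ_INFO_ELEMENT, not firmware-defined constants.
 */
#include <stdint.h>
#include <stdbool.h>

static inline uint16_t sketch_pq_info_rl_id(uint32_t elem)
{
	/* low 8 bits of the RL ID at [31:24], assumed high bits at [11:9] */
	return ((elem >> 24) & 0xff) | (((elem >> 9) & 0x7) << 8);
}

static inline bool sketch_pq_info_rl_valid(uint32_t elem)
{
	return (elem >> 22) & 0x1;	/* rl_valid bit */
}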
@@ -228,9 +235,6 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
STORE_RT_REG(p_hwfn,
QM_REG_RLPFVOQENABLE_RT_OFFSET,
(u32)voq_bit_mask);
- if (num_ext_voqs >= 32)
- STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
- (u32)(voq_bit_mask >> 32));
/* Write RL period */
STORE_RT_REG(p_hwfn,
@@ -259,12 +263,12 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
QM_WFQ_UPPER_BOUND);
}
-/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
+/* Prepare global RL enable/disable runtime init values */
+static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
- vport_rl_en ? 1 : 0);
- if (vport_rl_en) {
+ global_rl_en ? 1 : 0);
+ if (global_rl_en) {
/* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
@@ -331,8 +335,7 @@ static void qed_cmdq_lines_rt_init(
continue;
/* Find number of command queue lines to divide between the
- * active physical TCs. In E5, 1/8 of the lines are reserved.
- * the lines for pure LB TC are subtracted.
+ * active physical TCs.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
phys_lines -= PBF_CMDQ_PURE_LB_LINES;
@@ -361,11 +364,30 @@ static void qed_cmdq_lines_rt_init(
ext_voq = qed_get_ext_voq(p_hwfn,
port_id,
PURE_LB_TC, max_phys_tcs_per_port);
- qed_cmdq_lines_voq_rt_init(p_hwfn,
- ext_voq, PBF_CMDQ_PURE_LB_LINES);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ PBF_CMDQ_PURE_LB_LINES);
}
}
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom.
+ * b. B = B - 38 (remainder after global headroom allocation).
+ * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ * is not enabled and allocated for each TC.
+ */
static void qed_btb_blocks_rt_init(
struct qed_hwfn *p_hwfn,
u8 max_ports_per_engine,
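To make the allocation steps in the comment above concrete, here is a worked numeric example (the starting block count is illustrative, not a real per-port value):

/* Example only: B = 266 guaranteed BTB blocks, C = 3 physical TCs.
 *
 *   a. global per-port headroom:            38 blocks
 *   b. B = 266 - 38                       = 228 blocks
 *   c. pure LB VOQ: max(38, 228 / 3.7)    = max(38, 61) = 61 blocks
 *   d. B = 228 - 61                       = 167 blocks
 *   e. per physical TC: 167 / 3           =  55 blocks (integer division)
 */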
@@ -424,6 +446,34 @@ static void qed_btb_blocks_rt_init(
}
}
+/* Prepare global rate limiter runtime init values.
+ * Set the max link speed (100 Gbps) for each rate limiter.
+ * Return -1 on error.
+ */
+static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT;
+ u32 inc_val;
+ u16 rl_id;
+
+ /* Go over all global RLs */
+ for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
+ inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
+ }
+
+ return 0;
+}
+
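Common-phase init above opens every global rate limiter at the maximum link speed; a specific limit can then be tightened at runtime through qed_init_global_rl(), which is reworked later in this patch. A hedged usage sketch, assuming a PTT window has already been acquired and that rates are given in Mbps (matching QM_MAX_LINK_SPEED):

/* Illustrative caller only; rl_id 5 and the 10 Gbps cap are example values. */
static int sketch_cap_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	return qed_init_global_rl(p_hwfn, p_ptt, 5 /* rl_id */,
				  10000 /* rate_limit in Mbps */);
}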
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -460,18 +510,17 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
- u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
- u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u16 *p_first_tx_pq_id, vport_id_in_pf;
struct qm_rf_pq_map_e4 tx_pq_map;
- bool is_vf_pq, rl_valid;
- u16 *p_first_tx_pq_id;
+ u8 tc_id = pq_params[i].tc_id;
+ bool is_vf_pq;
+ u8 ext_voq;
ext_voq = qed_get_ext_voq(p_hwfn,
pq_params[i].port_id,
tc_id,
p_params->max_phys_tcs_per_port);
is_vf_pq = (i >= p_params->num_pf_pqs);
- rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
@@ -492,21 +541,14 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
map_val);
}
- /* Check RL ID */
- if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- rl_valid = false;
- }
-
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
E4,
pq_id,
- rl_valid ? 1 : 0,
*p_first_tx_pq_id,
- rl_valid ? pq_params[i].vport_id : 0,
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id,
ext_voq, pq_params[i].wrr_group);
/* Set PQ base address */
@@ -529,9 +571,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
p_params->pf_id,
tc_id,
pq_params[i].port_id,
- rl_valid ? 1 : 0,
- rl_valid ?
- pq_params[i].vport_id : 0);
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id);
qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
pq_info);
}
@@ -669,19 +710,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
* Return -1 on error.
*/
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
- u8 num_vports,
+ u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id;
+ u16 vport_pq_id, i;
u32 inc_val;
- u8 tc, i;
+ u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].vport_wfq)
+ if (!vport_params[i].wfq)
continue;
- inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
"Invalid VPORT WFQ weight configuration\n");
@@ -706,48 +747,6 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
-/* Prepare VPORT RL runtime init values for the specified VPORTs.
- * Return -1 on error.
- */
-static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
- u8 start_vport,
- u8 num_vports,
- u32 link_speed,
- struct init_qm_vport_params *vport_params)
-{
- u8 i, vport_id;
- u32 inc_val;
-
- if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
- return -1;
- }
-
- /* Go over all PF VPORTs */
- for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
- vport_params[i].vport_rl :
- link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_VP_RL_UPPER_BOUND(link_speed) |
- (u32)QM_RL_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
- inc_val);
- }
-
- return 0;
-}
-
static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -799,23 +798,20 @@ u32 qed_qm_pf_mem_size(u32 num_pf_cids,
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params)
{
- /* Init AFullOprtnstcCrdMask */
- u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (p_params->pf_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (p_params->vport_wfq_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (p_params->pf_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (p_params->vport_rl_en <<
- QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ u32 mask = 0;
+ /* Init AFullOprtnstcCrdMask */
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
+ QM_OPPOR_LINE_VOQ_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
+ p_params->global_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
+ SET_FIELD(mask,
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
/* Enable/disable PF RL */
@@ -824,8 +820,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
/* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
- /* Enable/disable VPORT RL */
- qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+ /* Enable/disable global RL */
+ qed_enable_global_rl(p_hwfn, p_params->global_rl_en);
/* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
@@ -842,6 +838,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
p_params->max_phys_tcs_per_port,
p_params->port_params);
+ qed_global_rl_rt_init(p_hwfn);
+
return 0;
}
@@ -853,7 +851,9 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
p_params->num_tids) *
QM_OTHER_PQS_PER_PF;
- u8 tc, i;
+ u16 i;
+ u8 tc;
+
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
@@ -878,16 +878,10 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
- /* Set VPORT WFQ */
+ /* Init VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
- /* Set VPORT RL */
- if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, p_params->link_speed,
- vport_params))
- return -1;
-
return 0;
}
@@ -925,18 +919,19 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
u16 vport_pq_id;
u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ inc_val = QM_WFQ_INC_VAL(wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
+ /* A VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)
@@ -948,28 +943,20 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id, u32 vport_rl, u32 link_speed)
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
{
- u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ u32 inc_val;
- if (vport_id >= max_qm_global_rls) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT ID for rate limiter configuration\n");
+ inc_val = QM_RL_INC_VAL(rate_limit);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
+ DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
return -1;
}
- inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
- DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
- return -1;
- }
-
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
- qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
}
@@ -1013,7 +1000,6 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
-
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
do { \
typeof(var) *__p_var = &(var); \
@@ -1021,8 +1007,59 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
*__p_var = (*__p_var & ~BIT(__offset)) | \
((enable) ? BIT(__offset) : 0); \
} while (0)
-#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
-#define PRS_ETH_OUTPUT_FORMAT -46832
+
+#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
+#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910
+
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, \
+ ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
+ } while (0)
+
+/**
+ * @brief qed_dmae_to_grc - internal function that writes from host memory to
+ * wide-bus registers (split registers are not supported yet)
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param p_data - pointer to source data.
+ * @param addr - Destination register address.
+ * @param len_in_dwords - data length in DWORDS (u32)
+ */
+static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_data, u32 addr, u32 len_in_dwords)
+{
+ struct qed_dmae_params params = {};
+ int rc;
+
+ if (!p_data)
+ return -1;
+
+ /* Set DMAE params */
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)(p_data),
+ addr, len_in_dwords, &params);
+
+ /* If not read using DMAE, read using GRC */
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+ /* write to registers using GRC */
+ ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
+ }
+
+ return len_in_dwords;
+}
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)
@@ -1166,8 +1203,8 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
-#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,6 +1245,8 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
+ struct regpair ram_line = { };
+
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1217,12 +1256,9 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- qed_wr(p_hwfn,
- p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- 0);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
}
void qed_gft_config(struct qed_hwfn *p_hwfn,
@@ -1232,7 +1268,8 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp,
bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
+ u32 reg_val, cam_line, search_non_ip_as_gft;
+ struct regpair ram_line = { };
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,
@@ -1298,35 +1335,33 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ram_line_lo = 0;
- ram_line_hi = 0;
/* Search no IP as GFT */
search_non_ip_as_gft = 0;
/* Tunnel type */
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
- SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
- SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+ SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
/* Allow tunneled traffic without inner IP */
search_non_ip_as_gft = 1;
@@ -1334,24 +1369,17 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn,
p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
- ram_line_lo);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
- ram_line_hi);
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
/* Set default profile so that no filter match will happen */
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
- qed_wr(p_hwfn,
- p_ptt,
- PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
- PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+ ram_line.lo = 0xffffffff;
+ ram_line.hi = 0x3ff;
+ qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH,
+ sizeof(ram_line) / REG_SIZE);
/* Enable gft search */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
@@ -1544,3 +1572,144 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
}
}
+
+#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
+#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
+
+static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+ switch (storm_id) {
+ case 0:
+ return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 1:
+ return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 2:
+ return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 3:
+ return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 4:
+ return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 5:
+ return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_OVERLAY_BUF_ADDR_OFFSET;
+
+ default:
+ return 0;
+ }
+}
+
+struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const
+ fw_overlay_in_buf,
+ u32 buf_size_in_bytes)
+{
+ u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
+ struct phys_mem_desc *allocated_mem;
+
+ if (!buf_size)
+ return NULL;
+
+ allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!allocated_mem)
+ return NULL;
+
+ memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));
+
+ /* For each Storm, set physical address in RAM */
+ while (buf_offset < buf_size) {
+ struct phys_mem_desc *storm_mem_desc;
+ struct fw_overlay_buf_hdr *hdr;
+ u32 storm_buf_size;
+ u8 storm_id;
+
+ hdr =
+ (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
+ storm_buf_size = GET_FIELD(hdr->data,
+ FW_OVERLAY_BUF_HDR_BUF_SIZE);
+ storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ storm_mem_desc = allocated_mem + storm_id;
+ storm_mem_desc->size = storm_buf_size * sizeof(u32);
+
+ /* Allocate physical memory for Storm's overlays buffer */
+ storm_mem_desc->virt_addr =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ &storm_mem_desc->phys_addr, GFP_KERNEL);
+ if (!storm_mem_desc->virt_addr)
+ break;
+
+ /* Skip overlays buffer header */
+ buf_offset += OVERLAY_HDR_SIZE_DWORDS;
+
+ /* Copy Storm's overlays buffer to allocated memory */
+ memcpy(storm_mem_desc->virt_addr,
+ &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);
+
+ /* Advance to next Storm */
+ buf_offset += storm_buf_size;
+ }
+
+ /* If memory allocation has failed, free all allocated memory */
+ if (buf_offset < buf_size) {
+ qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ return NULL;
+ }
+
+ return allocated_mem;
+}
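The loop above treats the input as a flat dword stream of per-Storm sections. A hedged sketch of that layout (names are illustrative; the real header bit-fields are FW_OVERLAY_BUF_HDR_STORM_ID and FW_OVERLAY_BUF_HDR_BUF_SIZE):

#include <stdint.h>

/* Illustrative view of the overlay input buffer consumed above; only the
 * walk order is mirrored here, the real header layout comes from
 * struct fw_overlay_buf_hdr and its GET_FIELD() accessors.
 */
struct sketch_overlay_section {
	uint32_t hdr;		/* encodes storm_id and buf_size (in dwords) */
	uint32_t payload[];	/* buf_size dwords, copied to DMA memory     */
};
/* Sections are concatenated back to back; buf_offset advances by
 * OVERLAY_HDR_SIZE_DWORDS + buf_size each iteration until the whole input
 * has been consumed, or an allocation failure stops the loop early.
 */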
+
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ u32 ram_addr, i;
+
+ /* Skip Storms with no FW overlays */
+ if (!storm_mem_desc->virt_addr)
+ continue;
+
+ /* Calculate overlay RAM GRC address of current PF */
+ ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
+ sizeof(dma_addr_t) * p_hwfn->rel_pf_id;
+
+ /* Write Storm's overlay physical address to RAM */
+ for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, ram_addr,
+ ((u32 *)&storm_mem_desc->phys_addr)[i]);
+ }
+}
+
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ if (!fw_overlay_mem)
+ return;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+
+ /* Free Storm's physical memory */
+ if (storm_mem_desc->virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ storm_mem_desc->virt_addr,
+ storm_mem_desc->phys_addr);
+ }
+
+ /* Free allocated virtual memory */
+ kfree(fw_overlay_mem);
+}