Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 1665
1 file changed, 1002 insertions(+), 663 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5523da3afcdc..c4daee1b7286 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -59,6 +59,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
@@ -144,39 +145,49 @@ enum bnx2x_board_type {
BCM57711E,
BCM57712,
BCM57712_MF,
+ BCM57712_VF,
BCM57800,
BCM57800_MF,
+ BCM57800_VF,
BCM57810,
BCM57810_MF,
- BCM57840_O,
+ BCM57810_VF,
BCM57840_4_10,
BCM57840_2_20,
- BCM57840_MFO,
BCM57840_MF,
+ BCM57840_VF,
BCM57811,
- BCM57811_MF
+ BCM57811_MF,
+ BCM57840_O,
+ BCM57840_MFO,
+ BCM57811_VF
};
/* indexed by board_type, above */
static struct {
char *name;
} board_info[] = {
- { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
+ [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};
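
The board_info table above now uses C99 designated initializers, binding each name to its enum value instead of to its position in the list. A minimal standalone sketch (board names abbreviated, not the driver's) of why this makes the enum additions and reordering at the top of this patch safe:

#include <stdio.h>

enum board_type { BCM57710, BCM57711, BCM57711E, BOARD_MAX };

static const struct {
	const char *name;
} board_info[] = {
	/* initializer order is irrelevant; the bracketed index, not the
	 * position, decides which slot each string fills
	 */
	[BCM57711E] = { "57711E 10 Gigabit PCIe" },
	[BCM57710]  = { "57710 10 Gigabit PCIe [Everest]" },
	[BCM57711]  = { "57711 10 Gigabit PCIe" },
};

int main(void)
{
	for (int i = 0; i < BOARD_MAX; i++)
		printf("%d: %s\n", i, board_info[i].name);
	return 0;
}

With positional initializers, inserting the new _VF entries mid-enum (as done above) would have shifted every string after the insertion point; with designated initializers it cannot.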
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -194,12 +205,18 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_VF
+#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_VF
+#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
#endif
@@ -209,6 +226,9 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_VF
+#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
#endif
@@ -221,29 +241,41 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_VF
+#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_VF
+#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
+#endif
+
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
{ 0 }
};
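
At probe time the PCI core hands the driver the matching table entry, whose driver_data field carries the board_type index into board_info. A hedged sketch of that lookup (the function name and body are illustrative, not the driver's real probe routine):

#include <linux/pci.h>

static int bnx2x_probe_sketch(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	/* driver_data was filled from enum bnx2x_board_type in the table */
	enum bnx2x_board_type board = ent->driver_data;

	dev_info(&pdev->dev, "found %s\n", board_info[board].name);
	return 0;
}

This is why the PCI table rows and the board_info slots must agree, and why the designated initializers above remove the old ordering constraint between the two arrays.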
@@ -346,6 +378,65 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+}
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -396,7 +487,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
@@ -412,9 +503,8 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
dmae->comp_val = DMAE_COMP_VAL;
}
-/* issue a dmae command over the init-channel and wailt for completion */
-static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
- struct dmae_command *dmae)
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
@@ -692,12 +782,16 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
+
+ /* dump buffer after the mark */
for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
+
+ /* dump buffer before the mark */
for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
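
The two loops above walk a circular firmware trace buffer: first from the mark to the end of the region, then from the start of the region back up to the mark, so the dump comes out in chronological order. A userspace analogue (buffer contents invented) of the same two-pass walk:

#include <stdio.h>

static void dump_circular(const char *buf[], int n, int mark)
{
	for (int i = mark; i < n; i++)	/* entries after the mark (oldest) */
		puts(buf[i]);
	for (int i = 0; i < mark; i++)	/* wrapped entries before the mark */
		puts(buf[i]);
}

int main(void)
{
	/* the writer wrapped after evt2, so the newest data sits up front */
	const char *trace[] = { "evt3", "evt4", "evt1", "evt2" };

	dump_circular(trace, 4, 2);	/* prints evt1..evt4 in order */
	return 0;
}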
@@ -712,7 +806,71 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
-void bnx2x_panic_dump(struct bnx2x *bp)
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /* in E1 we must use only PCI configuration space to disable
+ * MSI/MSIX capability
+ * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
+ */
+ if (CHIP_IS_E1(bp)) {
+ /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
+ * use the mask register to prevent the HC from sending interrupts
+ * after we exit the function
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
int i;
u16 j;
@@ -722,6 +880,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
u16 start = 0, end = 0;
u8 cos;
#endif
+ if (disable_int)
+ bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
bp->eth_stats.unrecoverable_error++;
@@ -867,6 +1027,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
#ifdef BNX2X_STOP_ON_ERROR
+
+ /* event queue */
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ }
+
/* Rings */
/* Rx */
for_each_valid_rx_queue(bp, i) {
@@ -1038,8 +1209,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
return val;
}
-static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
- char *msg, u32 poll_cnt)
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
{
u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
if (val != 0) {
@@ -1049,7 +1220,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
return 0;
}
-static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+/* Common routines with VF FLR cleanup */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
/* adjust polling timeout */
if (CHIP_REV_IS_EMUL(bp))
@@ -1061,7 +1233,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
return FLR_POLL_CNT;
}
-static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
struct pbf_pN_cmd_regs cmd_regs[] = {
{0, (CHIP_IS_E3B0(bp)) ?
@@ -1136,10 +1308,9 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
-static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
- u32 poll_cnt)
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
- struct sdm_op_gen op_gen = {0};
+ u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
@@ -1150,19 +1321,20 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return 1;
}
- op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
- op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
- op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
- op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+ op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
- REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
BNX2X_ERR("FW final cleanup did not succeed\n");
DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
(REG_RD(bp, comp_addr)));
- ret = 1;
+ bnx2x_panic();
+ return 1;
}
/* Zero completion for nxt FLR */
REG_WR(bp, comp_addr, 0);
@@ -1170,7 +1342,7 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return ret;
}
-static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
u16 status;
@@ -1382,26 +1554,31 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
if (msix) {
val &= ~(IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN);
if (single_msix)
val |= IGU_PF_CONF_SINGLE_ISR_EN;
} else if (msi) {
val &= ~IGU_PF_CONF_INT_LINE_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
} else {
val &= ~IGU_PF_CONF_MSI_MSIX_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_INT_LINE_EN |
+ val |= (IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
}
+ /* Clean previous status - need to configure IGU prior to ack */
+ if ((!msix) || single_msix) {
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ bnx2x_ack_int(bp);
+ }
+
+ val |= IGU_PF_CONF_FUNC_EN;
+
DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
@@ -1436,71 +1613,6 @@ void bnx2x_int_enable(struct bnx2x *bp)
bnx2x_igu_int_enable(bp);
}
-static void bnx2x_hc_int_disable(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
- u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
- u32 val = REG_RD(bp, addr);
-
- /*
- * in E1 we must use only PCI configuration space to disable
- * MSI/MSIX capablility
- * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
- */
- if (CHIP_IS_E1(bp)) {
- /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
- * Use mask register to prevent from HC sending interrupts
- * after we exit the function
- */
- REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
-
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
- } else
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-
- DP(NETIF_MSG_IFDOWN,
- "write %x to HC %d (addr 0x%x)\n",
- val, port, addr);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, addr, val);
- if (REG_RD(bp, addr) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_igu_int_disable(struct bnx2x *bp)
-{
- u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
-
- val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
- IGU_PF_CONF_INT_LINE_EN |
- IGU_PF_CONF_ATTN_BIT_EN);
-
- DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
- if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_int_disable(struct bnx2x *bp)
-{
- if (bp->common.int_block == INT_BLOCK_HC)
- bnx2x_hc_int_disable(bp);
- else
- bnx2x_igu_int_disable(bp);
-}
-
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -1586,11 +1698,11 @@ static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
}
/**
- * bnx2x_trylock_leader_lock- try to aquire a leader lock.
+ * bnx2x_trylock_leader_lock- try to acquire a leader lock.
*
* @bp: driver handle
*
- * Tries to aquire a leader lock for current engine.
+ * Tries to acquire a leader lock for current engine.
*/
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
@@ -1599,6 +1711,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+ /* Set the interrupt occurred bit for the sp-task to recognize it
+ * must ack the interrupt and transition according to the IGU
+ * state machine.
+ */
+ atomic_set(&bp->interrupt_occurred, 1);
+
+ /* The sp_task must execute only after this bit
+ * is set, otherwise we will get out of sync and miss all
+ * further interrupts. Hence, the barrier.
+ */
+ smp_wmb();
+
+ /* schedule sp_task to workqueue */
+ return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
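
bnx2x_schedule_sp_task() is the producer half of a flag-plus-barrier handshake; the consumer half is the smp_rmb()/atomic_read() pair added to bnx2x_sp_task() later in this patch. A userspace C11 analogue of that pairing (not driver code; the fences stand in for smp_wmb()/smp_rmb() and the call sites mirror the driver's placement):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int interrupt_occurred;

static void isr_side(void)		/* mirrors bnx2x_schedule_sp_task() */
{
	atomic_store_explicit(&interrupt_occurred, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	/* ...the deferred work item would be queued here... */
}

static void worker_side(void)		/* mirrors bnx2x_sp_task() */
{
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	if (atomic_load_explicit(&interrupt_occurred, memory_order_relaxed)) {
		atomic_store_explicit(&interrupt_occurred, 0,
				      memory_order_relaxed);
		puts("ack interrupt, run slowpath work");
	}
}

int main(void)
{
	isr_side();
	worker_side();
	return 0;
}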
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
@@ -1613,6 +1743,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
+ /* If cid is within VF range, replace the slowpath object with the
+ * one corresponding to this VF
+ */
+ if (cid >= BNX2X_FIRST_VF_CID &&
+ cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+ bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
@@ -1664,6 +1801,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
+ /* SRIOV: reschedule any 'in_progress' operations */
+ bnx2x_iov_sp_event(bp, cid, true);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
@@ -1680,7 +1819,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
* mark pending ACK to MCP bit.
* prevent case that both bits are cleared.
* At the end of load/unload driver checks that
- * sp_state is cleaerd, and this order prevents
+ * sp_state is cleared, and this order prevents
* races
*/
smp_mb__before_clear_bit();
@@ -1689,22 +1828,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
smp_mb__after_clear_bit();
- /* schedule workqueue to send ack to MCP */
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule the sp task as mcp ack is required */
+ bnx2x_schedule_sp_task(bp);
}
return;
}
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
-{
- u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
-
- bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
- start);
-}
-
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
struct bnx2x *bp = netdev_priv(dev_instance);
@@ -1745,21 +1875,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
- if (likely(bp->state == BNX2X_STATE_OPEN)) {
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data,
- NULL);
- rcu_read_unlock();
- }
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops && (bp->cnic_eth_dev.drv_state &
+ CNIC_DRV_STATE_HANDLES_IRQ))
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
status &= ~0x1;
if (!status)
@@ -2459,23 +2591,55 @@ void bnx2x__link_status_update(struct bnx2x *bp)
return;
/* read updated dcb configuration */
- bnx2x_dcbx_pmf_update(bp);
-
- bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (IS_PF(bp)) {
+ bnx2x_dcbx_pmf_update(bp);
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ /* indicate link status */
+ bnx2x_link_report(bp);
- if (bp->link_vars.link_up)
+ } else { /* VF */
+ bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ bp->port.advertising[0] = bp->port.supported[0];
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = BP_PORT(bp);
+ bp->link_params.req_duplex[0] = DUPLEX_FULL;
+ bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_line_speed[0] = SPEED_10000;
+ bp->link_params.speed_cap_mask[0] = 0x7f0000;
+ bp->link_params.switch_cfg = SWITCH_CFG_10G;
+ bp->link_vars.mac_type = MAC_TYPE_BMAC;
+ bp->link_vars.line_speed = SPEED_10000;
+ bp->link_vars.link_status =
+ (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+ bp->link_vars.link_up = 1;
+ bp->link_vars.duplex = DUPLEX_FULL;
+ bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ __bnx2x_link_report(bp);
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
- else
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* indicate link status */
- bnx2x_link_report(bp);
+ }
}
static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
u16 vlan_val, u8 allowed_prio)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_update_params *f_update_params =
&func_params.params.afex_update;
@@ -2500,7 +2664,7 @@ static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
u16 vif_index, u8 func_bit_map)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_viflists_params *update_params =
&func_params.params.afex_viflists;
int rc;
@@ -2516,7 +2680,7 @@ static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
/* set parameters according to cmd_type */
update_params->afex_vif_list_command = cmd_type;
- update_params->vif_list_index = cpu_to_le16(vif_index);
+ update_params->vif_list_index = vif_index;
update_params->func_bit_map =
(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
update_params->func_to_clear = 0;
@@ -2800,6 +2964,10 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+#ifdef BNX2X_STOP_ON_ERROR
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
+#endif
+
return flags;
}
@@ -2875,15 +3043,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
pause->sge_th_hi + FW_PREFETCH_CNT >
MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
- tpa_agg_size = min_t(u32,
- (min_t(u32, 8, MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ tpa_agg_size = TPA_AGG_SIZE;
max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
SGE_PAGE_SHIFT;
max_sge = ((max_sge + PAGES_PER_SGE - 1) &
(~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
- sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
- 0xffff);
+ sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
}
/* pause - not for e1 */
@@ -2928,7 +3093,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* Maximum number or simultaneous TPA aggregation for this Queue.
*
- * For PF Clients it should be the maximum avaliable number.
+ * For PF Clients it should be the maximum available number.
* VF driver(s) may want to define it to a smaller value.
*/
rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
@@ -3022,7 +3187,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
if (bp->port.pmf)
storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- /* init Event Queue */
+ /* init Event Queue - PCI bus guarantees correct endianness */
eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
eq_data.producer = bp->eq_prod;
@@ -3112,65 +3277,75 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_statistics_params *fw_fcoe_stat =
&bp->fw_stats_data->fcoe;
- ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
+ fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_ucast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_bcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_mcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
- ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
+ fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->ucast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->bcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->mcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
}
/* ask L5 driver to add data to the struct */
@@ -3641,7 +3816,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
"Please contact OEM Support for assistance\n");
/*
- * Scheudle device reset (unload)
+ * Schedule device reset (unload)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
@@ -3791,6 +3966,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DRV_INFO_REQ)
bnx2x_handle_drv_info_req(bp);
+
+ if (val & DRV_STATUS_VF_DISABLED)
+ bnx2x_vf_handle_flr_event(bp);
+
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -4587,8 +4766,8 @@ static void bnx2x_attn_int(struct bnx2x *bp)
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
- u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
+ u32 igu_addr = bp->igu_base_addr;
+ igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
@@ -4616,7 +4795,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
cid);
- bnx2x_panic_dump(bp);
+ bnx2x_panic_dump(bp, false);
}
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
@@ -4658,7 +4837,8 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
/* Always push next commands out, don't wait here */
__set_bit(RAMROD_CONT, &ramrod_flags);
- switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
+ >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -4735,7 +4915,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
struct bnx2x_queue_update_params *q_update_params =
&queue_params.params.update;
- /* Send Q update command with afex vlan removal values for all Qs */
+ /* Send Q update command with afex vlan removal values for all Qs */
queue_params.cmd = BNX2X_Q_CMD_UPDATE;
/* set silent vlan removal values according to vlan mode */
@@ -4809,7 +4989,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
u8 echo;
u32 cid;
u8 opcode;
- int spqe_cnt = 0;
+ int rc, spqe_cnt = 0;
struct bnx2x_queue_sp_obj *q_obj;
struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
@@ -4837,15 +5017,27 @@ static void bnx2x_eq_int(struct bnx2x *bp)
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
-
elem = &bp->eq_ring[EQ_DESC(sw_cons)];
- cid = SW_CID(elem->message.data.cfc_del_event.cid);
- opcode = elem->message.opcode;
+ rc = bnx2x_iov_eq_sp_event(bp, elem);
+ if (!rc) {
+ DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+ rc);
+ goto next_spqe;
+ }
+ /* elem CID originates from FW; actually LE */
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
/* handle eq element */
switch (opcode) {
+ case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+ DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
+ bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+ continue;
+
case EVENT_RING_OPCODE_STAT_QUERY:
DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
"got statistics comp event %d\n",
@@ -5011,50 +5203,65 @@ next_spqe:
static void bnx2x_sp_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
- u16 status;
- status = bnx2x_update_dsb_idx(bp);
-/* if (status == 0) */
-/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+ DP(BNX2X_MSG_SP, "sp task invoked\n");
- DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
+ /* make sure the atomic interrupt_occurred has been written */
+ smp_rmb();
+ if (atomic_read(&bp->interrupt_occurred)) {
- /* HW attentions */
- if (status & BNX2X_DEF_SB_ATT_IDX) {
- bnx2x_attn_int(bp);
- status &= ~BNX2X_DEF_SB_ATT_IDX;
- }
+ /* what work needs to be performed? */
+ u16 status = bnx2x_update_dsb_idx(bp);
- /* SP events: STAT_QUERY and others */
- if (status & BNX2X_DEF_SB_IDX) {
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ DP(BNX2X_MSG_SP, "status %x\n", status);
+ DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+ atomic_set(&bp->interrupt_occurred, 0);
+
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
if (FCOE_INIT(bp) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
- /*
- * Prevent local bottom-halves from running as
- * we are going to change the local NAPI list.
- */
- local_bh_disable();
- napi_schedule(&bnx2x_fcoe(bp, napi));
- local_bh_enable();
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /* Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
}
- /* Handle EQ completions */
- bnx2x_eq_int(bp);
+ /* if status is non-zero then perhaps something went wrong */
+ if (unlikely(status))
+ DP(BNX2X_MSG_SP,
+ "got an unknown interrupt! (status 0x%x)\n", status);
- bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
- le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+ /* ack status block only if something was actually handled */
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
- status &= ~BNX2X_DEF_SB_IDX;
}
- if (unlikely(status))
- DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
- status);
-
- bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
- le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+ /* must be called after the EQ processing (since eq leads to sriov
+ * ramrod completion flows).
+ * This flow may have been scheduled by the arrival of a ramrod
+ * completion, or by the sriov code rescheduling itself.
+ */
+ bnx2x_iov_sp_task(bp);
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
@@ -5087,7 +5294,10 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
rcu_read_unlock();
}
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
return IRQ_HANDLED;
}
@@ -5101,7 +5311,6 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-
static void bnx2x_timer(unsigned long data)
{
struct bnx2x *bp = (struct bnx2x *) data;
@@ -5109,7 +5318,8 @@ static void bnx2x_timer(unsigned long data)
if (!netif_running(bp->dev))
return;
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
u32 drv_pulse;
u32 mcp_pulse;
@@ -5136,6 +5346,10 @@ static void bnx2x_timer(unsigned long data)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+ /* sample pf vf bulletin board for new posts from pf */
+ if (IS_VF(bp))
+ bnx2x_sample_bulletin(bp);
+
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -5278,7 +5492,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}
-static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -5334,7 +5548,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
- /* write indecies to HW */
+ /* write indices to HW - PCI guarantees endianness of regpairs */
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
@@ -5422,6 +5636,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bnx2x_zero_sp_sb(bp);
+ /* PCI guarantees endianness of regpairs */
sp_sb_data.state = SB_ENABLED;
sp_sb_data.host_sb_addr.lo = U64_LO(section);
sp_sb_data.host_sb_addr.hi = U64_HI(section);
@@ -5478,13 +5693,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-
/* called with netif_addr_lock_bh() */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5514,22 +5728,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
rc = bnx2x_config_rx_mode(bp, &ramrod_param);
if (rc < 0) {
BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
- return;
+ return rc;
}
+
+ return 0;
}
-/* called with netif_addr_lock_bh() */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
{
- unsigned long rx_mode_flags = 0, ramrod_flags = 0;
- unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ /* Clear the flags first */
+ *rx_accept_flags = 0;
+ *tx_accept_flags = 0;
- if (!NO_FCOE(bp))
-
- /* Configure rx_mode of FCoE Queue */
- __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-
- switch (bp->rx_mode) {
+ switch (rx_mode) {
case BNX2X_RX_MODE_NONE:
/*
* 'drop all' supersedes any accept flags that may have been
@@ -5537,25 +5750,25 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
*/
break;
case BNX2X_RX_MODE_NORMAL:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_ALLMULTI:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_PROMISC:
@@ -5563,36 +5776,57 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
- __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (IS_MF_SI(bp))
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
else
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
break;
default:
- BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
- return;
+ BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+ return -EINVAL;
}
+ /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
+ return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ int rc;
+
+ if (!NO_FCOE(bp))
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+ rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+ &tx_accept_flags);
+ if (rc)
+ return rc;
+
__set_bit(RAMROD_RX, &ramrod_flags);
__set_bit(RAMROD_TX, &ramrod_flags);
- bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
- tx_accept_flags, ramrod_flags);
+ return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+ rx_accept_flags, tx_accept_flags,
+ ramrod_flags);
}
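
The rewrite above splits the old void bnx2x_set_storm_rx_mode() in two: a pure helper that fills the accept flags through out-parameters and returns -EINVAL on an unknown mode, and a caller that propagates the return code instead of logging and swallowing it. A compact userspace sketch of the same pattern (modes and flag bits invented):

#include <stdio.h>

enum { ACCEPT_UCAST = 1 << 0, ACCEPT_MCAST = 1 << 1, ACCEPT_BCAST = 1 << 2 };

/* pure helper: no side effects beyond the out-parameters */
static int fill_accept_flags(int rx_mode, unsigned long *rx, unsigned long *tx)
{
	*rx = 0;		/* clear the flags first, as above */
	*tx = 0;

	switch (rx_mode) {
	case 0:			/* NONE: 'drop all' leaves both empty */
		break;
	case 1:			/* NORMAL */
		*rx = *tx = ACCEPT_UCAST | ACCEPT_MCAST | ACCEPT_BCAST;
		break;
	default:
		fprintf(stderr, "Unknown rx_mode: %d\n", rx_mode);
		return -1;	/* caller propagates; nothing half-applied */
	}
	return 0;
}

int main(void)
{
	unsigned long rx, tx;

	if (fill_accept_flags(1, &rx, &tx))
		return 1;
	printf("rx=%#lx tx=%#lx\n", rx, tx);
	return 0;
}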
static void bnx2x_init_internal_common(struct bnx2x *bp)
@@ -5699,6 +5933,13 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
cids[cos] = fp->txdata_ptr[cos]->cid;
}
+ /* nothing more for vf to do here */
+ if (IS_VF(bp))
+ return;
+
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_update_fpsb_idx(fp);
bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
@@ -5708,13 +5949,10 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
*/
bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
- bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
- fp->fw_sb_id, fp->igu_sb_id);
-
- bnx2x_update_fpsb_idx(fp);
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
}
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
@@ -5786,17 +6024,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
+
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+
+ if (IS_VF(bp))
+ return;
+
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base, bp->common.shmem2_base,
BP_PORT(bp));
- /* ensure status block indices were read */
- rmb();
bnx2x_init_def_sb(bp);
bnx2x_update_dsb_idx(bp);
- bnx2x_init_rx_rings(bp);
- bnx2x_init_tx_rings(bp);
bnx2x_init_sp_ring(bp);
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
@@ -6236,49 +6479,6 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
-static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
-{
- u32 offset = 0;
-
- if (CHIP_IS_E1(bp))
- return;
- if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
- return;
-
- switch (BP_ABS_FUNC(bp)) {
- case 0:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
- break;
- case 1:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
- break;
- case 2:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
- break;
- case 3:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
- break;
- case 4:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
- break;
- case 5:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
- break;
- case 6:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
- break;
- case 7:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
- break;
- default:
- return;
- }
-
- REG_WR(bp, offset, pretend_func_num);
- REG_RD(bp, offset);
- DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
-}
-
void bnx2x_pf_disable(struct bnx2x *bp)
{
u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
@@ -6322,7 +6522,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
- * take the UNDI lock to protect undi_unload flow from accessing
+ * take the RESET lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
@@ -6452,7 +6652,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* queues with "old" ILT addresses.
* c. PF enable in the PGLC.
* d. Clear the was_error of the PF in the PGLC. (could have
- * occured while driver was down)
+ * occurred while driver was down)
* e. PF enable in the CFC (WEAK + STRONG)
* f. Timers scan enable
* 3. PF driver unload flow:
@@ -6493,7 +6693,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
/* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existance of the vnic)
+ * vnic (this code assumes existence of the vnic)
*
* both steps performed by call to bnx2x_ilt_client_init_op()
* with dummy TM client
@@ -6510,7 +6710,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
}
-
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
@@ -6535,6 +6734,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+ bnx2x_iov_init_dmae(bp);
+
/* clean the DMAE memory */
bp->dmae_ready = 1;
bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
@@ -6991,7 +7192,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
}
}
-
/* If SPIO5 is set to generate interrupts, enable it for this port */
val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
if (val & MISC_SPIO_SPIO5) {
@@ -7020,15 +7220,14 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
REG_WR_DMAE(bp, reg, wb_write, 2);
}
-static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
- u8 idu_sb_id, bool is_Pf)
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
u32 data, ctl, cnt = 100;
u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+ u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
@@ -7219,8 +7418,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
/* FLR cleanup - hmmm */
if (!CHIP_IS_E1x(bp)) {
rc = bnx2x_pf_flr_clnup(bp);
- if (rc)
+ if (rc) {
+ bnx2x_fw_dump(bp);
return rc;
+ }
}
/* set MSI reconfigure capability */
@@ -7237,12 +7438,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
ilt = BP_ILT(bp);
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+ if (IS_SRIOV(bp))
+ cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
+ cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
+
+ /* since BNX2X_FIRST_VF_CID > 0 the PF L2 CIDs precede
+ * those of the VFs, so the start line should be reset
+ */
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
bp->context[i].cxt_mapping;
ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
+
bnx2x_ilt_init_op(bp, INITOP_SET);
if (!CONFIGURE_NIC_MODE(bp)) {
@@ -7315,6 +7525,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+ bnx2x_iov_init_dq(bp);
+
bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
bnx2x_init_block(bp, BLOCK_PRS, init_phase);
bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
@@ -7523,10 +7736,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
- /* fastpath */
- bnx2x_free_fp_mem(bp);
- /* end of fastpath */
-
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
@@ -7547,69 +7756,11 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-}
-
-static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
-{
- int num_groups;
- int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
-
- /* number of queues for statistics is number of eth queues + FCoE */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
-
- /* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
- * num of queues
- */
- bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
-
-
- /* Request is built from stats_query_header and an array of
- * stats_query_cmd_group each of which contains
- * STATS_QUERY_CMD_COUNT rules. The real number or requests is
- * configured in the stats_query_header.
- */
- num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
- (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
-
- bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
- num_groups * sizeof(struct stats_query_cmd_group);
-
- /* Data for statistics requests + stats_conter
- *
- * stats_counter holds per-STORM counters that are incremented
- * when STORM has finished with the current request.
- *
- * memory for FCoE offloaded statistics are counted anyway,
- * even if they will not be sent.
- */
- bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
- sizeof(struct per_pf_stats) +
- sizeof(struct fcoe_statistics_params) +
- sizeof(struct per_queue_stats) * num_queue_stats +
- sizeof(struct stats_counter);
-
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
- /* Set shortcuts */
- bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
- bp->fw_stats_req_mapping = bp->fw_stats_mapping;
-
- bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
- ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
-
- bp->fw_stats_data_mapping = bp->fw_stats_mapping +
- bp->fw_stats_req_sz;
- return 0;
-
-alloc_mem_err:
- BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
- BNX2X_ERR("Can't allocate memory\n");
- return -ENOMEM;
+ bnx2x_iov_free_mem(bp);
}
+
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
if (!CHIP_IS_E1x(bp))
@@ -7655,10 +7806,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
- goto alloc_mem_err;
-
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
* functions because CDU differs in few aspects:
@@ -7687,6 +7834,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
+ if (bnx2x_iov_alloc_mem(bp))
+ goto alloc_mem_err;
+
/* Slow path ring */
BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
@@ -7694,13 +7844,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-
- /* fastpath */
- /* need to be done at the end, since it's self adjusting to amount
- * of memory available for RSS queues
- */
- if (bnx2x_alloc_fp_mem(bp))
- goto alloc_mem_err;
return 0;
alloc_mem_err:
@@ -7803,43 +7946,53 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-void bnx2x_set_int_mode(struct bnx2x *bp)
+int bnx2x_set_int_mode(struct bnx2x *bp)
{
+ int rc = 0;
+
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ return -EINVAL;
+
switch (int_mode) {
- case INT_MODE_MSI:
+ case BNX2X_INT_MODE_MSIX:
+ /* attempt to enable msix */
+ rc = bnx2x_enable_msix(bp);
+
+ /* msix attained */
+ if (!rc)
+ return 0;
+
+ /* vfs use only msix */
+ if (rc && IS_VF(bp))
+ return rc;
+
+ /* failed to enable multiple MSI-X */
+ BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
+
+ /* falling through... */
+ case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
+
/* falling through... */
- case INT_MODE_INTx:
+ case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* if we can't use MSI-X we only need one fp,
- * so try to enable MSI-X with the requested number of fp's
- * and fallback to MSI or legacy INTx with one fp
- */
- if (bnx2x_enable_msix(bp) ||
- bp->flags & USING_SINGLE_MSIX_FLAG) {
- /* failed to enable multiple MSI-X */
- BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
- bp->num_queues,
- 1 + bp->num_cnic_queues);
-
- bp->num_queues = 1 + bp->num_cnic_queues;
-
- /* Try to enable MSI */
- if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
- !(bp->flags & DISABLE_MSI_FLAG))
- bnx2x_enable_msi(bp);
- }
- break;
+ BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
+ return -EINVAL;
}
+ return 0;
}
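
The rewritten bnx2x_set_int_mode() above replaces the old implicit fallback with an explicit fall-through switch: MSI-X is attempted first, a PF drops to MSI and then to INTx on failure, and a VF fails hard since it supports only MSI-X. A minimal userspace sketch of the same selection pattern (enable_msix()/enable_msi() are hypothetical stand-ins for the driver calls):

#include <stdio.h>
#include <stdbool.h>

enum int_mode { INT_MODE_MSIX, INT_MODE_MSI, INT_MODE_INTX };

/* stand-ins for bnx2x_enable_msix()/bnx2x_enable_msi() */
static int enable_msix(void) { return -1; /* pretend MSI-X is unavailable */ }
static void enable_msi(void) { }

static int set_int_mode(enum int_mode mode, bool is_vf, int *num_queues)
{
	if (is_vf && mode != INT_MODE_MSIX)
		return -1;			/* VFs support only MSI-X */

	switch (mode) {
	case INT_MODE_MSIX:
		if (!enable_msix())
			return 0;		/* MSI-X attained, keep all queues */
		if (is_vf)
			return -1;		/* no fallback for a VF */
		/* fall through */
	case INT_MODE_MSI:
		enable_msi();
		/* fall through */
	case INT_MODE_INTX:
		*num_queues = 1;		/* single fastpath queue */
		return 0;
	default:
		return -1;			/* unknown module parameter value */
	}
}

int main(void)
{
	int queues = 8;

	set_int_mode(INT_MODE_MSIX, false, &queues);
	printf("queues after fallback: %d\n", queues);	/* prints 1 */
	return 0;
}
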
-/* must be called prioir to any HW initializations */
+/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
+ if (IS_SRIOV(bp))
+ return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
return L2_ILT_LINES(bp);
}
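
bnx2x_cid_ilt_lines() now sizes the ILT to cover the VF CID range when SR-IOV is active: the line count is simply the highest CID to be mapped divided by the number of CIDs one ILT page covers. A worked sketch with made-up values (the real BNX2X_FIRST_VF_CID, BNX2X_VF_CIDS and ILT_PAGE_CIDS come from the driver headers, which arrange for the division to be exact):

#include <stdio.h>

int main(void)
{
	/* hypothetical values, for illustration only */
	unsigned int first_vf_cid = 256;	/* CIDs below this belong to the PF */
	unsigned int vf_cids = 1024;		/* CIDs reserved for all VFs */
	unsigned int ilt_page_cids = 128;	/* CIDs mapped by one ILT page */

	/* enough ILT lines to map every CID up to the end of the VF range */
	unsigned int lines = (first_vf_cid + vf_cids) / ilt_page_cids;

	printf("ILT lines needed: %u\n", lines);	/* (256 + 1024) / 128 = 10 */
	return 0;
}
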
@@ -8222,8 +8375,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
/* SP SB */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
- SB_DISABLED);
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -8524,7 +8677,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
}
/* Give HW time to discard old tx messages */
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Clean all ETH MACs */
rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
@@ -8562,6 +8715,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
netif_addr_unlock_bh(bp->dev);
+ bnx2x_iov_chip_cleanup(bp);
/*
@@ -8947,7 +9101,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
if (pend_bits == 0)
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -8964,8 +9118,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
int cnt = 1000;
u32 val = 0;
u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
- u32 tags_63_32 = 0;
-
+ u32 tags_63_32 = 0;
/* Empty the Tetris buffer, wait for 1s */
do {
@@ -8983,7 +9136,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
(pgl_exp_rom2 == 0xffffffff) &&
(!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -9016,7 +9169,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Prepare to chip reset: */
/* MCP */
@@ -9299,8 +9452,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
rtnl_lock();
- if (!netif_running(bp->dev))
- goto sp_rtnl_exit;
+ if (!netif_running(bp->dev)) {
+ rtnl_unlock();
+ return;
+ }
/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
@@ -9319,7 +9474,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_parity_recover(bp);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
@@ -9333,7 +9489,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
@@ -9351,13 +9508,33 @@ sp_rtnl_not_reset:
DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
netif_device_detach(bp->dev);
bnx2x_close(bp->dev);
+ rtnl_unlock();
+ return;
}
-sp_rtnl_exit:
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set mcast vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_set_mcast(bp->dev);
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+
+	/* work which needs the rtnl lock not taken (as it takes the lock itself
+	 * and can be called from other contexts as well)
+	 */
rtnl_unlock();
-}
-/* end of nic load/unload */
+ /* enable SR-IOV if applicable */
+ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
+ &bp->sp_rtnl_state))
+ bnx2x_enable_sriov(bp);
+}
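
The VF additions to bnx2x_sp_rtnl_task() all follow one request/acknowledge shape: a producer, possibly in atomic context, sets a request bit in bp->sp_rtnl_state (bracketed by the same barrier calls this patch uses) and schedules the delayed work; the task then consumes each request with test_and_clear_bit() in process context, where sleeping on the PF's response is allowed. The same shape reappears below in bnx2x_open_epilog() and bnx2x_set_rx_mode(). A kernel-style sketch of the pattern, with hypothetical names (MY_REQUEST_BIT, my_handle_request(); sp_task is assumed initialized elsewhere with INIT_DELAYED_WORK()):

/* Sketch only -- bit name and handler are hypothetical. */
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define MY_REQUEST_BIT	0

static unsigned long pending_state;
static struct delayed_work sp_task;	/* INIT_DELAYED_WORK(&sp_task, my_sp_task) at init */

static void my_handle_request(void)
{
	/* may sleep here, e.g. waiting for the PF's response */
}

/* producer side: may run in atomic context */
static void my_post_request(void)
{
	smp_mb__before_clear_bit();	/* as used around set_bit() in this patch */
	set_bit(MY_REQUEST_BIT, &pending_state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(&sp_task, 0);
}

/* consumer side: the work item, runs in process context */
static void my_sp_task(struct work_struct *work)
{
	if (test_and_clear_bit(MY_REQUEST_BIT, &pending_state))
		my_handle_request();
}
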
static void bnx2x_period_task(struct work_struct *work)
{
@@ -9394,43 +9571,13 @@ period_task_exit:
* Init service functions
*/
-static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
return base + (BP_ABS_FUNC(bp)) * stride;
}
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
-{
- u32 reg = bnx2x_get_pretend_reg(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Pretend to be function 0 */
- REG_WR(bp, reg, 0);
- REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
-
- /* From now we are in the "like-E1" mode */
- bnx2x_int_disable(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Restore the original function */
- REG_WR(bp, reg, BP_ABS_FUNC(bp));
- REG_RD(bp, reg);
-}
-
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
-{
- if (CHIP_IS_E1(bp))
- bnx2x_int_disable(bp);
- else
- bnx2x_undi_int_disable_e1h(bp);
-}
-
static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
struct bnx2x_mac_vals *vals)
{
@@ -9658,11 +9805,13 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
+ BNX2X_DEV_INFO("Path is unmarked\n");
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_test_firmware_version(bp, false);
+ rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
if (!rc) {
/* fw version is good */
@@ -9718,7 +9867,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Check if the UNDI driver was previously loaded
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (tmp_reg == 0x7) {
@@ -9726,6 +9874,8 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
prev_undi = true;
/* clear the UNDI indication */
REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
/* wait until BRB is empty */
@@ -9792,7 +9942,8 @@ static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
if (!CHIP_IS_E1x(bp)) {
u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ DP(BNX2X_MSG_SP,
+ "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
1 << BP_FUNC(bp));
}
@@ -9834,7 +9985,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
-
do {
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
@@ -10401,10 +10551,10 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
- mac_hi = cpu_to_be16(mac_hi);
- mac_lo = cpu_to_be32(mac_lo);
- memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
- memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+ __be16 mac_hi_be = cpu_to_be16(mac_hi);
+ __be32 mac_lo_be = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
+ memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
}
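
The bnx2x_set_mac_buf() fix stops reusing the CPU-order u16/u32 parameters to hold big-endian values and instead introduces properly typed __be locals, so sparse can check the conversion. The net effect is to serialize the 16-bit "hi" and 32-bit "lo" halves of a MAC address in network byte order. A runnable userspace illustration with a made-up address (htons()/htonl() standing in for cpu_to_be16()/cpu_to_be32()):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

static void set_mac_buf(uint8_t *mac_buf, uint32_t mac_lo, uint16_t mac_hi)
{
	uint16_t mac_hi_be = htons(mac_hi);
	uint32_t mac_lo_be = htonl(mac_lo);

	memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
	memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
}

int main(void)
{
	uint8_t mac[6];

	set_mac_buf(mac, 0x18a1b2c3, 0x0010);	/* hypothetical hi/lo halves */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 00:10:18:a1:b2:c3 regardless of host endianness */
	return 0;
}
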
static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
@@ -10440,6 +10590,13 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
+ bp->flags |= NO_ISCSI_FLAG;
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
+ bp->flags |= NO_FCOE_FLAG;
+
BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
bp->link_params.speed_cap_mask[0],
@@ -10547,21 +10704,21 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
/* Port info */
bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_upper);
bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_lower);
/* Node info */
bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_upper);
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
/*
@@ -10659,7 +10816,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
/* Zero primary MAC configuration */
memset(bp->dev->dev_addr, 0, ETH_ALEN);
- if (IS_MF_FCOE_AFEX(bp))
+ if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
/* use FIP MAC as primary MAC */
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
@@ -10722,7 +10879,6 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
@@ -10787,7 +10943,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
tout--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
@@ -11125,9 +11281,13 @@ static int bnx2x_init_bp(struct bnx2x *bp)
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
- rc = bnx2x_get_hwinfo(bp);
- if (rc)
- return rc;
+ if (IS_PF(bp)) {
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ }
bnx2x_set_modes_bitmap(bp);
@@ -11140,7 +11300,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* init fw_seq */
bp->fw_seq =
SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
@@ -11177,6 +11337,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ if (IS_VF(bp))
+ bp->rx_ring_size = MAX_RX_AVAIL;
/* make sure that the numbers are in the right granularity */
bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -11205,12 +11367,18 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->cnic_base_cl_id = FP_SB_MAX_E2;
/* multiple tx priority */
- if (CHIP_IS_E1x(bp))
+ if (IS_VF(bp))
+ bp->max_cos = 1;
+ else if (CHIP_IS_E1x(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
- if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
- if (CHIP_IS_E3B0(bp))
+ else if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ else
+ BNX2X_ERR("unknown chip %x revision %x\n",
+ CHIP_NUM(bp), CHIP_REV(bp));
+ BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
/* We need at least one default status block for slow-path events,
* second status block for the L2 queue, and a third status block for
@@ -11234,6 +11402,26 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
+static int bnx2x_open_epilog(struct bnx2x *bp)
+{
+	/* Enable SR-IOV via delayed work. This must be done via delayed work
+	 * because it causes the probe of the vf devices to be run, which
+	 * invokes register_netdevice which must have the rtnl lock taken. As we
+	 * are holding the lock right now, that could only work if the probe
+	 * would not take the lock. However, as the probe of the vf may be
+	 * called from other contexts as well (such as passthrough to a vm
+	 * fails) it can't assume the lock is being held for it. Using delayed
+	 * work here allows the probe code to simply take the lock (i.e. wait
+	 * for it to be released if it is being held).
+	 */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
+ return 0;
+}
+
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11241,6 +11429,7 @@ static int bnx2x_open(struct net_device *dev)
bool global = false;
int other_engine = BP_PATH(bp) ? 0 : 1;
bool other_load_status, load_status;
+ int rc;
bp->stats_init = true;
@@ -11248,53 +11437,57 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
- other_load_status = bnx2x_get_load_status(bp, other_engine);
- load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
-
- /*
- * If parity had happen during the unload, then attentions
+	/* If parity had happened during the unload, then attentions
	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
* want the first function loaded on the current engine to
* complete the recovery.
+ * Parity recovery is only relevant for PF driver.
*/
- if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
- bnx2x_chk_parity_attn(bp, &global, true))
- do {
- /*
- * If there are attentions and they are in a global
- * blocks, set the GLOBAL_RESET bit regardless whether
- * it will be this function that will complete the
- * recovery or not.
- */
- if (global)
- bnx2x_set_reset_global(bp);
+ if (IS_PF(bp)) {
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true)) {
+ do {
+ /* If there are attentions and they are in a
+ * global blocks, set the GLOBAL_RESET bit
+ * regardless whether it will be this function
+ * that will complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
- /*
- * Only the first function on the current engine should
- * try to recover in open. In case of attentions in
- * global blocks only the first in the chip should try
- * to recover.
- */
- if ((!load_status &&
- (!global || !other_load_status)) &&
- bnx2x_trylock_leader_lock(bp) &&
- !bnx2x_leader_reset(bp)) {
- netdev_info(bp->dev, "Recovered in open\n");
- break;
- }
+ /* Only the first function on the current
+ * engine should try to recover in open. In case
+ * of attentions in global blocks only the first
+ * in the chip should try to recover.
+ */
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev,
+ "Recovered in open\n");
+ break;
+ }
- /* recovery has failed... */
- bnx2x_set_power_state(bp, PCI_D3hot);
- bp->recovery_state = BNX2X_RECOVERY_FAILED;
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
- BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
- "If you still see this message after a few retries then power cycle is required.\n");
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
- return -EAGAIN;
- } while (0);
+ return -EAGAIN;
+ } while (0);
+ }
+ }
bp->recovery_state = BNX2X_RECOVERY_DONE;
- return bnx2x_nic_load(bp, LOAD_OPEN);
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+ if (rc)
+ return rc;
+ return bnx2x_open_epilog(bp);
}
/* called with rtnl_lock */
@@ -11428,7 +11621,6 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
return rc;
}
-
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
@@ -11449,12 +11641,25 @@ void bnx2x_set_rx_mode(struct net_device *dev)
CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else {
- /* some multicasts */
- if (bnx2x_set_mc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ if (IS_PF(bp)) {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
- if (bnx2x_set_uc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_PROMISC;
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ } else {
+			/* configuring mcast to a vf involves sleeping (when we
+			 * wait for the pf's response). Since this function is
+			 * called from a non-sleepable context we must schedule
+			 * a work item for this purpose
+			 */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
bp->rx_mode = rx_mode;
@@ -11468,7 +11673,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
return;
}
- bnx2x_set_storm_rx_mode(bp);
+ if (IS_PF(bp)) {
+ bnx2x_set_storm_rx_mode(bp);
+ } else {
+	/* configuring rx mode to storms in a vf involves sleeping (when
+	 * we wait for the pf's response). Since this function is
+	 * called from a non-sleepable context we must schedule
+	 * a work item for this purpose
+	 */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
/* called with rtnl_lock */
@@ -11571,7 +11789,9 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_poll_controller = poll_bnx2x,
#endif
.ndo_setup_tc = bnx2x_setup_tc,
-
+#ifdef CONFIG_BNX2X_SRIOV
+ .ndo_set_vf_mac = bnx2x_set_vf_mac,
+#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
@@ -11595,10 +11815,9 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
-static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
- unsigned long board_type)
+static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ struct net_device *dev, unsigned long board_type)
{
- struct bnx2x *bp;
int rc;
u32 pci_cfg_dword;
bool chip_is_e1x = (board_type == BCM57710 ||
@@ -11606,11 +11825,9 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
board_type == BCM57711E);
SET_NETDEV_DEV(dev, &pdev->dev);
- bp = netdev_priv(dev);
bp->dev = dev;
bp->pdev = pdev;
- bp->flags = 0;
rc = pci_enable_device(pdev);
if (rc) {
@@ -11626,9 +11843,8 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
goto err_out_disable;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&bp->pdev->dev, "Cannot find second PCI device"
- " base address, aborting\n");
+ if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11653,12 +11869,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
pci_save_state(pdev);
}
- bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (bp->pm_cap == 0) {
- dev_err(&bp->pdev->dev,
- "Cannot find power management capability, aborting\n");
- rc = -EIO;
- goto err_out_release;
+ if (IS_PF(bp)) {
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
}
if (!pci_is_pcie(pdev)) {
@@ -11690,13 +11908,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
 * support Physical Device Assignment where kernel BDF may be arbitrary
* (depending on hypervisor).
*/
- if (chip_is_e1x)
+ if (chip_is_e1x) {
bp->pf_num = PCI_FUNC(pdev->devfn);
- else {/* chip is E2/3*/
+ } else {
+		/* chip is E2/3 */
pci_read_config_dword(bp->pdev,
PCICFG_ME_REGISTER, &pci_cfg_dword);
bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
- ME_REG_ABS_PF_NUM_SHIFT);
+ ME_REG_ABS_PF_NUM_SHIFT);
}
BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
@@ -11709,25 +11928,28 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+ if (IS_PF(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (chip_is_e1x) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
- if (chip_is_e1x) {
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!chip_is_e1x)
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
- /*
- * Enable internal target-read (in case we are probed after PF FLR).
- * Must be done prior to any BAR read access. Only for 57712 and up
- */
- if (!chip_is_e1x)
- REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11778,8 +12000,9 @@ err_out:
static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
{
- u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+ u32 val = 0;
+ pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
/* return value of 1=2.5GHz 2=5GHz */
@@ -11792,7 +12015,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
struct bnx2x_fw_file_hdr *fw_hdr;
struct bnx2x_fw_file_section *sections;
u32 offset, len, num_ops;
- u16 *ops_offsets;
+ __be16 *ops_offsets;
int i;
const u8 *fw_ver;
@@ -11817,7 +12040,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Likewise for the init_ops offsets */
offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
- ops_offsets = (u16 *)(firmware->data + offset);
+ ops_offsets = (__force __be16 *)(firmware->data + offset);
num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
@@ -12044,8 +12267,12 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
int cid_count = BNX2X_L2_MAX_CID(bp);
+ if (IS_SRIOV(bp))
+ cid_count += BNX2X_VF_CIDS;
+
if (CNIC_SUPPORT(bp))
cid_count += CNIC_CID_MAX;
+
return roundup(cid_count, QM_CID_ROUND);
}
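
bnx2x_set_qm_cid_count() now folds the VF CID range into the total before rounding up to the QM granularity; the kernel's roundup(x, y) is ((x + y - 1) / y) * y. A quick worked example with illustrative numbers (the CID counts and QM_CID_ROUND below are made up; the real values are driver constants):

#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	int cid_count = 90;		/* hypothetical BNX2X_L2_MAX_CID() */
	int vf_cids = 64;		/* hypothetical BNX2X_VF_CIDS */
	int cnic_cid_max = 34;		/* hypothetical CNIC_CID_MAX */
	int qm_cid_round = 1024;	/* hypothetical QM_CID_ROUND */

	cid_count += vf_cids;		/* SR-IOV active */
	cid_count += cnic_cid_max;	/* CNIC supported */

	/* 90 + 64 + 34 = 188, rounded up to the next multiple of 1024 */
	printf("qm_cid_count = %d\n", ROUNDUP(cid_count, qm_cid_round));
	return 0;
}
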
@@ -12056,10 +12283,10 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
*
*/
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt)
+ int cnic_cnt, bool is_vf)
{
- int pos;
- u16 control;
+ int pos, index;
+ u16 control = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -12067,85 +12294,114 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos)
+ if (!pos) {
+ dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
+ }
+ dev_info(&pdev->dev, "msix capability found\n");
/*
* The value in the PCI configuration space is the index of the last
* entry, namely one less than the actual size of the table, which is
* exactly what we want to return from this function: number of all SBs
* without the default SB.
+	 * For VFs there is no default SB, so we return (index + 1).
*/
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
- return control & PCI_MSIX_FLAGS_QSIZE;
-}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
+ index = control & PCI_MSIX_FLAGS_QSIZE;
-static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *dev = NULL;
- struct bnx2x *bp;
- int pcie_width, pcie_speed;
- int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count, doorbell_size;
- int cnic_cnt;
- /*
- * An estimated maximum supported CoS number according to the chip
- * version.
- * We will try to roughly estimate the maximum number of CoSes this chip
- * may support in order to minimize the memory allocated for Tx
- * netdev_queue's. This number will be accurately calculated during the
- * initialization of bp->max_cos based on the chip versions AND chip
- * revision in the bnx2x_init_bp().
- */
- u8 max_cos_est = 0;
+ return is_vf ? index + 1 : index;
+}
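
The MSI-X Message Control word encodes the table size as N-1 for a table of N entries, so the masked value is already "all SBs without the default SB" for a PF, while a VF (which has no default SB) gets the full table size back. A small worked decode, assuming a hypothetical control value:

#include <stdio.h>

#define PCI_MSIX_FLAGS_QSIZE	0x07ff	/* table size field, encoded as N - 1 */

int main(void)
{
	unsigned short control = 0x0010;	/* hypothetical config-space read */
	int index = control & PCI_MSIX_FLAGS_QSIZE;

	printf("PF: %d non-default SBs\n", index);	/* 16 */
	printf("VF: %d SBs\n", index + 1);		/* 17: no default SB */
	return 0;
}
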
- switch (ent->driver_data) {
+static int set_max_cos_est(int chip_id)
+{
+ switch (chip_id) {
case BCM57710:
case BCM57711:
case BCM57711E:
- max_cos_est = BNX2X_MULTI_TX_COS_E1X;
- break;
-
+ return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
- break;
-
+ case BCM57712_VF:
+ return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
+ case BCM57800_VF:
case BCM57810:
case BCM57810_MF:
- case BCM57840_O:
case BCM57840_4_10:
case BCM57840_2_20:
+ case BCM57840_O:
case BCM57840_MFO:
+ case BCM57810_VF:
case BCM57840_MF:
+ case BCM57840_VF:
case BCM57811:
case BCM57811_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
- break;
-
+ case BCM57811_VF:
+ return BNX2X_MULTI_TX_COS_E3B0;
default:
- pr_err("Unknown board_type (%ld), aborting\n",
- ent->driver_data);
+ pr_err("Unknown board_type (%d), aborting\n", chip_id);
return -ENODEV;
}
+}
+
+static int set_is_vf(int chip_id)
+{
+ switch (chip_id) {
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return true;
+ default:
+ return false;
+ }
+}
- cnic_cnt = 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
- WARN_ON(!max_non_def_sbs);
+static int bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int pcie_width, pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count, doorbell_size;
+ int max_cos_est;
+ bool is_vf;
+ int cnic_cnt;
+
+ /* An estimated maximum supported CoS number according to the chip
+ * version.
+ * We will try to roughly estimate the maximum number of CoSes this chip
+ * may support in order to minimize the memory allocated for Tx
+ * netdev_queue's. This number will be accurately calculated during the
+ * initialization of bp->max_cos based on the chip versions AND chip
+ * revision in the bnx2x_init_bp().
+ */
+ max_cos_est = set_max_cos_est(ent->driver_data);
+ if (max_cos_est < 0)
+ return max_cos_est;
+ is_vf = set_is_vf(ent->driver_data);
+ cnic_cnt = is_vf ? 0 : 1;
+
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = max_non_def_sbs - cnic_cnt;
+ rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+
+ if (rss_count < 1)
+ return -EINVAL;
/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
rx_count = rss_count + cnic_cnt;
- /*
- * Maximum number of netdev Tx queues:
+ /* Maximum number of netdev Tx queues:
* Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
tx_count = rss_count * max_cos_est + cnic_cnt;
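
Putting the probe-time sizing together: the netdev gets one Rx queue per RSS queue plus the FCoE L2 queue, and one Tx queue per RSS queue per estimated CoS plus the FCoE L2 queue; a VF is pinned to a single RSS queue with no CNIC. A worked example under assumed inputs:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	bool is_vf = false;
	int max_non_def_sbs = 16;	/* hypothetical, from the MSI-X table size */
	int cnic_cnt = is_vf ? 0 : 1;
	int max_cos_est = 3;		/* e.g. the E3B0 estimate */

	int rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;	/* 15 */
	int rx_count = rss_count + cnic_cnt;				/* 16 */
	int tx_count = rss_count * max_cos_est + cnic_cnt;		/* 46 */

	printf("rx=%d tx=%d\n", rx_count, tx_count);
	return 0;
}
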
@@ -12157,42 +12413,55 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp = netdev_priv(dev);
+ bp->flags = 0;
+ if (is_vf)
+ bp->flags |= IS_VF_FLAG;
+
bp->igu_sb_cnt = max_non_def_sbs;
+ bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
bp->msg_enable = debug;
bp->cnic_support = cnic_cnt;
bp->cnic_probe = bnx2x_cnic_probe;
pci_set_drvdata(pdev, dev);
- rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
if (rc < 0) {
free_netdev(dev);
return rc;
}
+ BNX2X_DEV_INFO("This is a %s function\n",
+ IS_PF(bp) ? "physical" : "virtual");
BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
- BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
-
+ BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
+ tx_count, rx_count);
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
- /*
- * Map doorbels here as we need the real value of bp->max_cos which
- * is initialized in bnx2x_init_bp().
+ /* Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp() to determine the number of
+ * l2 connections.
*/
- doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
- if (doorbell_size > pci_resource_len(pdev, 2)) {
- dev_err(&bp->pdev->dev,
- "Cannot map doorbells, bar size too small, aborting\n");
- rc = -ENOMEM;
- goto init_one_exit;
+ if (IS_VF(bp)) {
+ bnx2x_vf_map_doorbells(bp);
+ rc = bnx2x_vf_pci_alloc(bp);
+ if (rc)
+ goto init_one_exit;
+ } else {
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ doorbell_size);
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -12200,8 +12469,25 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_one_exit;
}
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
+ if (rc)
+ goto init_one_exit;
+ }
+
+	/* Enable SR-IOV if the capability is found in configuration space.
+	 * Once the generic SR-IOV framework makes it in from the
+	 * pci tree this will be revised, to allow dynamic control
+	 * over the number of VFs. Right now, change the num-of-VFs
+	 * parameter below to enable SR-IOV.
+	 */
+ rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ if (rc)
+ goto init_one_exit;
+
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+ BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
/* disable FCOE L2 queue for E1x*/
if (CHIP_IS_E1x(bp))
@@ -12223,13 +12509,20 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Configure interrupt mode: try to enable MSI-X/MSI if
* needed.
*/
- bnx2x_set_int_mode(bp);
+ rc = bnx2x_set_int_mode(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot set interrupts\n");
+ goto init_one_exit;
+ }
+ BNX2X_DEV_INFO("set interrupts successfully\n");
+ /* register the net device */
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
goto init_one_exit;
}
+ BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
if (!NO_FCOE(bp)) {
@@ -12240,6 +12533,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+ BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
+ pcie_width, pcie_speed);
BNX2X_DEV_INFO(
"%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
@@ -12257,7 +12552,7 @@ init_one_exit:
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
+ if (IS_PF(bp) && bp->doorbells)
iounmap(bp->doorbells);
free_netdev(dev);
@@ -12297,25 +12592,37 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- bnx2x_set_power_state(bp, PCI_D0);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D0);
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
/* Power off */
- bnx2x_set_power_state(bp, PCI_D3hot);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D3hot);
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->sp_rtnl_task);
+ bnx2x_iov_remove_one(bp);
+
+ /* send message via vfpf channel to release the resources of this vf */
+ if (IS_VF(bp))
+ bnx2x_vfpf_release(bp);
+
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
- iounmap(bp->doorbells);
-
- bnx2x_release_firmware(bp);
+	/* for a vf, doorbells are part of the regview and were unmapped along
+	 * with it. FW is only loaded by the PF.
+	 */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+ bnx2x_release_firmware(bp);
+ }
bnx2x_free_mem_bp(bp);
free_netdev(dev);
@@ -13103,4 +13410,36 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ u32 offset = BAR_USTRORM_INTMEM;
+ if (IS_VF(bp))
+ return bnx2x_vf_ustorm_prods_offset(bp, fp);
+ else if (!CHIP_IS_E1x(bp))
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+
+ return offset;
+}
+
+/* called only on E1H or E2.
+ * When pretending to be PF, the pretend value is the function number 0...7
+ * When pretending to be VF, the pretend value is the PF-num:VF-valid:ABS-VFID
+ * combination
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+ u32 pretend_reg;
+
+ if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
+ return -1;
+
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(bp);
+ REG_WR(bp, pretend_reg, pretend_func_val);
+ REG_RD(bp, pretend_reg);
+ return 0;
+}
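
bnx2x_pretend_func() only arms the pretend register; the caller is expected to perform its GRC accesses under the borrowed identity and then restore its own, the same bracketing the removed bnx2x_undi_int_disable_e1h() used earlier in this patch. A hedged sketch of that pairing (do_grc_access() is a placeholder, and the surrounding driver headers are assumed):

/* Sketch only: do_grc_access() stands in for the real register work. */
static int access_as_function(struct bnx2x *bp, u16 pretend_val)
{
	int rc = bnx2x_pretend_func(bp, pretend_val);

	if (rc)
		return rc;		/* invalid pretend value on E1H */

	do_grc_access(bp);		/* runs with the pretended identity */

	/* restore our own function; the read inside flushes the GRC write */
	return bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
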